diff --git a/AUTHORS b/AUTHORS
index 657216e8546b029e2a1efb599c7639db34da9fb5..d48803eddbdefb5889bd5cdbc54f1cb1788a98a2 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -22,6 +22,7 @@ Greg Johnson <gjohnson(at)lanl.gov>
 Morris Jette <jette1(at)llnl.gov>
 Jason King <king49(at)llnl.gov>
 Nancy Kritkausky <Nancy.Kritkausky(at)bull.com>
+Eric Lin <Eric.Lin(at)bull.com>
 Bernard Li <bli(at)bcgsc.ca>
 Puenlap Lee <Puen-Lap.Lee(at)bull.com>
 Steven McDougall <smcdougall(at)sicortex.com>
diff --git a/BUILD.NOTES b/BUILD.NOTES
index d52b6be883db252bee7d8442d4bcc9d75c89c32d..1a50191ef95e5515e43279c9e715b7dc316cdda5 100644
--- a/BUILD.NOTES
+++ b/BUILD.NOTES
@@ -91,8 +91,9 @@ To build and run on AIX:
 1. export OBJECT_MODE=32
 2. Build with:
    ./configure --enable-debug --prefix=/opt/freeware \
-	--sysconfdir=/opt/freeware/etc/slurm
-	--with-ssl=/opt/freeware --with-munge=/opt/freeware
+	--sysconfdir=/opt/freeware/etc/slurm \
+	--with-ssl=/opt/freeware --with-munge=/opt/freeware \
+	--with-proctrack=/opt/freeware
    make
    make uninstall  # remove old shared libraries, aix caches them
    make install
@@ -110,7 +111,7 @@ To build and run on AIX:
 	%_with_aix		1
 	%with_ssl               "--with-ssl=/opt/freeware"
 	%with_munge             "--with-munge=/opt/freeware"
-	%with_proctrack         "--with-proctrack=/admin/llnl/include"
+	%with_proctrack         "--with-proctrack=/opt/freeware"
    Log in to the machine "uP".  uP is currently the lowest-common-denominator
      AIX machine.
    CC=/usr/bin/gcc build -s https://eris.llnl.gov/svn/slurm/tags/slurm-1-2-0-0-pre3
diff --git a/DISCLAIMER b/DISCLAIMER
index ea55a6dbacea33dba418ba350ac159bf8f1dea10..f6080c5a876ea569b1867704834e212d4dba372a 100644
--- a/DISCLAIMER
+++ b/DISCLAIMER
@@ -44,10 +44,11 @@ Prashanth Tamraparni <prashanth.tamraparni(at)hp.com>
 Jay Windley <jwindley(at)lnxi.com>
 Ann-Marie Wunderlin<Anne-Marie.Wunderlin(at)Bull.com>
 
-LLNL-CODE-402394.
+CODE-OCEC-09-009. All rights reserved.
 
 This file is part of SLURM, a resource management program.
-For details, see <http://www.llnl.gov/linux/slurm/>.
+For details, see <https://computing.llnl.gov/linux/slurm/>.
+Please also read the supplied file: DISCLAIMER.
 
 SLURM is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
diff --git a/META b/META
index dda32ed2f552c6a6906973942f844c5aab309851..38904def4ac7a266a078869490ae8d1094f84d82 100644
--- a/META
+++ b/META
@@ -1,11 +1,11 @@
   Api_age:       0
-  Api_current:   13
+  Api_current:   20
   Api_revision:  0
-  Major:         1
+  Major:         2
   Meta:          1
-  Micro:         15
-  Minor:         3
+  Micro:         2
+  Minor:         0
   Name:          slurm
   Release:       1
   Release_tags:  dist
-  Version:       1.3.15
+  Version:       2.0.2
diff --git a/Makefile.in b/Makefile.in
index 60157dd94abad650bdd6eece1b0120da1d3389f1..d5133056df5e4688d2fb07f55b934b0a0a028ec1 100644
--- a/Makefile.in
+++ b/Makefile.in
@@ -48,14 +48,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -120,6 +124,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
diff --git a/NEWS b/NEWS
index b1ae1ded512a94c4ca06b414296b187c46ba8837..5cf17dce6b1763a5b205bfed341ffe7d44d37686 100644
--- a/NEWS
+++ b/NEWS
@@ -1,8 +1,422 @@
 This file describes changes in recent versions of SLURM. It primarily
 documents those changes that are of interest to users and admins.
 
+* Changes in SLURM 2.0.2
+========================
+ -- Fix, don't remove job details when a job is cancelled while pending.
+ -- Use the correct type for mktime so garbage isn't returned on 64-bit
+    systems for accounting archival.
+ -- Better checking in sacctmgr to avoid infinite loops.
+ -- Fix minor memory leak in fake_slurm_step_layout_create().
+ -- Fix node weight (scheduling priority) calculation for powered down
+    nodes. Patch from Hongjia Cao, NUDT.
+ -- Fix node suspend/resume rate calculations. Patch from Hongjia Cao, NUDT.
+ -- Change calculations using ResumeRate and SuspendRate to provide higher
+    resolution.
+ -- Log the IP address for incoming messages having an invalid protocol 
+    version number.
+ -- Fix for sacct to show jobs that start the same second as the sacct
+    command is issued.
+ -- BLUEGENE - Fix for -n option to use the correct CPU count for each
+    midplane instead of treating -n as a c-node count.
+ -- salloc now sets SLURM_NTASKS_PER_NODE if --ntasks-per-node option is set.
+ -- Fix select/linear to properly set a job's count of allocated processors
+    (all processors on the allocated nodes).
+ -- Fix select/cons_res to allocate proper CPU count when --ntasks-per-node
+    option is used without a task count in the job request.
+ -- Ensure that no node is allocated to a job for which the CPU count is less
+    than --ntasks-per-node * --cpus-per-task.
+ -- Correct AllocProcs reported by "scontrol show node" when ThreadsPerCore
+    is greater than 1 and select/cons_res is used.
+ -- Fix scontrol show config for accounting information when values are 
+    not set in the slurm.conf.
+ -- Added a set of SBATCH_CPU_BIND* and SBATCH_MEM_BIND* env variables to keep
+    job steps launched from within a batch script from inheriting the CPU and
+    memory affinity that was applied to the batch script.
+ -- Ignore the extra processors on a node above the configured size if either
+    sched/gang or select/cons_res is configured.
+ -- Fix bug in tracking memory allocated on a node for select/cons_res plugin.
+ -- Fixed a race condition when writing labelled output with a file per task
+    or per node, which potentially closed a file before all data was written.
+ -- BLUEGENE - Fix so that if a job spans both less than and more than one
+    midplane in size, the connection type is checked appropriately.
+ -- Make sched/backfill properly schedule jobs with constraints having node
+    counts. NOTE: Backfill of jobs with constraints having exclusive OR
+    operators is not fully supported.
+ -- If srun is cancelled by SIGINT, set the job state to cancelled, not 
+    failed.
+ -- BLUEGENE - Fix for setting a subbp into an error mode where the subbp
+    specified isn't the first ionode in a nodecard.
+ -- Fix for backfill to not core dump when checking shared nodes.
+ -- Fix for scontrol to not core dump when just return is hit in
+    interactive mode.
+ -- Improve sched/backfill logic with respect to shared nodes (multiple jobs
+    per node).
+ -- In sched/wiki (Maui interface) add job info fields QOS, RCLASS, DMEM and
+    TASKSPERNODE.
+
+* Changes in SLURM 2.0.1
+========================
+ -- Fix, truncate time of start and end for job steps in sacct.
+ -- Initialize all messages to slurmdbd. Previously uninitialized string could
+    cause slurmctld to fail with invalid memory reference.
+ -- BLUEGENE - Fix for when trying to finish a torus on a block already
+    visited. Even though this may be possible electrically, it isn't valid
+    in the underlying infrastructure.
+ -- Fix, in mysql plugins change mediumints to ints to support full 32-bit
+    numbers.
+ -- Add sinfo node state filtering support for NO_RESPOND, POWER_SAVE, FAIL, 
+    MAINT, DRAINED and DRAINING states. The state filter of DRAIN still maps
+    to any node in either DRAINED or DRAINING state.
+ -- Fix reservation logic when job requests specific nodes that are already
+    in some reservation the job can not use.
+ -- Fix recomputation of a job's end time when allocated nodes which are
+    being powered up. The end time would be set in the past if the job's
+    time limit was INFINITE, resulting in it being prematurely terminated.
+ -- Permit a regular user to change the time limit of his pending jobs up to
+    the partition's limit.
+ -- Fix "-Q" (quiet) option for salloc and sbatch which was previously 
+    ignored.
+ -- BLUEGENE - fix for finding odd shaped blocks in dynamic mode.
+ -- Fix logic supporting SuspendRate and ResumeRate configuration parameters.
+    Previous logic was changing state of one too many nodes per minute.
+ -- Save new reservation state file on shutdown (even if no changes).
+ -- Fix, when partitions are deleted the sched and select plugins are notified.
+ -- Fix for slurmdbd to create wckeyids when they don't exist.
+ -- Fix linking problem that prevented checkpoint/aix from working.
+
+* Changes in SLURM 2.0.0
+========================
+ -- Fix for bluegene systems to be able to create 32 node blocks with only 
+    16 psets defined in dynamic layout mode.
+ -- Improve srun_cr handling of child srun forking. Patch from Hongjia Cao, 
+    NUDT.
+ -- Configuration parameter ResumeDelay replaced by SuspendTimeout and 
+    ResumeTimeout.
+ -- BLUEGENE - sview/sinfo now displays correct cnode numbers for drained nodes
+    or blocks in error state.
+ -- Fix some batch job launch bugs when powering up suspended nodes.
+ -- Added option '-T' for sacct to truncate time of start and end and set
+    default of --starttime to midnight of the current day.
+
+* Changes in SLURM 2.0.0-rc2
+============================
+ -- Change fanout logic to start on calling node instead of first node in 
+    message nodelist.
+ -- Fix bug so that smap builds properly on Sun Constellation system.
+ -- Filter white-space out from node feature specification.
+ -- Fixed issue with duration not being honored when updating start time in 
+    reservations.
+ -- Fix bug in sched/wiki and sched/wiki2 plugins for reporting job resource
+    allocation properly when node names are configured out of sort order 
+    with more than one numeric suffix (e.g. "tux10-1" is configured after 
+    "tux5-1").
+ -- Avoid re-use of job_id (if specified at submit time) when the existing
+    job is in completing state (possible race condition with Moab).
+ -- Added SLURM_DISTRIBUTION to env for salloc.
+ -- Add support for "scontrol takeover" command for backup controller to 
+    assume control immediately. Patch from Matthieu Hautreux, CEA.
+ -- If srun is unable to communicate with the slurmd, tasks are now marked
+    as failed with the controller.
+ -- Fixed issues with requeued jobs not being accounted for correctly in 
+    the accounting.
+ -- Clear node's POWER_SAVE flag if configuration changes to one lacking a
+    ResumeProgram.
+ -- Extend a job's time limit as appropriate due to delays powering up nodes.
+ -- If sbatch is used to launch a job step within an existing allocation (as
+    used by LSF) and the required node is powered down, print the message
+    "Job step creation temporarily disabled, retrying", sleep, and retry.
+ -- Configuration parameter ResumeDelay added to control how much time must
+    pass after a node has been suspended before resuming it (e.g. powering
+    it back up).
+ -- Fix CPU binding for batch program. Patch from Matthieu Hautreux, CEA.
+ -- Fix for front end systems so that non-responding nodes now show up
+    correctly in sinfo.
+
+* Changes in SLURM 2.0.0-rc1
+============================
+ -- Fix bug in preservation of advanced reservations when slurmctld restarts.
+ -- Updated perlapi to match correctly with slurm.h structures
+ -- Do not install the srun command on BlueGene systems (mpirun must be used to
+    launch tasks).
+ -- Corrections to scheduling logic for topology/tree in configurations where 
+    nodes are configured in multiple leaf switches.
+ -- Patch from Matthieu Hautreux for backup mysql daemon support.
+ -- Changed DbdBackup to DbdBackupHost for slurmdbd.conf file
+ -- Add support for spank_strerror() function and improve error handling in
+    general for SPANK plugins.
+ -- Added configuration parameter SrunIOTimeout to optionally ping srun's
+    tasks for better fault tolerance (e.g. killed and restarted SLURM
+    daemons on compute nodes).
+ -- Add slurmctld and slurmd binding to appropriate communications address
+    based upon NodeAddr, ControllerAddr and BackupAddr configuration 
+    parameters. Based upon patch from Matthieu Hautreux, CEA.
+    NOTE: Fails when SlurmDBD is configured with some configurations.
+    NOTE: You must define BIND_SPECIFIC_ADDR to enable this option.
+ -- Avoid using powered down nodes when scheduling work if possible. 
+    Fix possible invalid memory reference in power save logic.
+
+* Changes in SLURM 1.4.0-pre13
+==============================
+ -- Added new partition option AllocNodes which controls the hosts from 
+    which jobs can be submitted to this partition. From Matthieu Hautreux, CEA.
+ -- Better support the --contiguous option for job allocations.
+ -- Add new scontrol option: show topology (reports contents of topology.conf 
+    file via RPC if topology/tree plugin is configured).
+ -- Add advanced reservation display to smap command.
+ -- Replaced remaining references to SLURM_JOBID with SLURM_JOB_ID - except
+    when needed for backwards compatibility.
+ -- Fix logic to properly excise a DOWN node from the allocation of a job
+    with the --no-kill option.
+ -- The MySQL and PgSQL plugins for accounting storage and job completion are
+    now only built if the underlying database libraries exist (previously
+    the plugins were built to produce a fatal error when used).
+ -- BLUEGENE - scontrol show config will now display bluegene.conf information.
+
+* Changes in SLURM 1.4.0-pre12
+==============================
+ -- Added support for hard time limits by association with a new configuration
+    option PriorityUsageResetPeriod. This specifies the interval at which to 
+    clear the record of time used. This is currently only available with the 
+    priority/multifactor plugin.
+ -- Added SLURM_SUBMIT_DIR to sbatch's output environment variables.
+ -- Backup slurmdbd support implemented.
+ -- Update to checkpoint/xlch logic from Hongjia Cao, NUDT.
+ -- Added configuration parameter AccountingStorageBackupHost.
+
+* Changes in SLURM 1.4.0-pre11
+==============================
+ -- Fix slurm.spec file for RPM build.
+
+* Changes in SLURM 1.4.0-pre10
+==============================
+ -- Critical bug fix in task/affinity when the CoresPerSocket is greater
+    than the ThreadsPerCore (invalid memory reference).
+ -- Add DebugFlags value of "Wiki" to log sched/wiki and wiki2
+    communications in greater detail.
+ -- Add "-d <slurmstepd_path>" as an option to the slurmd daemon to
+    specifying a non-stardard slurmstepd file, used  for testing purposes.
+ -- Minor cleanup to crypto/munge plugin.
+    - Restrict uid allowed to decode job credentials in crypto/munge
+    - Get slurm user id early in crypto/munge
+    - Remove buggy error code handling in crypto/munge
+ -- Added sprio command - works only with the priority/multifactor plugin
+ -- Add real topology plugin infrastructure (it was initially added 
+    directly into slurmctld code). To specify topology information,
+    set TopologyPlugin=topology/tree and add configuration information
+    to a new file called topology.conf. See "man topology.conf" or
+    topology.html web page for details.
+ -- Set "/proc/self/oom_adj" for slurmd and slurmstepd daemons based upon
+    the values of SLURMD_OOM_ADJ and SLURMSTEPD_OOM_ADJ environment 
+    variables. This can be used to prevent daemons being killed when
+    a node's memory is exhausted. Based upon patch by Hongjia Cao, NUDT.
+ -- Fix several bugs in task/affinity: cpuset logic was broken and 
+    --cpus-per-task option not properly handled.
+ -- Ensure slurmctld adopts SlurmUser GID as well as UID on startup.
+
+* Changes in SLURM 1.4.0-pre9
+=============================
+ -- OpenMPI users only: Add srun logic to automatically recreate and 
+    re-launch a job step if the step fails with a reserved port conflict.
+ -- Added TopologyPlugin configuration parameter.
+ -- Added switch topology data structure to slurmctld (for use by select
+    plugin) and load it based upon new slurm.conf parameters: SwitchName,
+    Nodes, Switches and LinkSpeed.
+ -- Modify select/linear and select/cons_res plugins to optimize resource
+    allocation with respect to network topology.
+ -- Added support for new configuration parameter EpilogSlurmctld (executed
+    by slurmctld daemon).
+ -- Added checkpoint/blcr plugin; SLURM now supports job checkpoint/restart
+    using BLCR. Patch from Hongjia Cao, NUDT, China.
+ -- Made a variety of new environment variables available to PrologSlurmctld
+    and EpilogSlurmctld. See the "Prolog and Epilog Scripts" section of the 
+    slurm.conf man page for details.
+ -- NOTE: Cold-start (without preserving state) required for upgrade from 
+    version 1.4.0-pre8.
+
+* Changes in SLURM 1.4.0-pre8
+=============================
+ -- In order to create a new partition using the scontrol command, use
+    the "create" option rather than "update" (which will only operate
+    upon partitions that already exist).
+ -- Added environment variable SLURM_RESTART_COUNT to batch jobs to
+    indicate the count of job restarts made.
+ -- Added sacctmgr command "show config".
+ -- Added the scancel option --nodelist to cancel any jobs running on a
+    given list of nodes.
+ -- Add partition-specific DefaultTime (default time limit for jobs; if not
+    specified, the partition's MaxTime is used). Patch from Par
+    Andersson, National Supercomputer Centre, Sweden.
+ -- Add support for the scontrol command to be able to change the Weight
+    associated with nodes. Patch from Krishnakumar Ravi[KK] (HP).
+ -- Add DebugFlags configuration option of "CPU_Bind" for detailed CPU
+    binding information to be logged.
+ -- Fix some significant bugs in task binding logic (possible infinite loops
+    and memory corruption).
+ -- Add new node state flag of NODE_STATE_MAINT indicating the node is in
+    a reservation of type MAINT.
+ -- Modified task/affinity plugin to automatically bind tasks to sockets,
+    cores, or threads as appropriate based upon resource allocation and
+    task count. User can override with srun's --cpu_bind option.
+ -- Fix bug in backfill logic for select/cons_res plugin which resulted in
+    the error "cons_res:_rm_job_from_res: node_state mis-count".
+ -- Add logic to bind a batch job to the resources allocated to that job.
+ -- Add configuration parameter MpiParams for (future) OpenMPI port 
+    management. Add resv_port_cnt and resv_ports fields to the job step 
+    data structures. Add environment variable SLURM_STEP_RESV_PORTS to
+    show what ports are reserved for a job step.
+ -- Add support for SchedulerParameters=interval=<sec> to control the time
+    interval between executions of the backfill scheduler logic.
+ -- Preserve record of last job ID in use even when doing a cold-start unless
+    there is no job state file or there is a change in its format (which only 
+    happens when there is a change in SLURM's major or minor version number: 
+    v1.3 -> v1.4).
+ -- Added new configuration parameter KillOnBadExit to kill a job step as soon
+    as any task of a job step exits with a non-zero exit code. Patch based
+    on work from Eric Lin, Bull.
+ -- Add spank plugin calls for use by salloc and sbatch command, see 
+    "man spank" for details.
+ -- NOTE: Cold-start (without preserving state) required for upgrade from 
+    version 1.4.0-pre7.
+
+* Changes in SLURM 1.4.0-pre7
+=============================
+ -- Bug fix for preemption with select/cons_res when there are no idle nodes.
+ -- Bug fix for use of srun options --exclusive and --cpus-per-task together
+    for job step resource allocation (tracking of cpus in use was bad).
+ -- Added the srun option --preserve-env to pass the current values of 
+    environment variables SLURM_NNODES and SLURM_NPROCS through to the 
+    executable, rather than computing them from commandline parameters.
+ -- For select/cons_res or sched/gang only: Validate a job's resource 
+    allocation socket and core count on each allocated node. If the node's
+    configuration has been changed, then abort the job.
+ -- For select/cons_res or sched/gang only: Disable updating a node's 
+    processor count if FastSchedule=0. Administrators must set a valid
+    processor count although the memory and disk space configuration can
+    be loaded from the compute node when it starts.
+ -- Add configure option "--disable-iso8601" to disable SLURM use of ISO 8601
+    time format at the time of SLURM build. Default output for all commands
+    is now ISO 8601 (yyyy-mm-ddThh:mm:ss).
+ -- Add support for scontrol to explicitly power a node up or down using the
+    configured SuspendProg and ResumeProg programs.
+ -- Fix bookkeeping in select/cons_res logic for tracking the number of
+    allocated CPUs on a node when a partition's Shared value is YES or FORCE.
+ -- Added configure options "--enable-cray-xt" and "--with-apbasil=PATH" for
+    eventual support of Cray-XT systems.
+
+* Changes in SLURM 1.4.0-pre6
+=============================
+ -- Fix job preemption when sched/gang and select/linear are configured with
+    non-sharing partitions.
+ -- In select/cons_res ensure that required nodes have available resources.
+
+* Changes in SLURM 1.4.0-pre5
+=============================
+ -- Correction in setting of SLURM_CPU_BIND environment variable.
+ -- Rebuild slurmctld's job select_jobinfo->node_bitmap on restart/reconfigure
+    of the daemon rather than restoring the bitmap since the nodes in a system
+    can change (be added or removed).
+ -- Add configuration option "--with-cpusetdir=PATH" for non-standard 
+    locations.
+ -- Get new multi-core data structures working on BlueGene systems.
+ -- Modify PMI_Get_clique_ranks() to return an array of integers rather 
+    than a char * to satisfy PMI standard. Correct logic in 
+    PMI_Get_clique_size() for when srun --overcommit option is used.
+ -- Fix bug in select/cons_res which allocated a job all of the processors
+    on a node when the --exclusive option is specified as a job submit
+    option.
+ -- Add NUMA cpu_bind support to the task affinity plugin. Binds tasks to
+    a set of CPUs that belong to a NUMA locality domain with the appropriate
+    --cpu-bind option (ldoms, rank_ldom, map_ldom, and mask_ldom), see
+    "man srun" for more information.
+
+* Changes in SLURM 1.4.0-pre4
+=============================
+ -- For task/affinity, force jobs to use a particular task binding by setting
+    the TaskPluginParam configuration parameter rather than slurmd's
+    SLURM_ENFORCED_CPU_BIND environment variable.
+ -- Enable full preemption of jobs by partition with select/cons_res 
+    (cons_res_preempt.patch from Chris Holmes, HP).
+ -- Add configuration parameter DebugFlags to provide detailed logging for
+    specific subsystems (steps and triggers so far).
+ -- srun's --no-kill option is passed to slurmctld so that a job step is
+    killed even if the node where srun executes goes down (unless the
+    --no-kill option is used; previous termination logic would fail if
+    srun was not responding).
+ -- Transfer a job step's core bitmap from the slurmctld to the slurmd
+    within the job step credential.
+ -- Add cpu_bind, cpu_bind_type, mem_bind and mem_bind_type to job allocation
+    request and job_details structure in slurmctld. Add support to --cpu_bind
+    and --mem_bind options from salloc and sbatch commands.
+
+* Changes in SLURM 1.4.0-pre3
+=============================
+ -- Internal changes: CPUs per node changed from 32-bit to 16-bit size.
+    Node count fields changed from 16-bit to 32-bit size in some structures.
+ -- Remove select plugin functions select_p_get_extra_jobinfo(),
+    select_p_step_begin() and select_p_step_fini().
+ -- Remove the following slurmctld job structure fields: num_cpu_groups,
+    cpus_per_node, cpu_count_reps, alloc_lps_cnt, alloc_lps, and used_lps.
+    Use equivalent fields in new "select_job" structure, which is filled
+    in by the select plugins.
+ -- Modify mem_per_task in job step request from 16-bit to 32-bit size.
+    Use new "select_job" structure for the job step's memory management.
+ -- Add core_bitmap_job to slurmctld's job step structure to identify
+    which specific cores are allocated to the step.
+ -- Add new configuration option OverTimeLimit to permit jobs to exceed 
+    their (soft) time limit by a configurable amount. Backfill scheduling
+    will be based upon the soft time limit.
+ -- Remove select_g_get_job_cores(). That data is now within the slurmctld's
+    job structure.
+
+* Changes in SLURM 1.4.0-pre2
+=============================
+ -- Remove srun's --ctrl-comm-ifhn-addr option (for PMI/MPICH2). It is no
+    longer needed.
+ -- Modify power save mode so that nodes can be powered off when idle. See
+    https://computing.llnl.gov/linux/slurm/power_save.html or 
+    "man slurm.conf" (SuspendProgram and related parameters) for more 
+    information.
+ -- Added configuration parameter PrologSlurmctld, which can be used to boot
+    nodes into a particular state for each job. See "man slurm.conf" for 
+    details.
+ -- Add configuration parameter CompleteTime to control how long to wait for 
+    a job's completion before allocating already released resources to pending
+    jobs. This can be used to reduce fragmentation of resources. See
+    "man slurm.conf" for details.
+ -- Make default CryptoType=crypto/munge. OpenSSL is now completely optional.
+ -- Make default AuthType=auth/munge rather than auth/none.
+ -- Change output format of "sinfo -R" from "%35R %N" to "%50R %N".
+
+* Changes in SLURM 1.4.0-pre1
+=============================
+ -- Save/restore a job's task_distribution option on slurmctld restart.
+    NOTE: SLURM must be cold-started on conversion from version 1.3.x.
+ -- Remove task_mem from job step credential (only job_mem is used now).
+ -- Remove --task-mem and --job-mem options from salloc, sbatch and srun
+    (use --mem-per-cpu or --mem instead).
+ -- Remove DefMemPerTask from slurm.conf (use DefMemPerCPU or DefMemPerNode
+    instead).
+ -- Modify slurm_step_launch API call. Move launch host from function argument
+    to element in the data structure slurm_step_launch_params_t, which is
+    used as a function argument.
+ -- Add state_reason_string to job state with optional details about why
+    a job is pending.
+ -- Make "scontrol show node" output match scontrol input for some fields
+    ("Cores" changed to "CoresPerSocket", etc.).
+ -- Add support for a new node state "FUTURE" in slurm.conf. These node records
+    are created in SLURM tables for future use without a reboot of the SLURM
+    daemons, but are not reported by any SLURM commands or APIs.
+
+* Changes in SLURM 1.3.17
+=========================
+ -- Fix bug in configure script that can clear user specified LIBS.
+
 * Changes in SLURM 1.3.16
 =========================
+ -- Fix memory leak in forward logic of tree message passing.
+ -- Fix job exit code recorded for srun job allocation.
+ -- Bluegene - Bug fix for too many parameters being passed to a debug
+    statement.
+ -- Bluegene - Bug fix for systems larger than 8 in the X dim running
+    dynamic mode.
 
 * Changes in SLURM 1.3.15
 =========================
@@ -24,8 +438,6 @@ documents those changes that are of interest to users and admins.
  -- Fix bug in logic to remove a job's dependency, could result in abort.
  -- Add new error message to sched/wiki and sched/wiki2 (Maui and Moab) for
     STARTJOB request: "TASKLIST includes non-responsive nodes".
- -- Fix bug in task layout for heterogeneous nodes and srun --exclusive
-    option.
  -- Fix bug in select/linear when used with sched/gang that can result in a 
     job's required or excluded node specification being ignored.
  -- Add logic to handle message connect timeouts (timed-out.patch from 
@@ -3754,4 +4166,4 @@ documents those changes that are of interest to users and admins.
  -- Change directory to /tmp in slurmd if daemonizing.
  -- Logfiles are reopened on reconfigure.
  
-$Id: NEWS 17225 2009-04-10 19:25:52Z da $
+$Id: NEWS 17869 2009-06-16 22:36:02Z jette $
diff --git a/RELEASE_NOTES b/RELEASE_NOTES
index f9a138723996ece0ed80897e3eaaed204c36aecd..4e92c65a67a04999d76c97b87566d817d4a06455 100644
--- a/RELEASE_NOTES
+++ b/RELEASE_NOTES
@@ -1,259 +1,154 @@
-RELEASE NOTES FOR SLURM VERSION 1.3
-27 June 2008
+RELEASE NOTES FOR SLURM VERSION 2.0
+11 February 2009 (after SLURM 1.4.0-pre8 released)
 
 
 IMPORTANT NOTE:
-SLURM state files in version 1.3 are different from those of version 1.2.
-After installing SLURM version 1.2, plan to restart without preserving 
-jobs or other state information. While SLURM version 1.2 is still running, 
+SLURM state files in version 2.0 are different from those of version 1.3.
+After installing SLURM version 2.0, plan to restart without preserving 
+jobs or other state information. While SLURM version 1.3 is still running, 
 cancel all pending and running jobs (e.g.
 "scancel --state=pending; scancel --state=running"). Then stop and restart 
 daemons with the "-c" option or use "/etc/init.d/slurm startclean".
 
+If using the slurmdbd (SLURM DataBase Daemon) you must update it first.
+The 2.0 slurmdbd will work with SLURM daemons at version 1.3.7 and above.
+You will not need to update all clusters at the same time, but it is very
+important to update slurmdbd first and have it running before updating
+any other clusters making use of it. No real harm will come from updating
+your systems before the slurmdbd, but they will not talk to each other
+until you do.
+
 There are substantial changes in the slurm.conf configuration file. It 
 is recommended that you rebuild your configuration file using the tool
-doc/html/configurator.html that comes with the distribution. The node 
-information is unchanged and the partition information only changes for 
-the Shared and Priority parameters, so those portions of your old 
-slurml.conf file may be copied into the new file.
-
-Two areas of substantial change are accounting and job scheduling.
-Slurm is now able to save accounting information in a database, 
-either MySQL or PostGreSQL. We have written a new daemon, slurmdbd 
-(Slurm DataBase Daemon), to serve as a centralized data manager for 
-multiple Slurm clusters. A new tool sacctmgr is available to manage
-user accounting information through SlurmdDBD and a variety of 
-other tools are still under development to generate assorted 
-acccounting reports including graphics and a web interface. Slurm 
-now supports gang scheduling (time-slicing of parallel jobs for 
-improved responsiveness and system utilization). Many related 
-scheduling changes have also been made. 
-
-There are changes in SLURM's RPMs. "slurm-auth-munge" was changed to 
-"slurm-munge" since it now contains the authentication and cryptographic 
-signature plugins for Munge. The SLURM plugins have been moved out of 
-the main "slurm" RPM to a new RPM called "slurm-plugins". There is a 
-new RPM called "slurm-slurmdbd" (SLURM DataBase Daemon). Slurmdbd is 
-used to provide a secure SLURM database interface for accounting purposes 
-(more information about that below). The "slurm-slurmdbd" RPM requires 
-the "slurm-plugins" RPM, but none of the other SLURM RPMs. The main 
-"slurm" RPM also requires the "slurm-plugins" RPM.
-
-To archive accounting records in a database then database RPMs must be 
-installed where the SLURM RPMs are build and where the database is used. 
-You have a choise of database, either "mysql" plus "mysql-devel" or 
-"postgres" plus "postgres-devel" RPMs.
-
-Many enhancements have been made for better Slurm integration with 
-Moab and Maui schedulers. Moab version 5.2.3 or higher should be 
-used with SLURM version 1.3. In the Moab configuration file, moab.cfg,
-change the SUBMITCMD option from "srun --batch" to "sbatch" since the 
-"srun --batch" option is no longer valid (use of full pathnames to 
-the commands are recommended, e.g. "/usr/local/bin/sbatch").
-
-Major changes in Slurm version 1.3 are described below. Some changes
-made after the initial release of Slurm version 1.2 are also noted.
-Many less significant changes are not identified here. A complete list 
-of changes can be found in the NEWS file. Man pages should be consulted
-for more details about command and configuration parameter changes.
-
-
-COMMAND CHANGES
-===============
-* The srun options --allocate, --attach and --batch have been removed.
-  Use the new commands added in SLURM version 1.2 for this functionality:
-  salloc  - Create a job allocation (functions like "srun --allocate")
-  sattach - Attach to an existing job step (functions like "srun --attach")
-  sbatch  - Submit a batch job script (functions like "srun --batch")
-  These commands generally have the same options as the srun command.
-  See the individual man pages for more information. 
-
-* The slaunch command has been removed. Use the srun command instead.
-
-* The srun option --exclusive has been added for job steps to be 
-  allocated processors not already assigned to other job steps. This 
-  can be used to execute multiple job steps simultaneously within a 
-  job allocation and have SLURM perform resource management for the 
-  job steps much like it does for jobs. If dedicated resources are 
-  not immediately available, the job step will be executed later 
-  unless the --immediate option is also set.
-
-* Support is now provided for feature counts in job constraints. For 
-  example: srun --nodes=16 --constraint=graphics*4 ...
-
-* The srun option --pty has been added to start the job with a pseudo 
-  terminal attached to task zero (all other tasks have I/O discarded).
-
-* Job time limits can be specified using the following formats: min, 
-  min:sec, hour:min:sec, and days-hour:min:sec (formerly only supported 
-  minutes).
-
-* scontrol now shows job TimeLimit and partition MaxTime in the format of
-  [days-]hours:minutes:seconds or "UNLIMITED". The scontrol update options 
-  for times now accept minutes, minutes:seconds, hours:minutes:seconds, 
-  days-hours, days-hours:minutes, days-hours:minutes:seconds or "UNLIMITED".
-  This new format also applies to partition MaxTime in the slurm.conf file.
-
-* scontrol now shows job required nodes using the format of 
-  ReqNodes=<min>[-<max>] rather than MinNodes=<min> (and no maximum
-  node count reported).
-
-* scontrol "notify" command added to send message to stdout of srun for 
-  specified job id. 
-
-* Support has been added for a much richer job dependency specification 
-  including testing of exit codes and multiple dependencies.
-
-* The srun options --checkpoint=<interval> and --checkpoint-path=<file_path>
-  have been added.
-
-* Event trigger support was added in Slurm v1.2.2. The command strigger
-  was added to manage the triggers.
-
-* Added a --task-mem option and removed --job-mem option from srun, salloc, 
-  and sbatch commands. Memory limits are applied on a per-task basis.
-
-
-SCHEDULING CHANGES
-==================
-* The sched/backfill plugin has been largely re-written. It now supports 
-  select/cons_res and all job options (required nodes, excluded nodes, 
-  contiguous, etc.).
-
-* Added a new partition parameter, Priority. A job's scheduling priority is 
-  based upon two factors. First the priority of its partition and second the 
-  job's priority. Since nodes can be configured in multiple partitions, this 
-  can be used to configure high priority partitions (queues).
-
-* The partition parameter Shared now has a job count. For example:
-  Shared=YES:4     (Up to 4 jobs may share each resource, user control)
-  Shared=FORCE:2   (Up to 2 jobs may share each resource, no user control)
-
-* Added new parameters DefMemPerTask and MaxMemPerTask to control the default
-  and maximum memory per task. Any task that exceeds the specified size will 
-  be terminated (enforcement requires job accounting to be enabled with a 
-  non-zero value for JoabAcctGatherFrequency).
-
-* The select linear plugin (allocating whole nodes to jobs) can treat memory 
-  as a consumable resource with SelectTypeParameter=CR_Memory configured.
-
-* A new scheduler type, gang, was added for gang scheduling (time-slicing of 
-  parallel jobs). Note: The Slurm gang scheduler is not compatible with the
-  LSF, Maui or Moab schedulers.
-
-* The new parameter, SchedulerTimeSlice, controls the length of gang scheduler 
-  time slices.
-
-* Added a new parameter, Licenses to support cluster-wide consumable 
-  resources. The --licenses option was also added to salloc, sbatch, 
-  and srun.
-
-* The Shared=exclusive option in conjunction with SelectType=select/cons_res
-  can be used to dedicate whole nodes to jobs in specific partitions while
-  allocating sockets, cores, or hyperthreads in other partitions.
-
-* Changes in the interface with the Moab and Maui scheduler have been 
-  extensive providing far better integration between the systems.
-  * Many more parameters are shared between the systems.
-  * A new wiki.conf parameter, ExcludePartitions, can be used to enable 
-    Slurm-based scheduling of jobs in specific partitions to achieve
-    better responsiveness while losing Moab or Maui policy controls.
-  * Another new wiki.conf parameter, HidePartitionJobs, can be used to 
-    to hide jobs in specific partitions from Moab or Maui as well. See
-    the wiki.conf man pages for details. 
-  * Moab relies upon Slurm to get a user's environment variables upon 
-    job submission. If this can not be accomplished within a few seconds 
-    (see the GetEnvTimeout parameter) then cache files can be used. Use
-    contribs/env_cache_builder.c to build these cache files.
-
+doc/html/configurator.html that comes with the distribution.
+
+SLURM can continue to be used as a simple resource manager, but optional
+plugins support sophisticated scheduling algorithms. These plugins do require 
+the use of a database containing user and bank account information, so 
+more administration work is required. SLURM's modular design lets you 
+control the functionality that you want it to provide.
+
+HIGHLIGHTS
+* Sophisticated scheduling algorithms are available in a new plugin. Jobs
+  can be prioritized based upon their age, size and/or fair-share resource 
+  allocation using hierarchical bank accounts. For more information see:
+  https://computing.llnl.gov/linux/slurm/job_priority.html
+* An assortment of resource limits can be imposed upon individual users 
+  and/or hierarchical bank accounts such as maximum job time limit, maximum 
+  job size and maximum number of running jobs. For more information see:
+  https://computing.llnl.gov/linux/slurm/resource_limits.html
+* Advanced reservations can be made to ensure resources will be available when
+  needed. For more information see:
+  https://computing.llnl.gov/linux/slurm/reservations.html
+* Nodes can now be completely powered down when idle and automatically
+  restarted when there is work available. For more information see:
+  https://computing.llnl.gov/linux/slurm/power_save.html
+* SLURM has been modified to allocate specific cores to jobs and job steps in
+  the centralized scheduler rather than the daemons running on the individual
+  compute nodes. This permits effective preemption or gang scheduling of jobs.
+* New configuration parameters, PrologSlurmctld and EpilogSlurmctld, can be 
+  used to support the booting of different operating systems for each job. 
+  See "man slurm.conf" for details. 
+* Preemption of jobs from lower priority partitions in order to execute jobs
+  in higher priority partitions is now supported. The jobs from the lower
+  priority partition will resume once the preempting job completes. For more
+  information see:
+* Added support for optimized resource allocation with respect to network
+  topology. Requires switch configuration information be added to slurm.conf.
+* Support added for Sun Constellation system with optimized resource allocation
+  for a 3-dimensional torus interconnect. For more information see:
+  https://computing.llnl.gov/linux/slurm/sun_const.html
+* Support added for IBM BlueGene/P systems, including High Throughput Computing
+  (HTC) mode.
+* Support for checkpoint/restart using BLCR added using the checkpoint/blcr
+  plugin. For more information see:
+  https://computing.llnl.gov/linux/slurm/checkpoint_blcr.html
+  https://ftg.lbl.gov/CheckpointRestart/CheckpointRestart.shtml
+
+CONFIGURATION FILE CHANGES (see "man slurm.conf" for details)
+* The default AuthType is now "auth/munge" rather than "auth/none".
+* The default CryptoType is now "crypto/munge". OpenSSL is no longer required
+  by SLURM in the default configuration.
+* DefaultTime has been added to specify a default job time limit in the 
+  partition. If not set, uses the partition's MaxTime.
+* PrologSlurmctld has been added and can be used to boot nodes into a 
+  particular state for each job.
+* DefMemPerTask has been removed. Use DefMemPerCPU or DefMemPerNode instead.
+* KillOnBadExit added to immediately terminate a job step whenever any task
+  terminates with a non-zero exit code (see the sketch at the end of this
+  list).
+* Added new node state of "FUTURE". These node records are created in SLURM
+  tables for future use without a reboot of the SLURM daemons, but are not
+  reported by any SLURM commands or APIs.
+* BatchStartTime has been added to control how long to wait for a batch job
+  to start (complete Prolog, load environment for Moab, etc.).
+* CompleteTime has been added to control how long to wait for a job's 
+  completion before allocating already released resources to pending jobs.
+* OverTimeLimit added to permit jobs to exceed their (soft) time limit by a
+  configurable amount. Backfill scheduling will be based upon the soft time
+  limit.
+* For select/cons_res or sched/gang only: Each node's processor count must be
+  specified in the configuration file. Additional resources found by SLURM
+  daemons on the compute nodes will not be used.
+* DebugFlags added to provide detailed logging for specific subsystems.
+* Added job priority plugin. Default for PriorityType is "priority/basic",
+  which is the same logic SLURM has today (job priorities are assigned at
+  submit time with decreasing value). "priority/multifactor" is a new plugin
+  that sets a job's priority based on many different configuration
+  parameters, as described here:
+  https://computing.llnl.gov/linux/slurm/job_priority.html
+* The task/affinity plugin will automatically bind a job step to the CPUs
+  it has been allocated. The entity bound to (sockets, cores or threads)
+  will be automatically set based upon the allocation size and task count.
+  SLURM's SPANK cpuset plugin is no longer needed.
+* Resource allocations can now be optimized according to network topology.
+  The following switch topology configuration options have been added:
+  TopologyPlugin in slurm.conf, and SwitchName, Nodes and Switches in a new
+  topology.conf file. More information is available in the man pages for
+  slurm.conf and topology.conf, and at
+  https://computing.llnl.gov/linux/slurm/topology.html
+* SrunIOTimeout has been added to optionally ping srun's tasks for better
+  fault tolerance (e.g. killed and restarted SLURM daemons on compute nodes).
+* ResumeDelay added to control how much time must pass after a node has been
+  suspended before resuming it (e.g. powering it back up).
+* BLUEGENE - Added option DenyPassthrough in the bluegene.conf. Can be set
+  to any combination of X, Y, and Z to disallow passthroughs when running
+  in dynamic layout mode (see "man bluegene.conf" for details).
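+
+As illustration only, a minimal slurm.conf sketch using several of the new
+parameters (the names come from the list above; the values shown are
+arbitrary examples, not recommendations):
+  KillOnBadExit=1
+  OverTimeLimit=5
+  DebugFlags=CPU_Bind
+  PriorityType=priority/multifactor
+  PartitionName=batch Nodes=tux[0-31] MaxTime=24:00:00 DefaultTime=01:00:00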
+
+COMMAND CHANGES (see man pages for details)
+* --task-mem and --job-mem options have been removed from salloc, sbatch and
+  srun. Use --mem-per-cpu or --mem instead.
+* Added the srun option --preserve-env to pass the current values of 
+  environment variables SLURM_NNODES and SLURM_NPROCS through to the 
+  executable, rather than computing them from commandline parameters.
+* --ctrl-comm-ifhn-addr option has been removed from the srun command (it is 
+  no longer useful).
+* Batch jobs have an environment variable SLURM_RESTART_COUNT set when 
+  restarted.
+* To create a partition using the scontrol command, use the "create" command
+  rather than "update" with a new partition name.
+* Time format of all SLURM commands set to ISO 8601 (yyyy-mm-ddThh:mm:ss)
+  unless the configure option "--disable-iso8601" is used at build time.
+* sacct -S to get the status of a job will no longer work. Use sstat instead
+  (see the examples after this list).
+* sacct --nodes option can be used to filter jobs by allocated node.
+* sacct default starttime is midnight of the previous day rather than the
+  start of the database.
+* sacct and sstat have been rewritten to have a more sacctmgr-like feel.
+* Added the sprio command to view the factors that comprise a job's scheduling
+  priority - works only with the priority/multifactor plugin.
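+
+A hypothetical session showing the revised commands (job IDs and file names
+are made up for illustration):
+  $ sbatch --mem-per-cpu=1024 my.sh   # replaces --job-mem/--task-mem
+  $ scontrol create PartitionName=test Nodes=tux[0-3]
+  $ sprio -j 1234                     # priority factors (multifactor only)
+  $ sstat -j 1234.0                   # status of a running job step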
 
 ACCOUNTING CHANGES
-==================
-* The job accounting plugin has been split into two components: gathering
-  of data and storing the data. The JobAcctType parameter has been replaced by
-  JobAcctGatherType (AIX or Linux) and AccountingStorageType (MySQL, PostGreSQL,
-  filetext, and SlurmDBD). Storing the accounting information into a database
-  will provide you with greater flexibility in managing the data.
-
-* A new daemon SlurmDBD (Slurm DataBase Daemon) has been added. This can 
-  be used to securely manage the accounting data for several Slurm clusters
-  in a central location. Several new parameters have been added to support
-  SlurmDBD, all starting with SlurmDBD. Note that the SlurmDBD daemon is 
-  designed to use a Slurm JobAcctStorageType plugin to use MySQL now. 
-  It also uses existing Slurm authentication plugins.
-
-* A new command, sacctmgr, is available for managing user accounts in
-  SlurmDBD has been added. This information is required for use of SlurmDBD
-  to manage job accounting data. Information is maintained based upon 
-  an "association", which has four components: cluster name, Slurm partition, 
-  user name and bank account. This tool can also be used to maintain 
-  scheduling policy information that can be uploaded to Moab (various 
-  resource limits and fair-share values) See the sacctmgr man page and 
-  accounting web page for more information. Additional tools to generate 
-  accounting reports are currently under development and will be released 
-  soon.
-
-* A new command, sreport, is available for generating accounting reports.
-  While the sacct command can be used to generate information about 
-  individual jobs, sreport can combine this data to report utilization 
-  information by cluster, bank account, user, etc. 
-
-* Job completion records can now be written to a MySQL or PostGreSQL
-  database in addition to a test file as controlled using the JobCompType
-  parameter.
-
-
-OTHER CONFIGURATION CHANGES
-===========================
-* A new parameter, JobRequeue, to control default job behavior after a node 
-  failure (requeue or kill the job). The sbatch--requeue option can be used to
-  override the system default.
-
-* Added new parameters HealthCheckInterval and HealthCheckProgram to 
-  automatically test the health of compute nodes.
-
-* New parameters UnkillableStepProgram and UnkillableStepTimeout offer
-  better control when user processes can not be killed. For example
-  nodes can be automatically rebooted (added in Slurm v1.2.12)
-
-* A new parameter, JobFileAppend, controls how to proceed when a job's
-  output or error file already exist (truncate the file or append to it, 
-  added in slurm v1.2.13). Users can override this using the --open-mode
-  option when submitting a job.
-
-* A new parameter, EnforcePartLimits, was dded. If set then immediately 
-  reject a job that exceeds a partition's size and/or time limits rather
-  then queued for a later change in the partition's limits. NOTE: Not 
-  reported by "scontrol show config" to avoid changing RPCs. It will be 
-  reported in SLURM version 1.4.
-
-* Checkpoint plugins have been added for XLCH and OpenMPI.
-
-* A new parameter, PrivateData, can be used to prevent users from being 
-  able to view jobs or job steps belonging to other users.
-
-* A new parameter CryptoType to specify digital signature plugin to be used
-  Options are crypto/openssl (default) or crypto/munge (for a GPL license).
-
-* Several Slurm MPI plugins were added to support srun launch of MPI tasks
-  including mpich1_p4 (Slurm v1.2.10) and mpich-mx (Slurm v1.2.11). 
-
-* Cpuset logic was added to the task/affinity plugin in Slurm v1.2.3. 
-  Set TaskPluginParam=cpusets to enable.
-
+* Added ability for slurmdbd to archive and purge step and/or job records.
+* Added support for Workload Characterization Key (WCKey) in accounting 
+  records. This is an optional string that can be used to identify the type of
+  work being performed (in addition to user ID, account name, job name, etc.).
+* Added configuration parameter AccountingStorageBackupHost for fault-tolerance
+  in communications to SlurmDBD.
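+  For example (hypothetical values), a job can be tagged at submission time
+  with "sbatch --wckey=climate_model my.sh", and slurm.conf can name a
+  fallback database host with "AccountingStorageBackupHost=dbd-backup".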
 
 OTHER CHANGES
-=============
-* Perl APIs and Torque wrappers for Torque/PBS to SLURM migration were 
-  added in Slurm v1.2.13 in the contribs directory. SLURM now works 
-  directly with Globus using the PBS GRAM.
-
-* Support was added for several additional PMI functions to be used by 
-  MPICH2 and MVAPICH2. Support for an PMI_TIME environment variable was
-  also added for user to control how PMI communications are spread out 
-  in time. Scalability up to 16k tasks has been achieved. 
-
-* New node state FAILING has been added along with event trigger for it.
-  This is similar to DRAINING, but is intended for fault prediction work.
-  A trigger was also added for nodes becoming DRAINED.
-
+* Modify PMI_Get_clique_ranks() to return an array of integers rather
+  than a char * to satisfy PMI standard. Correct logic in
+  PMI_Get_clique_size() for when srun --overcommit option is used.
+* Set "/proc/self/oom_adj" for slurmd and slurmstepd daemons based upon
+  the values of SLURMD_OOM_ADJ and SLURMSTEPD_OOM_ADJ environment
+  variables. This can be used to prevent daemons being killed when
+  a node's memory is exhausted.
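+  For example (illustrative values; with the older Linux oom_adj interface,
+  a value of -17 disables OOM kills entirely):
+    export SLURMD_OOM_ADJ=-17
+    export SLURMSTEPD_OOM_ADJ=-17
+    /etc/init.d/slurm start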
diff --git a/RELEASE_NOTES_LLNL b/RELEASE_NOTES_LLNL
new file mode 100644
index 0000000000000000000000000000000000000000..b0db71544dfbe86138a8fc82274cfbf3a0db9756
--- /dev/null
+++ b/RELEASE_NOTES_LLNL
@@ -0,0 +1,36 @@
+LLNL-SPECIFIC RELEASE NOTES FOR SLURM VERSION 2.0
+19 February 2009
+
+For processor-scheduled clusters (*not* allocating whole nodes to jobs):
+Set "DefMemPerCPU" and "MaxMemPerCPU" as appropriate to restrict memory 
+available to a job. Also set "JobAcctGatherType=jobacct_gather/linux"
+for enforcement (periodic sampling of memory use by the job). You can change
+the sampling rate from the default (every 30 seconds) by setting the
+"JobAcctGatherFrequency" option to a different number of seconds in
+the slurm.conf.
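+For example (values are illustrative only, not recommendations):
+  DefMemPerCPU=1024          # default MB of memory per allocated CPU
+  MaxMemPerCPU=2048          # upper bound in MB per allocated CPU
+  JobAcctGatherType=jobacct_gather/linux
+  JobAcctGatherFrequency=60  # sample memory use every 60 seconds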
+
+For InfiniBand switch systems, set TopologyPlugin=topology/tree in slurm.conf
+and add switch topology information to a new file called topology.conf. 
+Options used are SwitchName, Switches, and Nodes. The SwitchName is any 
+convenient name for bookkeeping purposes only. For example:
+# Switch Topology Information
+SwitchName=s0 Nodes=tux[0-11]
+SwitchName=s1 Nodes=tux[12-23]
+SwitchName=s2 Nodes=tux[24-35]
+SwitchName=s3 Switches=s[0-2]
+
+Remove the "preserve-env.so" SPANK plugin. The functionality is now
+directly in SLURM.
+
+SLURM version 2.0 must use a database daemon (slurmdbd) at version 2.0
+or higher. While we are testing version 2.0, set "AccountingStoragePort=????".
+Once we upgrade the production slurmdbd to version 2.0, this change will
+not be required. You can likewise test 1.3.7+ clusters with the same port
+since 2.0 slurmdbd will talk to 1.3.7+ SLURM.
+
+SLURM state files in version 2.0 are different from those of version 1.3.
+After installing SLURM version 2.0, plan to restart without preserving 
+jobs or other state information. While SLURM version 1.3 is still running, 
+cancel all pending and running jobs (e.g.
+"scancel --state=pending; scancel --state=running"). Then stop and restart 
+daemons with the "-c" option or use "/etc/init.d/slurm startclean".
diff --git a/aclocal.m4 b/aclocal.m4
index 9903e9bc346dc5fa86e5fbc032d6ef6daf6b472c..0cd78b46f445a164a4cd5e511577dbffcb415cfe 100644
--- a/aclocal.m4
+++ b/aclocal.m4
@@ -7552,14 +7552,18 @@ m4_include([auxdir/slurm.m4])
 m4_include([auxdir/x_ac__system_configuration.m4])
 m4_include([auxdir/x_ac_affinity.m4])
 m4_include([auxdir/x_ac_aix.m4])
+m4_include([auxdir/x_ac_blcr.m4])
 m4_include([auxdir/x_ac_bluegene.m4])
 m4_include([auxdir/x_ac_cflags.m4])
+m4_include([auxdir/x_ac_cray.m4])
 m4_include([auxdir/x_ac_databases.m4])
 m4_include([auxdir/x_ac_debug.m4])
 m4_include([auxdir/x_ac_elan.m4])
+m4_include([auxdir/x_ac_env.m4])
 m4_include([auxdir/x_ac_federation.m4])
 m4_include([auxdir/x_ac_gpl_licensed.m4])
 m4_include([auxdir/x_ac_gtk.m4])
+m4_include([auxdir/x_ac_iso.m4])
 m4_include([auxdir/x_ac_munge.m4])
 m4_include([auxdir/x_ac_ncurses.m4])
 m4_include([auxdir/x_ac_pam.m4])
diff --git a/auxdir/Makefile.am b/auxdir/Makefile.am
index 84d784b5eeb0c30c8d1b7a2202dfee293e13615c..62f523a4955adf9670a484d442e72434cf016ca1 100644
--- a/auxdir/Makefile.am
+++ b/auxdir/Makefile.am
@@ -1,5 +1,5 @@
 ##****************************************************************************
-## $Id: Makefile.am 16088 2008-12-29 21:56:17Z jette $
+## $Id: Makefile.am 16867 2009-03-12 16:35:42Z jette $
 ##****************************************************************************
 ## Process this file with automake to produce Makefile.in.
 ##****************************************************************************
@@ -13,15 +13,20 @@ EXTRA_DIST = \
     x_ac_aix.m4 \
     x_ac_bluegene.m4 \
     x_ac_cflags.m4 \
+    x_ac_cray.m4 \
     x_ac_debug.m4 \
     x_ac_elan.m4 \
+    x_ac_env.m4 \
     x_ac_federation.m4 \
     x_ac_gpl_licensed.m4 \
+    x_ac_iso.m4 \
     x_ac_pam.m4 \
     x_ac_munge.m4 \
     x_ac_ncurses.m4 \
     x_ac_ptrace.m4 \
     x_ac_readline.m4 \
     x_ac_setproctitle.m4 \
     x_ac_slurm_ssl.m4 \
-    x_ac_sun_const.m4 
+    x_ac_sun_const.m4 \
+    x_ac_blcr.m4
diff --git a/auxdir/Makefile.in b/auxdir/Makefile.in
index 500ec8fc17879b31401202b50335300d264aef22..cea696fb26a5ea7bfea58822836519c1fe0ccb62 100644
--- a/auxdir/Makefile.in
+++ b/auxdir/Makefile.in
@@ -41,14 +41,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -78,6 +82,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
@@ -246,18 +254,23 @@ EXTRA_DIST = \
     x_ac_aix.m4 \
     x_ac_bluegene.m4 \
     x_ac_cflags.m4 \
+    x_ac_cray.m4 \
     x_ac_debug.m4 \
     x_ac_elan.m4 \
+    x_ac_env.m4 \
     x_ac_federation.m4 \
     x_ac_gpl_licensed.m4 \
+    x_ac_iso.m4 \
-    x_ac_pam.m4 \
     x_ac_munge.m4 \
     x_ac_ncurses.m4 \
+    x_ac_pam.m4 \
     x_ac_ptrace.m4 \
     x_ac_readline.m4 \
     x_ac_setproctitle.m4 \
     x_ac_slurm_ssl.m4 \
-    x_ac_sun_const.m4 
+    x_ac_sun_const.m4 \
+    x_ac_blcr.m4
 
 all: all-am
 
diff --git a/auxdir/x_ac_affinity.m4 b/auxdir/x_ac_affinity.m4
index ad2725bf924b8f24b8df66c26794ee07fb4b9970..83f78638b9a770998650c3f3fd3f005c8bb5c484 100644
--- a/auxdir/x_ac_affinity.m4
+++ b/auxdir/x_ac_affinity.m4
@@ -61,16 +61,19 @@ AC_DEFUN([X_AC_AFFINITY], [
   fi
 
 #
-# Test for cpusets
-#
-  if test -d "/dev/cpuset" ; then
-     have_sched_setaffinity=yes
-  fi
-
-#
-# Test for other affinity functions as appropriate
-# TBD
+# Test for cpuset directory
 #
+  cpuset_default_dir="/dev/cpuset"
+  AC_ARG_WITH([cpusetdir],
+              AS_HELP_STRING(--with-cpusetdir=PATH,specify path to cpuset directory (default is /dev/cpuset)),
+              [try_path=$withval])
+  for cpuset_dir in $try_path "" $cpuset_default_dir; do
+    if test -d "$cpuset_dir" ; then
+      AC_DEFINE_UNQUOTED(CPUSET_DIR, "$cpuset_dir", [Define location of cpuset directory])
+      have_sched_setaffinity=yes
+      break
+    fi
+  done
 
 #
 # Set HAVE_SCHED_SETAFFINITY if any task affinity supported
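
The macro above records the discovered cpuset mount point in CPUSET_DIR instead
of hard-coding /dev/cpuset. A hypothetical C sketch (not part of the patch; the
"slurm" subdirectory name is illustrative only) of how a task-affinity plugin
might consume that define:

/*
 * Hypothetical sketch (not part of the patch): consuming the CPUSET_DIR
 * define recorded above.  The "slurm" subdirectory name is illustrative.
 */
#include <stdio.h>
#include <sys/stat.h>

#ifndef CPUSET_DIR              /* supplied by config.h in a real build */
#define CPUSET_DIR "/dev/cpuset"
#endif

int main(void)
{
    char path[256];
    struct stat st;

    if (stat(CPUSET_DIR, &st) != 0 || !S_ISDIR(st.st_mode)) {
        fprintf(stderr, "cpuset directory %s not mounted\n", CPUSET_DIR);
        return 1;
    }
    snprintf(path, sizeof(path), "%s/slurm", CPUSET_DIR);
    printf("job cpusets would be created under %s\n", path);
    return 0;
}
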
diff --git a/auxdir/x_ac_aix.m4 b/auxdir/x_ac_aix.m4
index 9aa4c892f929609cdaa98c9335541f91d229a7d3..9b105d30b45a3fb74605c51a49522d12ea230641 100644
--- a/auxdir/x_ac_aix.m4
+++ b/auxdir/x_ac_aix.m4
@@ -1,5 +1,5 @@
 ##*****************************************************************************
-## $Id: x_ac_aix.m4 11741 2007-06-20 18:42:19Z da $
+## $Id: x_ac_aix.m4 17515 2009-05-15 19:50:47Z da $
 ##*****************************************************************************
 #  AUTHOR:
 #    Morris Jette <jette@llnl.gov>
@@ -48,9 +48,9 @@ AC_DEFUN([X_AC_AIX],
          [ PROCTRACKDIR="$withval" ]
       )
       if test -f "$PROCTRACKDIR/lib/proctrackext.exp"; then
-         CPPFLAGS="-I$PROCTRACKDIR/include $CPPFLAGS"
          PROCTRACKDIR="$PROCTRACKDIR/lib"
          AC_SUBST(PROCTRACKDIR)
+         CPPFLAGS="-I$PROCTRACKDIR/include $CPPFLAGS"
          AC_CHECK_HEADERS(proctrack.h)
          ac_have_aix_proctrack="yes"
       elif test -f "$prefix/lib/proctrackext.exp"; then
diff --git a/auxdir/x_ac_blcr.m4 b/auxdir/x_ac_blcr.m4
new file mode 100644
index 0000000000000000000000000000000000000000..03e78c3d06f59933795e8516351dd89047a2c229
--- /dev/null
+++ b/auxdir/x_ac_blcr.m4
@@ -0,0 +1,69 @@
+##*****************************************************************************
+## $Id: x_ac_blcr.m4 0001 2009-01-10 16:06:05Z hjcao $
+##*****************************************************************************
+#  AUTHOR:
+#    Copied from x_ac_munge.m4.
+#
+#
+#  SYNOPSIS:
+#    X_AC_BLCR()
+#
+#  DESCRIPTION:
+#    Check the usual suspects for a BLCR installation,
+#    updating CPPFLAGS and LDFLAGS as necessary.
+#
+#  WARNINGS:
+#    This macro must be placed after AC_PROG_CC and before AC_PROG_LIBTOOL.
+##*****************************************************************************
+
+AC_DEFUN([X_AC_BLCR], [
+
+  _x_ac_blcr_dirs="/usr /usr/local /opt/freeware /opt/blcr"
+  _x_ac_blcr_libs="lib64 lib"
+
+  AC_ARG_WITH(
+    [blcr],
+    AS_HELP_STRING(--with-blcr=PATH,Specify path to BLCR installation),
+    [_x_ac_blcr_dirs="$withval $_x_ac_blcr_dirs"])
+
+  AC_CACHE_CHECK(
+    [for blcr installation],
+    [x_ac_cv_blcr_dir],
+    [
+      for d in $_x_ac_blcr_dirs; do
+        test -d "$d" || continue
+        test -d "$d/include" || continue
+        test -f "$d/include/libcr.h" || continue
+        for bit in $_x_ac_blcr_libs; do
+          test -d "$d/$bit" || continue
+
+          _x_ac_blcr_libs_save="$LIBS"
+          LIBS="-L$d/$bit -lcr $LIBS"
+          AC_LINK_IFELSE(
+            AC_LANG_CALL([], cr_init),
+            AS_VAR_SET(x_ac_cv_blcr_dir, $d))
+          LIBS="$_x_ac_blcr_libs_save"
+          test -n "$x_ac_cv_blcr_dir" && break
+        done
+        test -n "$x_ac_cv_blcr_dir" && break
+      done
+    ])
+
+  if test -z "$x_ac_cv_blcr_dir"; then
+    AC_MSG_WARN([unable to locate blcr installation])
+  else
+    BLCR_HOME="$x_ac_cv_blcr_dir"
+    BLCR_LIBS="-lcr"
+    BLCR_CPPFLAGS="-I$x_ac_cv_blcr_dir/include"
+    BLCR_LDFLAGS="-L$x_ac_cv_blcr_dir/$bit"
+  fi
+
+  AC_DEFINE_UNQUOTED(BLCR_HOME, "$x_ac_cv_blcr_dir", [Define BLCR installation home])
+  AC_SUBST(BLCR_HOME)
+
+  AC_SUBST(BLCR_LIBS)
+  AC_SUBST(BLCR_CPPFLAGS)
+  AC_SUBST(BLCR_LDFLAGS)
+
+  AM_CONDITIONAL(WITH_BLCR, test -n "$x_ac_cv_blcr_dir")
+])
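
The link test above is the standard usual-suspects probe: for each candidate
prefix it looks for include/libcr.h and a linkable libcr. A hypothetical smoke
test (not part of the patch), built with the substituted $(BLCR_CPPFLAGS)
$(BLCR_LDFLAGS) $(BLCR_LIBS), assuming BLCR's documented behavior that
cr_init() returns a negative value on failure:

/*
 * Hypothetical smoke test (not part of the patch) for the library this
 * macro locates.  cr_init() returning a negative value on failure is an
 * assumption based on BLCR's documentation.
 */
#include <stdio.h>
#include <libcr.h>              /* found via -I$BLCR_HOME/include */

int main(void)
{
    if (cr_init() < 0) {
        fprintf(stderr, "BLCR present at link time but not usable\n");
        return 1;
    }
    printf("BLCR initialized\n");
    return 0;
}
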
diff --git a/auxdir/x_ac_bluegene.m4 b/auxdir/x_ac_bluegene.m4
index 1d24f5922b63273f84d0c026df68e9e4147b324f..cd944d23685503c9efe11c8d42dc299be41f14e2 100644
--- a/auxdir/x_ac_bluegene.m4
+++ b/auxdir/x_ac_bluegene.m4
@@ -1,5 +1,5 @@
 ##*****************************************************************************
-## $Id: x_ac_bluegene.m4 16697 2009-02-26 19:49:53Z da $
+## $Id: x_ac_bluegene.m4 16699 2009-02-26 19:56:21Z da $
 ##*****************************************************************************
 #  AUTHOR:
 #    Morris Jette <jette1@llnl.gov>
diff --git a/auxdir/x_ac_cray.m4 b/auxdir/x_ac_cray.m4
new file mode 100644
index 0000000000000000000000000000000000000000..a4c706b13ddba0c2fff4e1256ffa023362a20101
--- /dev/null
+++ b/auxdir/x_ac_cray.m4
@@ -0,0 +1,50 @@
+##*****************************************************************************
+#  AUTHOR:
+#    Morris Jette <jette1@llnl.gov>
+#
+#  SYNOPSIS:
+#    X_AC_CRAY
+#
+#  DESCRIPTION:
+#    Test for Cray systems, including XT systems with a 3-D interconnect.
+#    Also test for the apbasil client (Cray's Batch and Application
+#    Scheduler Interface Layer).
+##*****************************************************************************
+
+AC_DEFUN([X_AC_CRAY], [
+  AC_MSG_CHECKING([for Cray XT])
+  AC_ARG_ENABLE(
+    [cray-xt],
+    AS_HELP_STRING(--enable-cray-xt,enable Cray XT system support),
+    [ case "$enableval" in
+        yes) x_ac_cray_xt=yes ;;
+         no) x_ac_cray_xt=no ;;
+          *) AC_MSG_RESULT([doh!])
+             AC_MSG_ERROR([bad value "$enableval" for --enable-cray-xt]) ;;
+      esac
+    ],
+    [x_ac_cray_xt=no]
+  )
+
+  if test "$x_ac_cray_xt" = yes; then
+    AC_MSG_RESULT([yes])
+    AC_DEFINE(HAVE_3D, 1, [Define to 1 if 3-dimensional architecture])
+    AC_DEFINE(HAVE_CRAY,1,[Define if Cray system])
+    AC_DEFINE(HAVE_CRAY_XT,1,[Define if Cray XT system])
+    AC_DEFINE(HAVE_FRONT_END, 1, [Define to 1 if running slurmd on front-end only])
+  else
+    AC_MSG_RESULT([no])
+  fi
+
+  AC_ARG_WITH(apbasil, AS_HELP_STRING(--with-apbasil=PATH,Specify path to apbasil command), [ try_apbasil=$withval ])
+  apbasil_default_locs="/usr/apbasil"
+  for apbasil_loc in $try_apbasil "" $apbasil_default_locs; do
+    if test -z "$have_apbasil" -a -x "$apbasil_loc" ; then
+      have_apbasil=$apbasil_loc
+    fi
+  done
+  if test ! -z "$have_apbasil" ; then
+    AC_DEFINE_UNQUOTED(APBASIL_LOC, "$have_apbasil", [Define the apbasil command location])
+  fi
+])
+
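A hypothetical sketch (not part of the patch) of how code might consume the
defines this macro emits; in a real build config.h would supply HAVE_CRAY_XT
and APBASIL_LOC, and both are absent when compiled standalone:

/*
 * Hypothetical sketch (not part of the patch): consuming the Cray XT
 * defines emitted above.
 */
#include <stdio.h>

int main(void)
{
#if defined(HAVE_CRAY_XT) && defined(APBASIL_LOC)
    /* slurmd runs only on a front-end node and node allocations are
     * confirmed through the apbasil client found at configure time */
    printf("reserving nodes via %s\n", APBASIL_LOC);
#else
    printf("not a Cray XT build\n");
#endif
    return 0;
}
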
diff --git a/auxdir/x_ac_databases.m4 b/auxdir/x_ac_databases.m4
index 01a3d089e9689b0afedc9e5c036046a988b6cbc2..8d4654f0ac0278a53b2d8de6f44e2d83db6df795 100644
--- a/auxdir/x_ac_databases.m4
+++ b/auxdir/x_ac_databases.m4
@@ -103,7 +103,7 @@ AC_DEFUN([X_AC_DATABASES],
 			fi
 		fi
       	fi
-
+	AM_CONDITIONAL(WITH_MYSQL, test x"$ac_have_mysql" = x"yes")
 
 	#Check for PostgreSQL
 	ac_have_postgres="no"
@@ -156,4 +156,6 @@ AC_DEFUN([X_AC_DATABASES],
        			AC_MSG_WARN([*** PostgreSQL test program execution failed.])
 		fi        	
       	fi
+	AM_CONDITIONAL(WITH_PGSQL, test x"$ac_have_pgsql" = x"yes")
+
 ])
diff --git a/auxdir/x_ac_debug.m4 b/auxdir/x_ac_debug.m4
index 1f5a37fbd7b9d83f875aeb08820d600ead6aa706..dc48936271f7805e2ccdca5c9a3444913cd7cefa 100644
--- a/auxdir/x_ac_debug.m4
+++ b/auxdir/x_ac_debug.m4
@@ -1,5 +1,5 @@
 ##*****************************************************************************
-#  $Id: x_ac_debug.m4 15332 2008-10-07 20:08:18Z jette $
+#  $Id: x_ac_debug.m4 15340 2008-10-07 21:21:53Z da $
 ##*****************************************************************************
 #  AUTHOR:
 #    Chris Dunlap <cdunlap@llnl.gov>
diff --git a/auxdir/x_ac_env.m4 b/auxdir/x_ac_env.m4
new file mode 100644
index 0000000000000000000000000000000000000000..39e570f47152fdb29990ed8717813e38dc95fb0d
--- /dev/null
+++ b/auxdir/x_ac_env.m4
@@ -0,0 +1,37 @@
+##*****************************************************************************
+#  AUTHOR:
+#    Morris Jette <jette1@llnl.gov>
+#
+#  SYNOPSIS:
+#    X_AC_ENV_LOGIC
+#
+#  DESCRIPTION:
+#    Determine how the user's environment should be loaded for sbatch's
+#    --get-user-env option (as used by Moab)
+##*****************************************************************************
+
+AC_DEFUN([X_AC_ENV_LOGIC], [
+  AC_MSG_CHECKING([whether sbatch --get-user-env option should load .login])
+  AC_ARG_ENABLE(
+    [load-env-no-login],
+    AS_HELP_STRING(--enable-load-env-no-login,
+                   [enable --get-user-env option to load user environment without .login]),
+    [ case "$enableval" in
+        yes) x_ac_load_env_no_login=yes ;;
+         no) x_ac_load_env_no_login=no ;;
+          *) AC_MSG_RESULT([doh!])
+             AC_MSG_ERROR([bad value "$enableval" for --enable-load-env-no-login]) ;;
+      esac
+    ],
+    [x_ac_load_env_no_login=no]
+  )
+
+  if test "$x_ac_load_env_no_login" = yes; then
+    AC_MSG_RESULT([yes])
+    AC_DEFINE(LOAD_ENV_NO_LOGIN, 1,
+              [Define to 1 for --get-user-env to load user environment without .login])
+  else
+    AC_MSG_RESULT([no])
+  fi
+])
+
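A hypothetical illustration (not part of the patch) of the intent of
LOAD_ENV_NO_LOGIN: capture the user's environment without simulating a full
login, so ~/.login is never sourced. The su command lines below are
illustrative, not SLURM's exact invocation:

/*
 * Hypothetical illustration (not part of the patch) of what the
 * LOAD_ENV_NO_LOGIN define controls; config.h would supply it.
 */
#include <stdio.h>

static void print_env_capture_command(const char *user)
{
#ifdef LOAD_ENV_NO_LOGIN
    printf("/bin/su %s -c env\n", user);    /* no login shell */
#else
    printf("/bin/su - %s -c env\n", user);  /* full login, reads .login */
#endif
}

int main(void)
{
    print_env_capture_command("someuser");
    return 0;
}
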
diff --git a/auxdir/x_ac_gtk.m4 b/auxdir/x_ac_gtk.m4
index a7cc39f2dc062381a4c721e9fc1faa63730f93fb..010ed0401855cf11b389ef62bc98ebe38abb501c 100644
--- a/auxdir/x_ac_gtk.m4
+++ b/auxdir/x_ac_gtk.m4
@@ -23,7 +23,6 @@ AC_DEFUN([X_AC_GTK],
 	    PKG_CONFIG_PATH="/usr/lib64/pkgconfig/"
     fi
  
-
 ### Check for pkg-config program
     AC_ARG_WITH(
 	    [pkg-config],
diff --git a/auxdir/x_ac_iso.m4 b/auxdir/x_ac_iso.m4
new file mode 100644
index 0000000000000000000000000000000000000000..2b9f92a57317bfa9abee880429e37a0793eef71c
--- /dev/null
+++ b/auxdir/x_ac_iso.m4
@@ -0,0 +1,34 @@
+##*****************************************************************************
+#  AUTHOR:
+#    Morris Jette <jette1@llnl.gov>
+#
+#  SYNOPSIS:
+#    X_AC_ISO
+#
+#  DESCRIPTION:
+#    Test whether ISO 8601 time format support should be enabled.
+##*****************************************************************************
+
+AC_DEFUN([X_AC_ISO], [
+  AC_MSG_CHECKING([whether to enable ISO 8601 time format support])
+  AC_ARG_ENABLE(
+    [iso8601],
+    AS_HELP_STRING(--disable-iso8601,disable ISO 8601 time format support),
+    [ case "$enableval" in
+        yes) x_ac_iso8601=yes ;;
+         no) x_ac_iso8601=no ;;
+          *) AC_MSG_RESULT([doh!])
+             AC_MSG_ERROR([bad value "$enableval" for --enable-iso8601]) ;;
+      esac
+    ],
+    [x_ac_iso8601=yes]
+  )
+
+  if test "$x_ac_iso8601" = yes; then
+    AC_MSG_RESULT([yes])
+    AC_DEFINE(USE_ISO_8601,,[define if using ISO 8601 time format])
+  else
+    AC_MSG_RESULT([no])
+  fi
+])
+
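A minimal sketch (not part of the patch) of the formatting choice that
USE_ISO_8601 controls; the non-ISO fallback format is an assumption modeled on
SLURM's traditional month/day-hour:minute:second strings:

/*
 * Minimal sketch (not part of the patch) of the USE_ISO_8601 choice.
 * The fallback format string is an assumption; config.h would define
 * USE_ISO_8601 in a real build.
 */
#include <stdio.h>
#include <time.h>

int main(void)
{
    char buf[32];
    time_t now = time(NULL);

#ifdef USE_ISO_8601
    strftime(buf, sizeof(buf), "%Y-%m-%dT%H:%M:%S", localtime(&now));
#else
    strftime(buf, sizeof(buf), "%m/%d-%H:%M:%S", localtime(&now));
#endif
    printf("%s\n", buf);   /* e.g. 2009-05-27T21:17:29 */
    return 0;
}
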
diff --git a/auxdir/x_ac_readline.m4 b/auxdir/x_ac_readline.m4
index 373685ded061cbc93e745153ab38ac52fde0210e..b4b6a35ff03df568b7dbfbfc42b772cf8f3b071b 100644
--- a/auxdir/x_ac_readline.m4
+++ b/auxdir/x_ac_readline.m4
@@ -1,5 +1,5 @@
 ##*****************************************************************************
-## $Id: x_ac_readline.m4 8192 2006-05-25 00:15:05Z morrone $
+## $Id: x_ac_readline.m4 17615 2009-05-27 21:17:29Z jette $
 ##*****************************************************************************
 #  AUTHOR:
 #    Jim Garlick <garlick@llnl.gov>
@@ -39,7 +39,7 @@ AC_DEFUN([X_AC_READLINE],
 	#include <readline/history.h>]], [[
 	char *line = readline("in:");]])],[AC_DEFINE([HAVE_READLINE], [1], 
                  [Define if you are compiling with readline.])],[READLINE_LIBS=""])
-    LIBS="$savedLIBS"
+    LIBS="$saved_LIBS"
   fi
   AC_SUBST(READLINE_LIBS)
 ])
diff --git a/config.h.in b/config.h.in
index 95018980c44811bbcc7070ec2c95f0b5dd4d35cb..eff8951e46a657388b3241bfa54a9554d068b69a 100644
--- a/config.h.in
+++ b/config.h.in
@@ -1,5 +1,8 @@
 /* config.h.in.  Generated from configure.ac by autoheader.  */
 
+/* Define the apbasil command location */
+#undef APBASIL_LOC
+
 /* Define the BG_BRIDGE_SO value */
 #undef BG_BRIDGE_SO
 
@@ -9,6 +12,12 @@
 /* Define the BG_SERIAL value */
 #undef BG_SERIAL
 
+/* Define BLCR installation home */
+#undef BLCR_HOME
+
+/* Define location of cpuset directory */
+#undef CPUSET_DIR
+
 /* Define to 1 if licensed under terms of the GNU General Public License. */
 #undef GPL_LICENSED
 
@@ -30,6 +39,12 @@
 /* Define to 1 if have Blue Gene files */
 #undef HAVE_BG_FILES
 
+/* Define if Cray system */
+#undef HAVE_CRAY
+
+/* Define if Cray XT system */
+#undef HAVE_CRAY_XT
+
 /* Define to 1 if you have the <curses.h> header file. */
 #undef HAVE_CURSES_H
 
@@ -276,7 +291,7 @@
    member named physmem. */
 #undef HAVE__SYSTEM_CONFIGURATION
 
-/* Define to 1 for --get-user-env to load user environment without login. */
+/* Define to 1 for --get-user-env to load user environment without .login */
 #undef LOAD_ENV_NO_LOGIN
 
 /* Define to 1 for memory leak debugging. */
@@ -391,6 +406,9 @@
 /* Define slurm_ prefix function aliases for plugins */
 #undef USE_ALIAS
 
+/* define if using ISO 8601 time format */
+#undef USE_ISO_8601
+
 /* Version number of package */
 #undef VERSION
 
diff --git a/configure b/configure
index 836c1e323f4b8103072727e95106c599045307bc..2bc264f5ea4bc951dc88beaf314789efae78f606 100755
--- a/configure
+++ b/configure
@@ -933,9 +933,13 @@ HAVE_GTK_FALSE
 HAVEMYSQLCONFIG
 MYSQL_LIBS
 MYSQL_CFLAGS
+WITH_MYSQL_TRUE
+WITH_MYSQL_FALSE
 HAVEPGCONFIG
 PGSQL_LIBS
 PGSQL_CFLAGS
+WITH_PGSQL_TRUE
+WITH_PGSQL_FALSE
 DEBUG_MODULES_TRUE
 DEBUG_MODULES_FALSE
 SLURMCTLD_PORT
@@ -969,6 +973,12 @@ AUTHD_CFLAGS
 WITH_AUTHD_TRUE
 WITH_AUTHD_FALSE
 UTIL_LIBS
+BLCR_HOME
+BLCR_LIBS
+BLCR_CPPFLAGS
+BLCR_LDFLAGS
+WITH_BLCR_TRUE
+WITH_BLCR_FALSE
 LTLIBOBJS'
 ac_subst_files=''
       ac_precious_vars='build_alias
@@ -1580,14 +1590,16 @@ Optional Features:
   --disable-libtool-lock  avoid locking (might break parallel builds)
   --enable-pam            enable PAM (Pluggable Authentication Modules)
                           support
+  --disable-iso8601       disable ISO 8601 time format support
+  --enable-load-env-no-login
+                          enable --get-user-env option to load user
+                          environment without .login
+  --enable-cray-xt        enable Cray XT system support
   --enable-sun-const      enable Sun Constellation system support
   --enable-debug          enable debugging code for development
   --enable-memory-leak-debug
                           enable memory leak debugging code for development
   --enable-front-end      enable slurmd operation on a front-end
-  --enable-load-env-no-login
-                          enable --get-user-env option to load user
-                          environment without login
   --enable-multiple-slurmd
                           enable multiple-slurmd support
 
@@ -1602,6 +1614,9 @@ Optional Packages:
   --with-pic              try to use only PIC/non-PIC objects [default=use
                           both]
   --with-tags[=TAGS]      include additional configurations [automatic]
+  --with-cpusetdir=PATH   specify path to cpuset directory (default is
+                          /dev/cpuset)
+  --with-apbasil=PATH     Specify path to apbasil command
   --with-xcpu=PATH        specify path to XCPU directory
   --with-pkg-config=PATH  Specify path to pkg-config binary
   --with-mysql_config=PATH
@@ -1613,6 +1628,7 @@ Optional Packages:
   --without-readline      compile without readline support
   --with-ssl=PATH         Specify path to OpenSSL installation
   --with-munge=PATH       Specify path to munge installation
+  --with-blcr=PATH        Specify path to BLCR installation
 
 Some influential environment variables:
   CC          C compiler command
@@ -5001,9 +5017,9 @@ if test "${with_proctrack+set}" = set; then
 fi
 
       if test -f "$PROCTRACKDIR/lib/proctrackext.exp"; then
-         CPPFLAGS="-I$PROCTRACKDIR/include $CPPFLAGS"
          PROCTRACKDIR="$PROCTRACKDIR/lib"
 
+         CPPFLAGS="-I$PROCTRACKDIR/include $CPPFLAGS"
 
 for ac_header in proctrack.h
 do
@@ -7524,7 +7540,7 @@ ia64-*-hpux*)
   ;;
 *-*-irix6*)
   # Find out which ABI we are using.
-  echo '#line 7527 "configure"' > conftest.$ac_ext
+  echo '#line 7543 "configure"' > conftest.$ac_ext
   if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
   (eval $ac_compile) 2>&5
   ac_status=$?
@@ -9630,11 +9646,11 @@ else
    -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
    -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
    -e 's:$: $lt_compiler_flag:'`
-   (eval echo "\"\$as_me:9633: $lt_compile\"" >&5)
+   (eval echo "\"\$as_me:9649: $lt_compile\"" >&5)
    (eval "$lt_compile" 2>conftest.err)
    ac_status=$?
    cat conftest.err >&5
-   echo "$as_me:9637: \$? = $ac_status" >&5
+   echo "$as_me:9653: \$? = $ac_status" >&5
    if (exit $ac_status) && test -s "$ac_outfile"; then
      # The compiler can only warn and ignore the option if not recognized
      # So say no if there are warnings other than the usual output.
@@ -9920,11 +9936,11 @@ else
    -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
    -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
    -e 's:$: $lt_compiler_flag:'`
-   (eval echo "\"\$as_me:9923: $lt_compile\"" >&5)
+   (eval echo "\"\$as_me:9939: $lt_compile\"" >&5)
    (eval "$lt_compile" 2>conftest.err)
    ac_status=$?
    cat conftest.err >&5
-   echo "$as_me:9927: \$? = $ac_status" >&5
+   echo "$as_me:9943: \$? = $ac_status" >&5
    if (exit $ac_status) && test -s "$ac_outfile"; then
      # The compiler can only warn and ignore the option if not recognized
      # So say no if there are warnings other than the usual output.
@@ -10024,11 +10040,11 @@ else
    -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
    -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
    -e 's:$: $lt_compiler_flag:'`
-   (eval echo "\"\$as_me:10027: $lt_compile\"" >&5)
+   (eval echo "\"\$as_me:10043: $lt_compile\"" >&5)
    (eval "$lt_compile" 2>out/conftest.err)
    ac_status=$?
    cat out/conftest.err >&5
-   echo "$as_me:10031: \$? = $ac_status" >&5
+   echo "$as_me:10047: \$? = $ac_status" >&5
    if (exit $ac_status) && test -s out/conftest2.$ac_objext
    then
      # The compiler can only warn and ignore the option if not recognized
@@ -12401,7 +12417,7 @@ else
   lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2
   lt_status=$lt_dlunknown
   cat > conftest.$ac_ext <<EOF
-#line 12404 "configure"
+#line 12420 "configure"
 #include "confdefs.h"
 
 #if HAVE_DLFCN_H
@@ -12501,7 +12517,7 @@ else
   lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2
   lt_status=$lt_dlunknown
   cat > conftest.$ac_ext <<EOF
-#line 12504 "configure"
+#line 12520 "configure"
 #include "confdefs.h"
 
 #if HAVE_DLFCN_H
@@ -14902,11 +14918,11 @@ else
    -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
    -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
    -e 's:$: $lt_compiler_flag:'`
-   (eval echo "\"\$as_me:14905: $lt_compile\"" >&5)
+   (eval echo "\"\$as_me:14921: $lt_compile\"" >&5)
    (eval "$lt_compile" 2>conftest.err)
    ac_status=$?
    cat conftest.err >&5
-   echo "$as_me:14909: \$? = $ac_status" >&5
+   echo "$as_me:14925: \$? = $ac_status" >&5
    if (exit $ac_status) && test -s "$ac_outfile"; then
      # The compiler can only warn and ignore the option if not recognized
      # So say no if there are warnings other than the usual output.
@@ -15006,11 +15022,11 @@ else
    -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
    -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
    -e 's:$: $lt_compiler_flag:'`
-   (eval echo "\"\$as_me:15009: $lt_compile\"" >&5)
+   (eval echo "\"\$as_me:15025: $lt_compile\"" >&5)
    (eval "$lt_compile" 2>out/conftest.err)
    ac_status=$?
    cat out/conftest.err >&5
-   echo "$as_me:15013: \$? = $ac_status" >&5
+   echo "$as_me:15029: \$? = $ac_status" >&5
    if (exit $ac_status) && test -s out/conftest2.$ac_objext
    then
      # The compiler can only warn and ignore the option if not recognized
@@ -16604,11 +16620,11 @@ else
    -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
    -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
    -e 's:$: $lt_compiler_flag:'`
-   (eval echo "\"\$as_me:16607: $lt_compile\"" >&5)
+   (eval echo "\"\$as_me:16623: $lt_compile\"" >&5)
    (eval "$lt_compile" 2>conftest.err)
    ac_status=$?
    cat conftest.err >&5
-   echo "$as_me:16611: \$? = $ac_status" >&5
+   echo "$as_me:16627: \$? = $ac_status" >&5
    if (exit $ac_status) && test -s "$ac_outfile"; then
      # The compiler can only warn and ignore the option if not recognized
      # So say no if there are warnings other than the usual output.
@@ -16708,11 +16724,11 @@ else
    -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
    -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
    -e 's:$: $lt_compiler_flag:'`
-   (eval echo "\"\$as_me:16711: $lt_compile\"" >&5)
+   (eval echo "\"\$as_me:16727: $lt_compile\"" >&5)
    (eval "$lt_compile" 2>out/conftest.err)
    ac_status=$?
    cat out/conftest.err >&5
-   echo "$as_me:16715: \$? = $ac_status" >&5
+   echo "$as_me:16731: \$? = $ac_status" >&5
    if (exit $ac_status) && test -s out/conftest2.$ac_objext
    then
      # The compiler can only warn and ignore the option if not recognized
@@ -18928,11 +18944,11 @@ else
    -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
    -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
    -e 's:$: $lt_compiler_flag:'`
-   (eval echo "\"\$as_me:18931: $lt_compile\"" >&5)
+   (eval echo "\"\$as_me:18947: $lt_compile\"" >&5)
    (eval "$lt_compile" 2>conftest.err)
    ac_status=$?
    cat conftest.err >&5
-   echo "$as_me:18935: \$? = $ac_status" >&5
+   echo "$as_me:18951: \$? = $ac_status" >&5
    if (exit $ac_status) && test -s "$ac_outfile"; then
      # The compiler can only warn and ignore the option if not recognized
      # So say no if there are warnings other than the usual output.
@@ -19218,11 +19234,11 @@ else
    -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
    -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
    -e 's:$: $lt_compiler_flag:'`
-   (eval echo "\"\$as_me:19221: $lt_compile\"" >&5)
+   (eval echo "\"\$as_me:19237: $lt_compile\"" >&5)
    (eval "$lt_compile" 2>conftest.err)
    ac_status=$?
    cat conftest.err >&5
-   echo "$as_me:19225: \$? = $ac_status" >&5
+   echo "$as_me:19241: \$? = $ac_status" >&5
    if (exit $ac_status) && test -s "$ac_outfile"; then
      # The compiler can only warn and ignore the option if not recognized
      # So say no if there are warnings other than the usual output.
@@ -19322,11 +19338,11 @@ else
    -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
    -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
    -e 's:$: $lt_compiler_flag:'`
-   (eval echo "\"\$as_me:19325: $lt_compile\"" >&5)
+   (eval echo "\"\$as_me:19341: $lt_compile\"" >&5)
    (eval "$lt_compile" 2>out/conftest.err)
    ac_status=$?
    cat out/conftest.err >&5
-   echo "$as_me:19329: \$? = $ac_status" >&5
+   echo "$as_me:19345: \$? = $ac_status" >&5
    if (exit $ac_status) && test -s out/conftest2.$ac_objext
    then
      # The compiler can only warn and ignore the option if not recognized
@@ -23173,16 +23189,26 @@ echo "$as_me: WARNING: Unable to locate PLPA processor affinity functions" >&2;}
   fi
 
 #
-# Test for cpusets
+# Test for cpuset directory
 #
-  if test -d "/dev/cpuset" ; then
-     have_sched_setaffinity=yes
-  fi
+  cpuset_default_dir="/dev/cpuset"
 
-#
-# Test for other affinity functions as appropriate
-# TBD
-#
+# Check whether --with-cpusetdir was given.
+if test "${with_cpusetdir+set}" = set; then
+  withval=$with_cpusetdir; try_path=$withval
+fi
+
+  for cpuset_dir in $try_path "" $cpuset_default_dir; do
+    if test -d "$cpuset_dir" ; then
+
+cat >>confdefs.h <<_ACEOF
+#define CPUSET_DIR "$cpuset_dir"
+_ACEOF
+
+      have_sched_setaffinity=yes
+      break
+    fi
+  done
 
 #
 # Set HAVE_SCHED_SETAFFINITY if any task affinity supported
@@ -23380,6 +23406,76 @@ fi
 
 
 
+  { echo "$as_me:$LINENO: checking whether to enable ISO 8601 time format support" >&5
+echo $ECHO_N "checking whether to enable ISO 8601 time format support... $ECHO_C" >&6; }
+  # Check whether --enable-iso8601 was given.
+if test "${enable_iso8601+set}" = set; then
+  enableval=$enable_iso8601;  case "$enableval" in
+        yes) x_ac_iso8601=yes ;;
+         no) x_ac_iso8601=no ;;
+          *) { echo "$as_me:$LINENO: result: doh!" >&5
+echo "${ECHO_T}doh!" >&6; }
+             { { echo "$as_me:$LINENO: error: bad value \"$enableval\" for --enable-iso8601" >&5
+echo "$as_me: error: bad value \"$enableval\" for --enable-iso8601" >&2;}
+   { (exit 1); exit 1; }; } ;;
+      esac
+
+else
+  x_ac_iso8601=yes
+
+fi
+
+
+  if test "$x_ac_iso8601" = yes; then
+    { echo "$as_me:$LINENO: result: yes" >&5
+echo "${ECHO_T}yes" >&6; }
+
+cat >>confdefs.h <<\_ACEOF
+#define USE_ISO_8601
+_ACEOF
+
+  else
+    { echo "$as_me:$LINENO: result: no" >&5
+echo "${ECHO_T}no" >&6; }
+  fi
+
+
+
+  { echo "$as_me:$LINENO: checking whether sbatch --get-user-env option should load .login" >&5
+echo $ECHO_N "checking whether sbatch --get-user-env option should load .login... $ECHO_C" >&6; }
+  # Check whether --enable-load-env-no-login was given.
+if test "${enable_load_env_no_login+set}" = set; then
+  enableval=$enable_load_env_no_login;  case "$enableval" in
+        yes) x_ac_load_env_no_login=yes ;;
+         no) x_ac_load_env_no_login=no ;;
+          *) { echo "$as_me:$LINENO: result: doh!" >&5
+echo "${ECHO_T}doh!" >&6; }
+             { { echo "$as_me:$LINENO: error: bad value \"$enableval\" for --enable-load-env-no-login" >&5
+echo "$as_me: error: bad value \"$enableval\" for --enable-load-env-no-login" >&2;}
+   { (exit 1); exit 1; }; } ;;
+      esac
+
+else
+  x_ac_load_env_no_login=no
+
+fi
+
+
+  if test "$x_ac_load_env_no_login" = yes; then
+    { echo "$as_me:$LINENO: result: yes" >&5
+echo "${ECHO_T}yes" >&6; }
+
+cat >>confdefs.h <<\_ACEOF
+#define LOAD_ENV_NO_LOGIN 1
+_ACEOF
+
+  else
+    { echo "$as_me:$LINENO: result: no" >&5
+echo "${ECHO_T}no" >&6; }
+  fi
+
+
+
   { echo "$as_me:$LINENO: checking whether byte ordering is bigendian" >&5
 echo $ECHO_N "checking whether byte ordering is bigendian... $ECHO_C" >&6; }
 if test "${ac_cv_c_bigendian+set}" = set; then
@@ -25053,6 +25149,75 @@ CFLAGS="$CFLAGS $PTHREAD_CFLAGS"
 LIBS="$PTHREAD_LIBS $LIBS"
 
 
+  { echo "$as_me:$LINENO: checking for Cray XT" >&5
+echo $ECHO_N "checking for Cray XT... $ECHO_C" >&6; }
+  # Check whether --enable-cray-xt was given.
+if test "${enable_cray_xt+set}" = set; then
+  enableval=$enable_cray_xt;  case "$enableval" in
+        yes) x_ac_cray_xt=yes ;;
+         no) x_ac_cray_xt=no ;;
+          *) { echo "$as_me:$LINENO: result: doh!" >&5
+echo "${ECHO_T}doh!" >&6; }
+             { { echo "$as_me:$LINENO: error: bad value \"$enableval\" for --enable-cray-xt" >&5
+echo "$as_me: error: bad value \"$enableval\" for --enable-cray-xt" >&2;}
+   { (exit 1); exit 1; }; } ;;
+      esac
+
+else
+  x_ac_cray_xt=no
+
+fi
+
+
+  if test "$x_ac_cray_xt" = yes; then
+    { echo "$as_me:$LINENO: result: yes" >&5
+echo "${ECHO_T}yes" >&6; }
+
+cat >>confdefs.h <<\_ACEOF
+#define HAVE_3D 1
+_ACEOF
+
+
+cat >>confdefs.h <<\_ACEOF
+#define HAVE_CRAY 1
+_ACEOF
+
+
+cat >>confdefs.h <<\_ACEOF
+#define HAVE_CRAY_XT 1
+_ACEOF
+
+
+cat >>confdefs.h <<\_ACEOF
+#define HAVE_FRONT_END 1
+_ACEOF
+
+  else
+    { echo "$as_me:$LINENO: result: no" >&5
+echo "${ECHO_T}no" >&6; }
+  fi
+
+
+# Check whether --with-apbasil was given.
+if test "${with_apbasil+set}" = set; then
+  withval=$with_apbasil;  try_apbasil=$withval
+fi
+
+  apbasil_default_locs="/usr/apbasil"
+  for apbasil_loc in $try_apbasil "" $apbasil_default_locs; do
+    if test -z "$have_apbasil" -a -x "$apbasil_loc" ; then
+      have_apbasil=$apbasil_loc
+    fi
+  done
+  if test ! -z "$have_apbasil" ; then
+
+cat >>confdefs.h <<_ACEOF
+#define APBASIL_LOC "$have_apbasil"
+_ACEOF
+
+  fi
+
+
   { echo "$as_me:$LINENO: checking for Sun Constellation system" >&5
 echo $ECHO_N "checking for Sun Constellation system... $ECHO_C" >&6; }
   # Check whether --enable-sun-const was given.
@@ -25444,7 +25609,6 @@ fi
 	    PKG_CONFIG_PATH="/usr/lib64/pkgconfig/"
     fi
 
-
 ### Check for pkg-config program
 
 # Check whether --with-pkg-config was given.
@@ -25930,6 +26094,13 @@ echo "$as_me: WARNING: *** MySQL test program execution failed." >&2;}
 			fi
 		fi
       	fi
+	 if test x"$ac_have_mysql" = x"yes"; then
+  WITH_MYSQL_TRUE=
+  WITH_MYSQL_FALSE='#'
+else
+  WITH_MYSQL_TRUE='#'
+  WITH_MYSQL_FALSE=
+fi
 
 
 	#Check for PostgreSQL
@@ -26111,6 +26282,15 @@ _ACEOF
 echo "$as_me: WARNING: *** PostgreSQL test program execution failed." >&2;}
 		fi
       	fi
+	 if test x"$ac_have_pgsql" = x"yes"; then
+  WITH_PGSQL_TRUE=
+  WITH_PGSQL_FALSE='#'
+else
+  WITH_PGSQL_TRUE='#'
+  WITH_PGSQL_FALSE=
+fi
+
+
 
 
 
@@ -26926,7 +27106,7 @@ fi
 
 rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \
       conftest$ac_exeext conftest.$ac_ext
-    LIBS="$savedLIBS"
+    LIBS="$saved_LIBS"
   fi
 
 
@@ -27298,36 +27478,6 @@ fi
 
 
 
-{ echo "$as_me:$LINENO: checking if user env vars should be based upon login" >&5
-echo $ECHO_N "checking if user env vars should be based upon login... $ECHO_C" >&6; }
-# Check whether --enable-load-env-no-login was given.
-if test "${enable_load_env_no_login+set}" = set; then
-  enableval=$enable_load_env_no_login;  case "$enableval" in
-     yes) x_ac_load_env_no_login=yes ;;
-      no) x_ac_load_env_no_login=no ;;
-       *) { echo "$as_me:$LINENO: result: doh!" >&5
-echo "${ECHO_T}doh!" >&6; }
-          { { echo "$as_me:$LINENO: error: bad value \"$enableval\" for --enable-load-env-no-login" >&5
-echo "$as_me: error: bad value \"$enableval\" for --enable-load-env-no-login" >&2;}
-   { (exit 1); exit 1; }; } ;;
-   esac
-
-
-fi
-
-if test "$x_ac_load_env_no_login" = yes; then
-
-cat >>confdefs.h <<\_ACEOF
-#define LOAD_ENV_NO_LOGIN 1
-_ACEOF
-
-  { echo "$as_me:$LINENO: result: yes" >&5
-echo "${ECHO_T}yes" >&6; }
-else
-  { echo "$as_me:$LINENO: result: no" >&5
-echo "${ECHO_T}no" >&6; }
-fi
-
 { echo "$as_me:$LINENO: checking whether to enable multiple-slurmd support" >&5
 echo $ECHO_N "checking whether to enable multiple-slurmd support... $ECHO_C" >&6; }
 # Check whether --enable-multiple-slurmd was given.
@@ -27523,7 +27673,126 @@ _ACEOF
 
 
 
-ac_config_files="$ac_config_files Makefile config.xml auxdir/Makefile contribs/Makefile contribs/perlapi/Makefile contribs/perlapi/libslurm-perl/Makefile.PL contribs/torque/Makefile contribs/phpext/Makefile contribs/phpext/slurm_php/config.m4 contribs/python/Makefile contribs/python/hostlist/Makefile contribs/python/hostlist/test/Makefile contribs/slurmdb-direct/Makefile src/Makefile src/api/Makefile src/common/Makefile src/database/Makefile src/sacct/Makefile src/sacctmgr/Makefile src/sreport/Makefile src/sstat/Makefile src/salloc/Makefile src/sbatch/Makefile src/sattach/Makefile src/srun/Makefile src/slurmd/Makefile src/slurmd/slurmd/Makefile src/slurmd/slurmstepd/Makefile src/slurmdbd/Makefile src/slurmctld/Makefile src/sbcast/Makefile src/scontrol/Makefile src/scancel/Makefile src/squeue/Makefile src/sinfo/Makefile src/smap/Makefile src/strigger/Makefile src/sview/Makefile src/plugins/Makefile src/plugins/accounting_storage/Makefile src/plugins/accounting_storage/filetxt/Makefile src/plugins/accounting_storage/mysql/Makefile src/plugins/accounting_storage/pgsql/Makefile src/plugins/accounting_storage/none/Makefile src/plugins/accounting_storage/slurmdbd/Makefile src/plugins/auth/Makefile src/plugins/auth/authd/Makefile src/plugins/auth/munge/Makefile src/plugins/auth/none/Makefile src/plugins/checkpoint/Makefile src/plugins/checkpoint/aix/Makefile src/plugins/checkpoint/none/Makefile src/plugins/checkpoint/ompi/Makefile src/plugins/checkpoint/xlch/Makefile src/plugins/crypto/Makefile src/plugins/crypto/munge/Makefile src/plugins/crypto/openssl/Makefile src/plugins/jobacct_gather/Makefile src/plugins/jobacct_gather/linux/Makefile src/plugins/jobacct_gather/aix/Makefile src/plugins/jobacct_gather/none/Makefile src/plugins/jobcomp/Makefile src/plugins/jobcomp/filetxt/Makefile src/plugins/jobcomp/none/Makefile src/plugins/jobcomp/script/Makefile src/plugins/jobcomp/mysql/Makefile src/plugins/jobcomp/pgsql/Makefile src/plugins/proctrack/Makefile src/plugins/proctrack/aix/Makefile src/plugins/proctrack/pgid/Makefile src/plugins/proctrack/linuxproc/Makefile src/plugins/proctrack/rms/Makefile src/plugins/proctrack/sgi_job/Makefile src/plugins/sched/Makefile src/plugins/sched/backfill/Makefile src/plugins/sched/builtin/Makefile src/plugins/sched/gang/Makefile src/plugins/sched/hold/Makefile src/plugins/sched/wiki/Makefile src/plugins/sched/wiki2/Makefile src/plugins/select/Makefile src/plugins/select/bluegene/Makefile src/plugins/select/bluegene/block_allocator/Makefile src/plugins/select/bluegene/plugin/Makefile src/plugins/select/linear/Makefile src/plugins/select/cons_res/Makefile src/plugins/switch/Makefile src/plugins/switch/elan/Makefile src/plugins/switch/none/Makefile src/plugins/switch/federation/Makefile src/plugins/mpi/Makefile src/plugins/mpi/mpich1_p4/Makefile src/plugins/mpi/mpich1_shmem/Makefile src/plugins/mpi/mpichgm/Makefile src/plugins/mpi/mpichmx/Makefile src/plugins/mpi/mvapich/Makefile src/plugins/mpi/lam/Makefile src/plugins/mpi/none/Makefile src/plugins/mpi/openmpi/Makefile src/plugins/task/Makefile src/plugins/task/affinity/Makefile src/plugins/task/none/Makefile doc/Makefile doc/man/Makefile doc/html/Makefile doc/html/configurator.html testsuite/Makefile testsuite/expect/Makefile testsuite/slurm_unit/Makefile testsuite/slurm_unit/api/Makefile testsuite/slurm_unit/api/manual/Makefile testsuite/slurm_unit/common/Makefile testsuite/slurm_unit/slurmctld/Makefile testsuite/slurm_unit/slurmd/Makefile testsuite/slurm_unit/slurmdbd/Makefile"
+
+  _x_ac_blcr_dirs="/usr /usr/local /opt/freeware /opt/blcr"
+  _x_ac_blcr_libs="lib64 lib"
+
+
+# Check whether --with-blcr was given.
+if test "${with_blcr+set}" = set; then
+  withval=$with_blcr; _x_ac_blcr_dirs="$withval $_x_ac_blcr_dirs"
+fi
+
+
+  { echo "$as_me:$LINENO: checking for blcr installation" >&5
+echo $ECHO_N "checking for blcr installation... $ECHO_C" >&6; }
+if test "${x_ac_cv_blcr_dir+set}" = set; then
+  echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+
+      for d in $_x_ac_blcr_dirs; do
+        test -d "$d" || continue
+        test -d "$d/include" || continue
+        test -f "$d/include/libcr.h" || continue
+	for bit in $_x_ac_blcr_libs; do
+          test -d "$d/$bit" || continue
+
+ 	  _x_ac_blcr_libs_save="$LIBS"
+          LIBS="-L$d/$bit -lcr $LIBS"
+          cat >conftest.$ac_ext <<_ACEOF
+/* confdefs.h.  */
+_ACEOF
+cat confdefs.h >>conftest.$ac_ext
+cat >>conftest.$ac_ext <<_ACEOF
+/* end confdefs.h.  */
+
+/* Override any GCC internal prototype to avoid an error.
+   Use char because int might match the return type of a GCC
+   builtin and then its argument prototype would still apply.  */
+#ifdef __cplusplus
+extern "C"
+#endif
+char cr_init ();
+int
+main ()
+{
+return cr_init ();
+  ;
+  return 0;
+}
+_ACEOF
+rm -f conftest.$ac_objext conftest$ac_exeext
+if { (ac_try="$ac_link"
+case "(($ac_try" in
+  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+  *) ac_try_echo=$ac_try;;
+esac
+eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
+  (eval "$ac_link") 2>conftest.er1
+  ac_status=$?
+  grep -v '^ *+' conftest.er1 >conftest.err
+  rm -f conftest.er1
+  cat conftest.err >&5
+  echo "$as_me:$LINENO: \$? = $ac_status" >&5
+  (exit $ac_status); } && {
+	 test -z "$ac_c_werror_flag" ||
+	 test ! -s conftest.err
+       } && test -s conftest$ac_exeext &&
+       $as_test_x conftest$ac_exeext; then
+  x_ac_cv_blcr_dir=$d
+else
+  echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+
+fi
+
+rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \
+      conftest$ac_exeext conftest.$ac_ext
+          LIBS="$_x_ac_blcr_libs_save"
+          test -n "$x_ac_cv_blcr_dir" && break
+	done
+        test -n "$x_ac_cv_blcr_dir" && break
+      done
+
+fi
+{ echo "$as_me:$LINENO: result: $x_ac_cv_blcr_dir" >&5
+echo "${ECHO_T}$x_ac_cv_blcr_dir" >&6; }
+
+  if test -z "$x_ac_cv_blcr_dir"; then
+    { echo "$as_me:$LINENO: WARNING: unable to locate blcr installation" >&5
+echo "$as_me: WARNING: unable to locate blcr installation" >&2;}
+  else
+    BLCR_HOME="$x_ac_cv_blcr_dir"
+    BLCR_LIBS="-lcr"
+    BLCR_CPPFLAGS="-I$x_ac_cv_blcr_dir/include"
+    BLCR_LDFLAGS="-L$x_ac_cv_blcr_dir/$bit"
+  fi
+
+
+cat >>confdefs.h <<_ACEOF
+#define BLCR_HOME "$x_ac_cv_blcr_dir"
+_ACEOF
+
+
+
+
+
+
+
+   if test -n "$x_ac_cv_blcr_dir"; then
+  WITH_BLCR_TRUE=
+  WITH_BLCR_FALSE='#'
+else
+  WITH_BLCR_TRUE='#'
+  WITH_BLCR_FALSE=
+fi
+
+
+
+
+
+ac_config_files="$ac_config_files Makefile config.xml auxdir/Makefile contribs/Makefile contribs/perlapi/Makefile contribs/perlapi/libslurm-perl/Makefile.PL contribs/torque/Makefile contribs/phpext/Makefile contribs/phpext/slurm_php/config.m4 contribs/python/Makefile contribs/python/hostlist/Makefile contribs/python/hostlist/test/Makefile contribs/slurmdb-direct/Makefile src/Makefile src/api/Makefile src/common/Makefile src/database/Makefile src/sacct/Makefile src/sacctmgr/Makefile src/sreport/Makefile src/sstat/Makefile src/sshare/Makefile src/salloc/Makefile src/sbatch/Makefile src/sattach/Makefile src/sprio/Makefile src/srun/Makefile src/srun_cr/Makefile src/slurmd/Makefile src/slurmd/slurmd/Makefile src/slurmd/slurmstepd/Makefile src/slurmdbd/Makefile src/slurmctld/Makefile src/sbcast/Makefile src/scontrol/Makefile src/scancel/Makefile src/squeue/Makefile src/sinfo/Makefile src/smap/Makefile src/strigger/Makefile src/sview/Makefile src/plugins/Makefile src/plugins/accounting_storage/Makefile src/plugins/accounting_storage/filetxt/Makefile src/plugins/accounting_storage/mysql/Makefile src/plugins/accounting_storage/pgsql/Makefile src/plugins/accounting_storage/none/Makefile src/plugins/accounting_storage/slurmdbd/Makefile src/plugins/auth/Makefile src/plugins/auth/authd/Makefile src/plugins/auth/munge/Makefile src/plugins/auth/none/Makefile src/plugins/checkpoint/Makefile src/plugins/checkpoint/aix/Makefile src/plugins/checkpoint/none/Makefile src/plugins/checkpoint/ompi/Makefile src/plugins/checkpoint/xlch/Makefile src/plugins/checkpoint/blcr/Makefile src/plugins/checkpoint/blcr/cr_checkpoint.sh src/plugins/checkpoint/blcr/cr_restart.sh src/plugins/crypto/Makefile src/plugins/crypto/munge/Makefile src/plugins/crypto/openssl/Makefile src/plugins/jobacct_gather/Makefile src/plugins/jobacct_gather/linux/Makefile src/plugins/jobacct_gather/aix/Makefile src/plugins/jobacct_gather/none/Makefile src/plugins/jobcomp/Makefile src/plugins/jobcomp/filetxt/Makefile src/plugins/jobcomp/none/Makefile src/plugins/jobcomp/script/Makefile src/plugins/jobcomp/mysql/Makefile src/plugins/jobcomp/pgsql/Makefile src/plugins/priority/Makefile src/plugins/priority/basic/Makefile src/plugins/priority/multifactor/Makefile src/plugins/proctrack/Makefile src/plugins/proctrack/aix/Makefile src/plugins/proctrack/pgid/Makefile src/plugins/proctrack/linuxproc/Makefile src/plugins/proctrack/rms/Makefile src/plugins/proctrack/sgi_job/Makefile src/plugins/sched/Makefile src/plugins/sched/backfill/Makefile src/plugins/sched/builtin/Makefile src/plugins/sched/gang/Makefile src/plugins/sched/hold/Makefile src/plugins/sched/wiki/Makefile src/plugins/sched/wiki2/Makefile src/plugins/select/Makefile src/plugins/select/bluegene/Makefile src/plugins/select/bluegene/block_allocator/Makefile src/plugins/select/bluegene/plugin/Makefile src/plugins/select/cons_res/Makefile src/plugins/select/linear/Makefile src/plugins/switch/Makefile src/plugins/switch/elan/Makefile src/plugins/switch/none/Makefile src/plugins/switch/federation/Makefile src/plugins/mpi/Makefile src/plugins/mpi/mpich1_p4/Makefile src/plugins/mpi/mpich1_shmem/Makefile src/plugins/mpi/mpichgm/Makefile src/plugins/mpi/mpichmx/Makefile src/plugins/mpi/mvapich/Makefile src/plugins/mpi/lam/Makefile src/plugins/mpi/none/Makefile src/plugins/mpi/openmpi/Makefile src/plugins/task/Makefile src/plugins/task/affinity/Makefile src/plugins/task/none/Makefile src/plugins/topology/Makefile src/plugins/topology/3d_torus/Makefile src/plugins/topology/none/Makefile src/plugins/topology/tree/Makefile doc/Makefile doc/man/Makefile doc/html/Makefile doc/html/configurator.html testsuite/Makefile testsuite/expect/Makefile testsuite/slurm_unit/Makefile testsuite/slurm_unit/api/Makefile testsuite/slurm_unit/api/manual/Makefile testsuite/slurm_unit/common/Makefile testsuite/slurm_unit/slurmctld/Makefile testsuite/slurm_unit/slurmd/Makefile testsuite/slurm_unit/slurmdbd/Makefile"
 
 
 cat >confcache <<\_ACEOF
@@ -27727,6 +27996,20 @@ echo "$as_me: error: conditional \"HAVE_GTK\" was never defined.
 Usually this means the macro was only invoked conditionally." >&2;}
    { (exit 1); exit 1; }; }
 fi
+if test -z "${WITH_MYSQL_TRUE}" && test -z "${WITH_MYSQL_FALSE}"; then
+  { { echo "$as_me:$LINENO: error: conditional \"WITH_MYSQL\" was never defined.
+Usually this means the macro was only invoked conditionally." >&5
+echo "$as_me: error: conditional \"WITH_MYSQL\" was never defined.
+Usually this means the macro was only invoked conditionally." >&2;}
+   { (exit 1); exit 1; }; }
+fi
+if test -z "${WITH_PGSQL_TRUE}" && test -z "${WITH_PGSQL_FALSE}"; then
+  { { echo "$as_me:$LINENO: error: conditional \"WITH_PGSQL\" was never defined.
+Usually this means the macro was only invoked conditionally." >&5
+echo "$as_me: error: conditional \"WITH_PGSQL\" was never defined.
+Usually this means the macro was only invoked conditionally." >&2;}
+   { (exit 1); exit 1; }; }
+fi
 if test -z "${DEBUG_MODULES_TRUE}" && test -z "${DEBUG_MODULES_FALSE}"; then
   { { echo "$as_me:$LINENO: error: conditional \"DEBUG_MODULES\" was never defined.
 Usually this means the macro was only invoked conditionally." >&5
@@ -27776,6 +28059,13 @@ echo "$as_me: error: conditional \"WITH_AUTHD\" was never defined.
 Usually this means the macro was only invoked conditionally." >&2;}
    { (exit 1); exit 1; }; }
 fi
+if test -z "${WITH_BLCR_TRUE}" && test -z "${WITH_BLCR_FALSE}"; then
+  { { echo "$as_me:$LINENO: error: conditional \"WITH_BLCR\" was never defined.
+Usually this means the macro was only invoked conditionally." >&5
+echo "$as_me: error: conditional \"WITH_BLCR\" was never defined.
+Usually this means the macro was only invoked conditionally." >&2;}
+   { (exit 1); exit 1; }; }
+fi
 
 : ${CONFIG_STATUS=./config.status}
 ac_clean_files_save=$ac_clean_files
@@ -28267,10 +28557,13 @@ do
     "src/sacctmgr/Makefile") CONFIG_FILES="$CONFIG_FILES src/sacctmgr/Makefile" ;;
     "src/sreport/Makefile") CONFIG_FILES="$CONFIG_FILES src/sreport/Makefile" ;;
     "src/sstat/Makefile") CONFIG_FILES="$CONFIG_FILES src/sstat/Makefile" ;;
+    "src/sshare/Makefile") CONFIG_FILES="$CONFIG_FILES src/sshare/Makefile" ;;
     "src/salloc/Makefile") CONFIG_FILES="$CONFIG_FILES src/salloc/Makefile" ;;
     "src/sbatch/Makefile") CONFIG_FILES="$CONFIG_FILES src/sbatch/Makefile" ;;
     "src/sattach/Makefile") CONFIG_FILES="$CONFIG_FILES src/sattach/Makefile" ;;
+    "src/sprio/Makefile") CONFIG_FILES="$CONFIG_FILES src/sprio/Makefile" ;;
     "src/srun/Makefile") CONFIG_FILES="$CONFIG_FILES src/srun/Makefile" ;;
+    "src/srun_cr/Makefile") CONFIG_FILES="$CONFIG_FILES src/srun_cr/Makefile" ;;
     "src/slurmd/Makefile") CONFIG_FILES="$CONFIG_FILES src/slurmd/Makefile" ;;
     "src/slurmd/slurmd/Makefile") CONFIG_FILES="$CONFIG_FILES src/slurmd/slurmd/Makefile" ;;
     "src/slurmd/slurmstepd/Makefile") CONFIG_FILES="$CONFIG_FILES src/slurmd/slurmstepd/Makefile" ;;
@@ -28300,6 +28593,9 @@ do
     "src/plugins/checkpoint/none/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/checkpoint/none/Makefile" ;;
     "src/plugins/checkpoint/ompi/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/checkpoint/ompi/Makefile" ;;
     "src/plugins/checkpoint/xlch/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/checkpoint/xlch/Makefile" ;;
+    "src/plugins/checkpoint/blcr/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/checkpoint/blcr/Makefile" ;;
+    "src/plugins/checkpoint/blcr/cr_checkpoint.sh") CONFIG_FILES="$CONFIG_FILES src/plugins/checkpoint/blcr/cr_checkpoint.sh" ;;
+    "src/plugins/checkpoint/blcr/cr_restart.sh") CONFIG_FILES="$CONFIG_FILES src/plugins/checkpoint/blcr/cr_restart.sh" ;;
     "src/plugins/crypto/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/crypto/Makefile" ;;
     "src/plugins/crypto/munge/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/crypto/munge/Makefile" ;;
     "src/plugins/crypto/openssl/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/crypto/openssl/Makefile" ;;
@@ -28313,6 +28609,9 @@ do
     "src/plugins/jobcomp/script/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/jobcomp/script/Makefile" ;;
     "src/plugins/jobcomp/mysql/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/jobcomp/mysql/Makefile" ;;
     "src/plugins/jobcomp/pgsql/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/jobcomp/pgsql/Makefile" ;;
+    "src/plugins/priority/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/priority/Makefile" ;;
+    "src/plugins/priority/basic/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/priority/basic/Makefile" ;;
+    "src/plugins/priority/multifactor/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/priority/multifactor/Makefile" ;;
     "src/plugins/proctrack/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/proctrack/Makefile" ;;
     "src/plugins/proctrack/aix/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/proctrack/aix/Makefile" ;;
     "src/plugins/proctrack/pgid/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/proctrack/pgid/Makefile" ;;
@@ -28330,8 +28629,8 @@ do
     "src/plugins/select/bluegene/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/select/bluegene/Makefile" ;;
     "src/plugins/select/bluegene/block_allocator/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/select/bluegene/block_allocator/Makefile" ;;
     "src/plugins/select/bluegene/plugin/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/select/bluegene/plugin/Makefile" ;;
-    "src/plugins/select/linear/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/select/linear/Makefile" ;;
     "src/plugins/select/cons_res/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/select/cons_res/Makefile" ;;
+    "src/plugins/select/linear/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/select/linear/Makefile" ;;
     "src/plugins/switch/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/switch/Makefile" ;;
     "src/plugins/switch/elan/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/switch/elan/Makefile" ;;
     "src/plugins/switch/none/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/switch/none/Makefile" ;;
@@ -28348,6 +28647,10 @@ do
     "src/plugins/task/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/task/Makefile" ;;
     "src/plugins/task/affinity/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/task/affinity/Makefile" ;;
     "src/plugins/task/none/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/task/none/Makefile" ;;
+    "src/plugins/topology/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/topology/Makefile" ;;
+    "src/plugins/topology/3d_torus/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/topology/3d_torus/Makefile" ;;
+    "src/plugins/topology/none/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/topology/none/Makefile" ;;
+    "src/plugins/topology/tree/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/topology/tree/Makefile" ;;
     "doc/Makefile") CONFIG_FILES="$CONFIG_FILES doc/Makefile" ;;
     "doc/man/Makefile") CONFIG_FILES="$CONFIG_FILES doc/man/Makefile" ;;
     "doc/html/Makefile") CONFIG_FILES="$CONFIG_FILES doc/html/Makefile" ;;
@@ -28630,9 +28933,13 @@ HAVE_GTK_FALSE!$HAVE_GTK_FALSE$ac_delim
 HAVEMYSQLCONFIG!$HAVEMYSQLCONFIG$ac_delim
 MYSQL_LIBS!$MYSQL_LIBS$ac_delim
 MYSQL_CFLAGS!$MYSQL_CFLAGS$ac_delim
+WITH_MYSQL_TRUE!$WITH_MYSQL_TRUE$ac_delim
+WITH_MYSQL_FALSE!$WITH_MYSQL_FALSE$ac_delim
 HAVEPGCONFIG!$HAVEPGCONFIG$ac_delim
 PGSQL_LIBS!$PGSQL_LIBS$ac_delim
 PGSQL_CFLAGS!$PGSQL_CFLAGS$ac_delim
+WITH_PGSQL_TRUE!$WITH_PGSQL_TRUE$ac_delim
+WITH_PGSQL_FALSE!$WITH_PGSQL_FALSE$ac_delim
 DEBUG_MODULES_TRUE!$DEBUG_MODULES_TRUE$ac_delim
 DEBUG_MODULES_FALSE!$DEBUG_MODULES_FALSE$ac_delim
 SLURMCTLD_PORT!$SLURMCTLD_PORT$ac_delim
@@ -28654,10 +28961,6 @@ SSL_LDFLAGS!$SSL_LDFLAGS$ac_delim
 SSL_LIBS!$SSL_LIBS$ac_delim
 SSL_CPPFLAGS!$SSL_CPPFLAGS$ac_delim
 HAVE_OPENSSL_TRUE!$HAVE_OPENSSL_TRUE$ac_delim
-HAVE_OPENSSL_FALSE!$HAVE_OPENSSL_FALSE$ac_delim
-HAVE_OPENSSL!$HAVE_OPENSSL$ac_delim
-MUNGE_LIBS!$MUNGE_LIBS$ac_delim
-MUNGE_CPPFLAGS!$MUNGE_CPPFLAGS$ac_delim
 _ACEOF
 
   if test `sed -n "s/.*$ac_delim\$/X/p" conf$$subs.sed | grep -c X` = 97; then
@@ -28699,6 +29002,10 @@ _ACEOF
 ac_delim='%!_!# '
 for ac_last_try in false false false false false :; do
   cat >conf$$subs.sed <<_ACEOF
+HAVE_OPENSSL_FALSE!$HAVE_OPENSSL_FALSE$ac_delim
+HAVE_OPENSSL!$HAVE_OPENSSL$ac_delim
+MUNGE_LIBS!$MUNGE_LIBS$ac_delim
+MUNGE_CPPFLAGS!$MUNGE_CPPFLAGS$ac_delim
 MUNGE_LDFLAGS!$MUNGE_LDFLAGS$ac_delim
 WITH_MUNGE_TRUE!$WITH_MUNGE_TRUE$ac_delim
 WITH_MUNGE_FALSE!$WITH_MUNGE_FALSE$ac_delim
@@ -28707,10 +29014,16 @@ AUTHD_CFLAGS!$AUTHD_CFLAGS$ac_delim
 WITH_AUTHD_TRUE!$WITH_AUTHD_TRUE$ac_delim
 WITH_AUTHD_FALSE!$WITH_AUTHD_FALSE$ac_delim
 UTIL_LIBS!$UTIL_LIBS$ac_delim
+BLCR_HOME!$BLCR_HOME$ac_delim
+BLCR_LIBS!$BLCR_LIBS$ac_delim
+BLCR_CPPFLAGS!$BLCR_CPPFLAGS$ac_delim
+BLCR_LDFLAGS!$BLCR_LDFLAGS$ac_delim
+WITH_BLCR_TRUE!$WITH_BLCR_TRUE$ac_delim
+WITH_BLCR_FALSE!$WITH_BLCR_FALSE$ac_delim
 LTLIBOBJS!$LTLIBOBJS$ac_delim
 _ACEOF
 
-  if test `sed -n "s/.*$ac_delim\$/X/p" conf$$subs.sed | grep -c X` = 9; then
+  if test `sed -n "s/.*$ac_delim\$/X/p" conf$$subs.sed | grep -c X` = 19; then
     break
   elif $ac_last_try; then
     { { echo "$as_me:$LINENO: error: could not make $CONFIG_STATUS" >&5
diff --git a/configure.ac b/configure.ac
index 6acdcaf6644902ad87a3be903f201c1b45011bbc..ae2e82c4ece7e4c04467f4020c81c93cb7a63fcc 100644
--- a/configure.ac
+++ b/configure.ac
@@ -1,4 +1,4 @@
-# $Id: configure.ac 16936 2009-03-18 22:02:01Z da $
+# $Id: configure.ac 16996 2009-03-24 20:21:41Z jette $
 # This file is to be processed with autoconf to generate a configure script
 
 dnl Prologue
@@ -98,6 +98,14 @@ dnl
 dnl Check for PAM module support
 X_AC_PAM
 
+dnl
+dnl Check for ISO compliance
+X_AC_ISO
+
+dnl
+dnl Check if we want to load .login with sbatch --get-user-env option
+X_AC_ENV_LOGIC
+
 dnl Checks for types.
 dnl
 X_AC_SLURM_BIGENDIAN
@@ -139,6 +147,7 @@ LDFLAGS="$LDFLAGS "
 CFLAGS="$CFLAGS $PTHREAD_CFLAGS"
 LIBS="$PTHREAD_LIBS $LIBS"
 
+X_AC_CRAY
 X_AC_SUN_CONST
 
 X_AC_CFLAGS
@@ -216,30 +225,6 @@ dnl Check for compilation of SLURM auth modules:
 dnl
 X_AC_MUNGE
 
-dnl Check if srun --get-user-env (for Moab) should load user's environment 
-dnl based upon a login. This may be temporary.
-dnl
-AC_MSG_CHECKING([if user env vars should be based upon login])
-AC_ARG_ENABLE(
-  [load-env-no-login],
-  AS_HELP_STRING(--enable-load-env-no-login,
-                 [enable --get-user-env option to load user environment without login]),
-  [ case "$enableval" in
-     yes) x_ac_load_env_no_login=yes ;;
-      no) x_ac_load_env_no_login=no ;;
-       *) AC_MSG_RESULT([doh!])
-          AC_MSG_ERROR([bad value "$enableval" for --enable-load-env-no-login]) ;;
-   esac
-  ]
-)
-if test "$x_ac_load_env_no_login" = yes; then
-  AC_DEFINE(LOAD_ENV_NO_LOGIN, 1, 
-            [Define to 1 for --get-user-env to load user environment without login.])
-  AC_MSG_RESULT([yes])
-else
-  AC_MSG_RESULT([no])
-fi
-
 dnl
 dnl Check if multiple-slurmd support is requested and define MULTIPLE_SLURMD
 dnl if it is.
@@ -283,6 +268,12 @@ dnl Add LSD-Tools defines:
 AC_DEFINE(WITH_LSD_FATAL_ERROR_FUNC, 1, [Have definition of lsd_fatal_error()])
 AC_DEFINE(WITH_LSD_NOMEM_ERROR_FUNC, 1, [Have definition of lsd_nomem_error()])
 
+dnl
+dnl Check for compilation of SLURM with BLCR support:
+dnl
+X_AC_BLCR
+
+
 dnl All slurm Makefiles:
 
 AC_CONFIG_FILES([Makefile
@@ -306,10 +297,13 @@ AC_CONFIG_FILES([Makefile
 		 src/sacctmgr/Makefile
 		 src/sreport/Makefile
 		 src/sstat/Makefile
+		 src/sshare/Makefile
 		 src/salloc/Makefile 
 		 src/sbatch/Makefile 
 		 src/sattach/Makefile
+		 src/sprio/Makefile
 		 src/srun/Makefile 
+		 src/srun_cr/Makefile 
 		 src/slurmd/Makefile 
 		 src/slurmd/slurmd/Makefile 
 		 src/slurmd/slurmstepd/Makefile 
@@ -339,6 +333,9 @@ AC_CONFIG_FILES([Makefile
 		 src/plugins/checkpoint/none/Makefile
 		 src/plugins/checkpoint/ompi/Makefile
 		 src/plugins/checkpoint/xlch/Makefile
+		 src/plugins/checkpoint/blcr/Makefile
+		 src/plugins/checkpoint/blcr/cr_checkpoint.sh
+		 src/plugins/checkpoint/blcr/cr_restart.sh
 		 src/plugins/crypto/Makefile
 		 src/plugins/crypto/munge/Makefile
 		 src/plugins/crypto/openssl/Makefile
@@ -352,6 +349,9 @@ AC_CONFIG_FILES([Makefile
 		 src/plugins/jobcomp/script/Makefile
 		 src/plugins/jobcomp/mysql/Makefile
 		 src/plugins/jobcomp/pgsql/Makefile
+		 src/plugins/priority/Makefile
+		 src/plugins/priority/basic/Makefile
+		 src/plugins/priority/multifactor/Makefile
 		 src/plugins/proctrack/Makefile
 		 src/plugins/proctrack/aix/Makefile
 		 src/plugins/proctrack/pgid/Makefile
@@ -369,8 +369,8 @@ AC_CONFIG_FILES([Makefile
 		 src/plugins/select/bluegene/Makefile
 		 src/plugins/select/bluegene/block_allocator/Makefile
 		 src/plugins/select/bluegene/plugin/Makefile
-		 src/plugins/select/linear/Makefile
 		 src/plugins/select/cons_res/Makefile
+		 src/plugins/select/linear/Makefile
 		 src/plugins/switch/Makefile
 		 src/plugins/switch/elan/Makefile
 		 src/plugins/switch/none/Makefile
@@ -387,6 +387,10 @@ AC_CONFIG_FILES([Makefile
 		 src/plugins/task/Makefile
 		 src/plugins/task/affinity/Makefile
 		 src/plugins/task/none/Makefile
+		 src/plugins/topology/Makefile
+		 src/plugins/topology/3d_torus/Makefile
+		 src/plugins/topology/none/Makefile
+		 src/plugins/topology/tree/Makefile
 		 doc/Makefile
 		 doc/man/Makefile
 		 doc/html/Makefile
diff --git a/contribs/Makefile.am b/contribs/Makefile.am
index 5c63fda341eda63bc7f0342c308226fc9ba1b1d1..8f75575e73b24ba17a06555d939767f4f5eb7f55 100644
--- a/contribs/Makefile.am
+++ b/contribs/Makefile.am
@@ -5,5 +5,6 @@ EXTRA_DIST = \
 	make.slurm.patch	\
 	mpich1.slurm.patch	\
 	ptrace.patch		\
+	skilling.c		\
 	time_login.c		\
 	README
diff --git a/contribs/Makefile.in b/contribs/Makefile.in
index 0b1295f61a4bbf35eb817f2bfb44834944c3f830..113c21083eb643f3ec1a5a43d858726d24948af1 100644
--- a/contribs/Makefile.in
+++ b/contribs/Makefile.in
@@ -40,14 +40,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -89,6 +93,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
@@ -254,6 +262,7 @@ EXTRA_DIST = \
 	make.slurm.patch	\
 	mpich1.slurm.patch	\
 	ptrace.patch		\
+	skilling.c		\
 	time_login.c		\
 	README
 
diff --git a/contribs/README b/contribs/README
index 99b5bc42ec41a2c4a212ef93ec8fe76775fcdbb9..ddd8812d063c69f367e9f1755b5d4c4ad7694ab6 100644
--- a/contribs/README
+++ b/contribs/README
@@ -21,6 +21,18 @@ of the SLURM contribs distribution follows:
   python/            [Python modules]
      Directory for Python modules.
 
+  skilling.c         [ C program ]
+     This program can be used to order the hostnames in a 2+ dimensional
+     architecture for use in the slurm.conf file. It generates the
+     Hilbert number based upon a node's physical location in the
+     computer. Nodes close together in their Hilbert number will also be
+     physically close in 2-D or 3-D space, so we can reduce the 2-D or
+     3-D job placement problem to a 1-D problem that SLURM can easily
+     handle by defining the node names in the slurm.conf file in order
+     of their Hilbert number. If the computer is not a perfect square or
+     cube with a power of two size, then collapse the node list while
+     maintaining the numeric order based upon the Hilbert number.
+
   time_login.c       [ C program ]
      This program will report how long a pseudo-login will take for specific
      users or all users on the system. Users identified by this program 
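
A minimal sketch of how the skilling.c output might be consumed: given the
Hilbert number computed for each node's physical coordinates, sort the node
names and list them in that order in slurm.conf. The node names and Hilbert
numbers below are made up for illustration; real values would come from
running skilling.c on each node's (X,Y,Z) location.

    /* order_nodes.c - hypothetical example of sorting by Hilbert number */
    #include <stdio.h>
    #include <stdlib.h>

    struct node { const char *name; int hilbert; };

    static int by_hilbert(const void *a, const void *b)
    {
        return ((const struct node *)a)->hilbert -
               ((const struct node *)b)->hilbert;
    }

    int main(void)
    {
        /* made-up Hilbert numbers for four hypothetical nodes */
        struct node nodes[] = {
            { "tux0", 11 }, { "tux1", 2 }, { "tux2", 7 }, { "tux3", 5 },
        };
        size_t i, n = sizeof(nodes) / sizeof(nodes[0]);

        qsort(nodes, n, sizeof(nodes[0]), by_hilbert);
        for (i = 0; i < n; i++)    /* slurm.conf NodeName order */
            printf("%s\n", nodes[i].name);
        return 0;
    }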
diff --git a/contribs/env_cache_builder.c b/contribs/env_cache_builder.c
index d8560d104ee6e01c60d499a21b3f7711db8d976a..176b06bb05644317fc411d13e324c1c573abc1b9 100644
--- a/contribs/env_cache_builder.c
+++ b/contribs/env_cache_builder.c
@@ -27,10 +27,11 @@
  *  Copyright (C) 2008 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/contribs/make.slurm.patch b/contribs/make.slurm.patch
index df23aa7297adceab467fbf5db93c005cbecacfe5..236fae8408d1e912cda5a380150f0862691ba20f 100644
--- a/contribs/make.slurm.patch
+++ b/contribs/make.slurm.patch
@@ -30,7 +30,7 @@ Index: job.c
  child_execute_job (int stdin_fd, int stdout_fd, char **argv, char **envp)
  {
 +/* PARALLEL JOB LAUNCH VIA SLURM */
-+  if (getenv("SLURM_JOBID")) {
++  if (getenv("SLURM_JOB_ID")) {
 +    int i;
 +    static char *argx[128];
 +    argx[0] = "srun";
diff --git a/contribs/mpich1.slurm.patch b/contribs/mpich1.slurm.patch
index 70990087ad17f3842a1d80776d342f6e3829ff4d..c5308d61a8d43b40171113da91563e816d6677e4 100644
--- a/contribs/mpich1.slurm.patch
+++ b/contribs/mpich1.slurm.patch
@@ -76,7 +76,7 @@ Index: mpid/ch_p4/p4/lib/p4_args.c
 +     * for a truly parallel job launch using the existing "execer"
 +     * mode of operation with slight modification.
 +     */
-+    if (getenv("SLURM_JOBID")) {
++    if (getenv("SLURM_JOB_ID")) {
 +	int i;
 +	char *tmp, *hostlist, *host2, *tasks_per_node, *task2;
 +
diff --git a/contribs/perlapi/Makefile.in b/contribs/perlapi/Makefile.in
index 200197341f25508e2b8dedd0f5b3559b4bcaa825..ac8d0ea05d4c12d5820f5483f905c5cd575ecef4 100644
--- a/contribs/perlapi/Makefile.in
+++ b/contribs/perlapi/Makefile.in
@@ -40,14 +40,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -77,6 +81,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
diff --git a/contribs/perlapi/libslurm-perl/Slurm.xs b/contribs/perlapi/libslurm-perl/Slurm.xs
index 72475ed7f207af2843b4ca5fc17d1b9adf1cb928..8c83a9df1f261136a4fe7c4ac8c20a3d1e858102 100644
--- a/contribs/perlapi/libslurm-perl/Slurm.xs
+++ b/contribs/perlapi/libslurm-perl/Slurm.xs
@@ -20,13 +20,6 @@ extern void slurm_api_clear_config(void);
         slurm_xfree((void **)&(__p), __FILE__, __LINE__, "")
 extern void slurm_xfree(void **, const char *, int, const char *);
 
-extern int slurm_hostlist_count(hostlist_t hl);
-extern int slurm_hostlist_push(hostlist_t hl, const char *hosts);
-extern int slurm_hostlist_push_host(hostlist_t hl, const char *host);
-extern int slurm_hostlist_find(hostlist_t hl, const char *hostname);
-extern size_t slurm_hostlist_ranged_string(hostlist_t hl, size_t n, char *buf);
-extern void slurm_hostlist_uniq(hostlist_t hl);
-
 struct slurm {
 	node_info_msg_t *node_info_msg;
 	partition_info_msg_t *part_info_msg;
@@ -675,19 +668,19 @@ slurm_checkpoint_enable(slurm_t self, U32 jobid, U32 stepid)
 		jobid, stepid
 
 int
-slurm_checkpoint_create(slurm_t self, U32 jobid, U32 stepid, U16 max_wait)
+slurm_checkpoint_create(slurm_t self, U32 jobid, U32 stepid, U16 max_wait, char* image_dir)
 	C_ARGS:
-		jobid, stepid, max_wait
+		jobid, stepid, max_wait, image_dir
 
 int
-slurm_checkpoint_vacate(slurm_t self, U32 jobid, U32 stepid, U16 max_wait)
+slurm_checkpoint_vacate(slurm_t self, U32 jobid, U32 stepid, U16 max_wait, char* image_dir)
 	C_ARGS:
-		jobid, stepid, max_wait
+		jobid, stepid, max_wait, image_dir
 
 int
-slurm_checkpoint_restart(slurm_t self, U32 jobid, U32 stepid)
+slurm_checkpoint_restart(slurm_t self, U32 jobid, U32 stepid, U16 stick, char *image_dir)
 	C_ARGS:
-		jobid, stepid
+		jobid, stepid, stick, image_dir
 
 int
 slurm_checkpoint_complete(slurm_t self, U32 jobid, U32 stepid, time_t begin_time, U32 error_code, char* error_msg)
@@ -753,14 +746,19 @@ slurm_get_triggers(slurm_t self)
 ##################################################################
 MODULE=Slurm PACKAGE=Slurm::Hostlist PREFIX=slurm_hostlist_
 
+int
+slurm_hostlist_count(hostlist_t hl = NULL)
+	OUTPUT:
+		RETVAL
+	
 hostlist_t
 slurm_hostlist_create(char* hostlist)
 
 int
-slurm_hostlist_count(hostlist_t hl = NULL)
+slurm_hostlist_find(hostlist_t hl = NULL, char* hostname)
 	OUTPUT:
 		RETVAL
-	
+
 int
 slurm_hostlist_push(hostlist_t hl = NULL, char* hosts)
 	OUTPUT:
@@ -771,8 +769,17 @@ slurm_hostlist_push_host(hostlist_t hl = NULL, char* host)
 	OUTPUT:
 		RETVAL
 
-int
-slurm_hostlist_find(hostlist_t hl = NULL, char* hostname)
+char*
+slurm_hostlist_ranged_string(hostlist_t hl = NULL)
+	PREINIT:
+		size_t size = 1024;
+		int rc = 0;
+	CODE:
+		Newz(0, RETVAL, size, char);
+		while((rc = slurm_hostlist_ranged_string(hl, size, RETVAL)) == -1) {
+			size *= 2;
+			Renew(RETVAL, size, char);
+		}
 	OUTPUT:
 		RETVAL
 
@@ -791,20 +798,6 @@ slurm_hostlist_shift(hostlist_t hl = NULL)
 	OUTPUT:
 		RETVAL
 
-char*
-slurm_hostlist_ranged_string(hostlist_t hl = NULL)
-	PREINIT:
-		size_t size = 1024;
-		int rc = 0;
-	CODE:
-		Newz(0, RETVAL, size, char);
-		while((rc = slurm_hostlist_ranged_string(hl, size, RETVAL)) == -1) {
-			size *= 2;
-			Renew(RETVAL, size, char);
-		}
-	OUTPUT:
-		RETVAL
-
 void
 slurm_hostlist_uniq(hostlist_t hl = NULL)
         CODE:
@@ -1007,13 +1000,7 @@ slurm_step_launch(slurm_step_ctx ctx = NULL, HV* hv = NULL, SV* start_cb = NULL,
 		if(hv_to_slurm_step_launch_params(hv, &params) < 0) {
 			RETVAL = SLURM_ERROR;
 		} else {
-			char *dot_ptr, launcher_host[1024];
-			gethostname(launcher_host, sizeof(launcher_host));
-			dot_ptr = strchr(launcher_host, '.');
-			if (dot_ptr)
-				dot_ptr[0] = '\0';
-			RETVAL = slurm_step_launch(ctx, launcher_host,
-						   &params, &callbacks);
+			RETVAL = slurm_step_launch(ctx, &params, &callbacks);
 		}
 	OUTPUT:
 		RETVAL
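
The slurm_hostlist_ranged_string binding above uses a grow-and-retry loop
because the underlying call returns -1 while the supplied buffer is too
small. The same pattern in plain C, as a sketch (the host names are made up;
the slurm_hostlist_* calls are those declared in <slurm/slurm.h>):

    #include <stdio.h>
    #include <stdlib.h>
    #include <slurm/slurm.h>

    int main(void)
    {
        hostlist_t hl = slurm_hostlist_create("tux0,tux1,tux2,tux3");
        size_t size = 1024;
        char *buf = malloc(size);

        /* returns (size_t)-1 while the buffer is too small, so keep
         * doubling the buffer until the ranged string fits */
        while (slurm_hostlist_ranged_string(hl, size, buf) == (size_t)-1) {
            size *= 2;
            buf = realloc(buf, size);
        }
        printf("%s\n", buf);    /* e.g. "tux[0-3]" */

        free(buf);
        slurm_hostlist_destroy(hl);
        return 0;
    }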
diff --git a/contribs/perlapi/libslurm-perl/alloc.c b/contribs/perlapi/libslurm-perl/alloc.c
index 0ca22566ad8507b8a9e043ba5038d444e7795726..e6194392ab128f82fd6e2f9265b1a0fcc716b958 100644
--- a/contribs/perlapi/libslurm-perl/alloc.c
+++ b/contribs/perlapi/libslurm-perl/alloc.c
@@ -53,6 +53,7 @@ hv_to_job_desc_msg(HV* hv, job_desc_msg_t* job_desc_msg)
 		}
 	}
 	FETCH_FIELD(hv, job_desc_msg, features, charp, FALSE);
+	FETCH_FIELD(hv, job_desc_msg, reservation, charp, FALSE);
 	FETCH_FIELD(hv, job_desc_msg, immediate, uint16_t, FALSE);
 	FETCH_FIELD(hv, job_desc_msg, job_id, uint32_t, FALSE);
 	FETCH_FIELD(hv, job_desc_msg, name, charp, FALSE);
diff --git a/contribs/perlapi/libslurm-perl/conf.c b/contribs/perlapi/libslurm-perl/conf.c
index a4a2e91ea29fc7203ab9eaf5ce5a825505726c1f..62d2b66101f448ed1edbf87a6c3e8536e0bba8be 100644
--- a/contribs/perlapi/libslurm-perl/conf.c
+++ b/contribs/perlapi/libslurm-perl/conf.c
@@ -17,6 +17,20 @@ int
 slurm_ctl_conf_to_hv(slurm_ctl_conf_t* conf, HV* hv)
 {
 	STORE_FIELD(hv, conf, last_update, time_t);
+	STORE_FIELD(hv, conf, accounting_storage_enforce, uint16_t);
+	if(conf->accounting_storage_backup_host)
+		STORE_FIELD(hv, conf, accounting_storage_backup_host, charp);
+	if(conf->accounting_storage_host)
+		STORE_FIELD(hv, conf, accounting_storage_host, charp);
+	if(conf->accounting_storage_loc)
+		STORE_FIELD(hv, conf, accounting_storage_loc, charp);
+	if(conf->accounting_storage_pass)
+		STORE_FIELD(hv, conf, accounting_storage_pass, charp);
+	STORE_FIELD(hv, conf, accounting_storage_port, uint32_t);
+	if(conf->accounting_storage_type)
+		STORE_FIELD(hv, conf, accounting_storage_type, charp);
+	if(conf->accounting_storage_user)
+		STORE_FIELD(hv, conf, accounting_storage_user, charp);
 	if(conf->authtype)
 		STORE_FIELD(hv, conf, authtype, charp);
 	if(conf->backup_addr)
@@ -27,70 +41,105 @@ slurm_ctl_conf_to_hv(slurm_ctl_conf_t* conf, HV* hv)
 	STORE_FIELD(hv, conf, cache_groups, uint16_t);
 	if(conf->checkpoint_type)
 		STORE_FIELD(hv, conf, checkpoint_type, charp);
+	if(conf->cluster_name)
+		STORE_FIELD(hv, conf, cluster_name, charp);
 	if(conf->control_addr)
 		STORE_FIELD(hv, conf, control_addr, charp);
 	if(conf->control_machine)
 		STORE_FIELD(hv, conf, control_machine, charp);
 	if(conf->crypto_type)
 		STORE_FIELD(hv, conf, crypto_type, charp);
+	STORE_FIELD(hv, conf, debug_flags, uint32_t);
+	STORE_FIELD(hv, conf, def_mem_per_task, uint32_t);
+	STORE_FIELD(hv, conf, disable_root_jobs, uint16_t);
+	STORE_FIELD(hv, conf, enforce_part_limits, uint16_t);
 	if(conf->epilog)
 		STORE_FIELD(hv, conf, epilog, charp);
-	STORE_FIELD(hv, conf, first_job_id, uint32_t);
-	STORE_FIELD(hv, conf, next_job_id, uint32_t);
+	STORE_FIELD(hv, conf, epilog_msg_time, uint32_t);
+	if(conf->epilog_slurmctld)
+		STORE_FIELD(hv, conf, epilog_slurmctld, charp);
 	STORE_FIELD(hv, conf, fast_schedule, uint16_t);
+	STORE_FIELD(hv, conf, first_job_id, uint32_t);
+	STORE_FIELD(hv, conf, health_check_interval, uint16_t);
+	if(conf->health_check_program)
+		STORE_FIELD(hv, conf, health_check_program, charp);
 	STORE_FIELD(hv, conf, inactive_limit, uint16_t);
+	STORE_FIELD(hv, conf, job_acct_gather_freq, uint16_t);
 	if(conf->job_acct_gather_type)
 		STORE_FIELD(hv, conf, job_acct_gather_type, charp);
-	STORE_FIELD(hv, conf, job_acct_gather_freq, uint16_t);
-	if(conf->accounting_storage_loc)
-		STORE_FIELD(hv, conf, accounting_storage_loc, charp);
-	if(conf->accounting_storage_type)
-		STORE_FIELD(hv, conf, accounting_storage_type, charp);
-	if(conf->accounting_storage_user)
-		STORE_FIELD(hv, conf, accounting_storage_user, charp);
-	if(conf->accounting_storage_host)
-		STORE_FIELD(hv, conf, accounting_storage_host, charp);
-	if(conf->accounting_storage_pass)
-		STORE_FIELD(hv, conf, accounting_storage_pass, charp);
-	STORE_FIELD(hv, conf, accounting_storage_port, uint32_t);
+	if(conf->job_ckpt_dir)
+		STORE_FIELD(hv, conf, job_ckpt_dir, charp);
+	if(conf->job_comp_host)
+		STORE_FIELD(hv, conf, job_comp_host, charp);
 	if(conf->job_comp_loc)
 		STORE_FIELD(hv, conf, job_comp_loc, charp);
+	if(conf->job_comp_pass)
+		STORE_FIELD(hv, conf, job_comp_pass, charp);
+	STORE_FIELD(hv, conf, job_comp_port, uint32_t);
 	if(conf->job_comp_type)
 		STORE_FIELD(hv, conf, job_comp_type, charp);
 	if(conf->job_comp_user)
 		STORE_FIELD(hv, conf, job_comp_user, charp);
-	if(conf->job_comp_host)
-		STORE_FIELD(hv, conf, job_comp_host, charp);
-	if(conf->job_comp_pass)
-		STORE_FIELD(hv, conf, job_comp_pass, charp);
-	STORE_FIELD(hv, conf, job_comp_port, uint32_t);
+	if(conf->job_credential_private_key)
+		STORE_FIELD(hv, conf, job_credential_private_key, charp);
+	if(conf->job_credential_public_certificate)
+		STORE_FIELD(hv, conf, job_credential_public_certificate, charp);
 	STORE_FIELD(hv, conf, job_file_append, uint16_t); 
+	STORE_FIELD(hv, conf, job_requeue, uint16_t); 
+	STORE_FIELD(hv, conf, kill_on_bad_exit, uint16_t);
 	STORE_FIELD(hv, conf, kill_wait, uint16_t);
+	if(conf->licenses)
+		STORE_FIELD(hv, conf, licenses, charp);
 	if(conf->mail_prog)
 		STORE_FIELD(hv, conf, mail_prog, charp);
 	STORE_FIELD(hv, conf, max_job_cnt, uint16_t);
+	STORE_FIELD(hv, conf, max_mem_per_task, uint32_t);
 	STORE_FIELD(hv, conf, min_job_age, uint16_t);
 	if(conf->mpi_default)
 		STORE_FIELD(hv, conf, mpi_default, charp);
+	if(conf->mpi_params)
+		STORE_FIELD(hv, conf, mpi_params, charp);
 	STORE_FIELD(hv, conf, msg_timeout, uint16_t);
+	STORE_FIELD(hv, conf, next_job_id, uint32_t);
+	if(conf->node_prefix)
+		STORE_FIELD(hv, conf, node_prefix, charp);
+	STORE_FIELD(hv, conf, over_time_limit, uint16_t);
 	if(conf->plugindir)
 		STORE_FIELD(hv, conf, plugindir, charp);
 	if(conf->plugstack)
 		STORE_FIELD(hv, conf, plugstack, charp);
+	STORE_FIELD(hv, conf, priority_decay_hl, uint32_t);
+	STORE_FIELD(hv, conf, priority_favor_small, uint16_t);
+	STORE_FIELD(hv, conf, priority_max_age, uint32_t);
+	STORE_FIELD(hv, conf, priority_reset_period, uint16_t);
+	STORE_FIELD(hv, conf, priority_type, charp);
+	STORE_FIELD(hv, conf, priority_weight_age, uint32_t);
+	STORE_FIELD(hv, conf, priority_weight_fs, uint32_t);
+	STORE_FIELD(hv, conf, priority_weight_js, uint32_t);
+	STORE_FIELD(hv, conf, priority_weight_part, uint32_t);
+	STORE_FIELD(hv, conf, priority_weight_qos, uint32_t);
 	STORE_FIELD(hv, conf, private_data, uint16_t);
 	if(conf->proctrack_type)
 		STORE_FIELD(hv, conf, proctrack_type, charp);
 	if(conf->prolog)
 		STORE_FIELD(hv, conf, prolog, charp);
+	if(conf->prolog_slurmctld)
+		STORE_FIELD(hv, conf, prolog_slurmctld, charp);
 	STORE_FIELD(hv, conf, propagate_prio_process, uint16_t);
 	if(conf->propagate_rlimits)
 		STORE_FIELD(hv, conf, propagate_rlimits, charp);
 	if(conf->propagate_rlimits_except)
 		STORE_FIELD(hv, conf, propagate_rlimits_except, charp);
-	STORE_FIELD(hv, conf, ret2service, uint16_t);
 	STORE_FIELD(hv, conf, resume_rate, uint16_t);
 	if(conf->resume_program)
 		STORE_FIELD(hv, conf, resume_program, charp);
+	STORE_FIELD(hv, conf, resv_over_run, uint16_t);
+	STORE_FIELD(hv, conf, ret2service, uint16_t);
+	if(conf->salloc_default_command)
+		STORE_FIELD(hv, conf, salloc_default_command, charp);
+	if(conf->sched_params)
+		STORE_FIELD(hv, conf, sched_params, charp);
+	STORE_FIELD(hv, conf, sched_time_slice, uint16_t);
 	if(conf->schedtype)
 		STORE_FIELD(hv, conf, schedtype, charp);
 	STORE_FIELD(hv, conf, schedport, uint16_t);
@@ -101,6 +150,9 @@ slurm_ctl_conf_to_hv(slurm_ctl_conf_t* conf, HV* hv)
 	STORE_FIELD(hv, conf, slurm_user_id, uint32_t);
 	if(conf->slurm_user_name)
 		STORE_FIELD(hv, conf, slurm_user_name, charp);
+	STORE_FIELD(hv, conf, slurmd_user_id, uint32_t);
+	if(conf->slurmd_user_name)
+		STORE_FIELD(hv, conf, slurmd_user_name, charp);
 	STORE_FIELD(hv, conf, slurmctld_debug, uint16_t);
 	if(conf->slurmctld_logfile)
 		STORE_FIELD(hv, conf, slurmctld_logfile, charp);
@@ -111,14 +163,18 @@ slurm_ctl_conf_to_hv(slurm_ctl_conf_t* conf, HV* hv)
 	STORE_FIELD(hv, conf, slurmd_debug, uint16_t);
 	if(conf->slurmd_logfile)
 		STORE_FIELD(hv, conf, slurmd_logfile, charp);
+	if(conf->slurmd_pidfile)
+		STORE_FIELD(hv, conf, slurmd_pidfile, charp);
 	STORE_FIELD(hv, conf, slurmd_port, uint32_t);
 	if(conf->slurmd_spooldir)
 		STORE_FIELD(hv, conf, slurmd_spooldir, charp);
-	if(conf->slurmd_pidfile)
-		STORE_FIELD(hv, conf, slurmd_pidfile, charp);
 	STORE_FIELD(hv, conf, slurmd_timeout, uint16_t);
 	if(conf->slurm_conf)
 		STORE_FIELD(hv, conf, slurm_conf, charp);
+	if(conf->srun_epilog)
+		STORE_FIELD(hv, conf, srun_epilog, charp);
+	if(conf->srun_prolog)
+		STORE_FIELD(hv, conf, srun_prolog, charp);
 	if(conf->state_save_location)
 		STORE_FIELD(hv, conf, state_save_location, charp);
 	if(conf->suspend_exc_nodes)
@@ -140,22 +196,15 @@ slurm_ctl_conf_to_hv(slurm_ctl_conf_t* conf, HV* hv)
 		STORE_FIELD(hv, conf, task_prolog, charp);
 	if(conf->tmp_fs)
 		STORE_FIELD(hv, conf, tmp_fs, charp);
-	STORE_FIELD(hv, conf, wait_time, uint16_t);
-	if(conf->job_credential_private_key)
-		STORE_FIELD(hv, conf, job_credential_private_key, charp);
-	if(conf->job_credential_public_certificate)
-		STORE_FIELD(hv, conf, job_credential_public_certificate, charp);
-	if(conf->srun_prolog)
-		STORE_FIELD(hv, conf, srun_prolog, charp);
-	if(conf->srun_epilog)
-		STORE_FIELD(hv, conf, srun_epilog, charp);
-	if(conf->node_prefix)
-		STORE_FIELD(hv, conf, node_prefix, charp);
+	if(conf->topology_plugin)
+		STORE_FIELD(hv, conf, topology_plugin, charp);
+	STORE_FIELD(hv, conf, track_wckey, uint16_t);
 	STORE_FIELD(hv, conf, tree_width, uint16_t);
-	STORE_FIELD(hv, conf, use_pam, uint16_t);
 	if(conf->unkillable_program)
 		STORE_FIELD(hv, conf, unkillable_program, charp);
 	STORE_FIELD(hv, conf, unkillable_timeout, uint16_t);
+	STORE_FIELD(hv, conf, use_pam, uint16_t);
+	STORE_FIELD(hv, conf, wait_time, uint16_t);
 	return 0;
 }
 
diff --git a/contribs/perlapi/libslurm-perl/job.c b/contribs/perlapi/libslurm-perl/job.c
index f08d76db91dfd6d5bd6490817c502c7ce40aad85..4085be3d9a3eb65c5e8f9fa2dde8fc9f5e063aab 100644
--- a/contribs/perlapi/libslurm-perl/job.c
+++ b/contribs/perlapi/libslurm-perl/job.c
@@ -18,36 +18,18 @@ job_info_to_hv(job_info_t* job_info, HV* hv)
 	int j;
 	AV* avp;
 
-	STORE_FIELD(hv, job_info, job_id, uint32_t);
-	if(job_info->name)
-		STORE_FIELD(hv, job_info, name, charp);
-	STORE_FIELD(hv, job_info, batch_flag, uint16_t);
-	STORE_FIELD(hv, job_info, alloc_sid, uint32_t);
+	if(job_info->account)
+		STORE_FIELD(hv, job_info, account, charp);
 	if(job_info->alloc_node)
 		STORE_FIELD(hv, job_info, alloc_node, charp);
-	STORE_FIELD(hv, job_info, user_id, uint32_t);
-	STORE_FIELD(hv, job_info, group_id, uint32_t);
-	STORE_FIELD(hv, job_info, job_state, uint16_t);
-	STORE_FIELD(hv, job_info, time_limit, uint32_t);
-	STORE_FIELD(hv, job_info, submit_time, time_t);
-	STORE_FIELD(hv, job_info, start_time, time_t);
-	STORE_FIELD(hv, job_info, end_time, time_t);
-	STORE_FIELD(hv, job_info, suspend_time, time_t);
-	STORE_FIELD(hv, job_info, pre_sus_time, time_t);
-	STORE_FIELD(hv, job_info, priority, uint32_t);
-	if(job_info->nodes)
-		STORE_FIELD(hv, job_info, nodes, charp);
-	avp = newAV();
-	for(j = 0; ; j += 2) {
-		if(job_info->node_inx[j] == -1)
-			break;
-		av_store(avp, j, newSVuv(job_info->node_inx[j]));
-		av_store(avp, j+1, newSVuv(job_info->node_inx[j+1]));
-	}
-	hv_store_sv(hv, "node_inx", newRV_noinc((SV*)avp));
-	if(job_info->partition)
-		STORE_FIELD(hv, job_info, partition, charp);
-	STORE_FIELD(hv, job_info, num_cpu_groups, uint32_t);
+	STORE_FIELD(hv, job_info, alloc_sid, uint32_t);
+	STORE_FIELD(hv, job_info, assoc_id, uint32_t);
+	STORE_FIELD(hv, job_info, batch_flag, uint16_t);
+	if(job_info->command)
+		STORE_FIELD(hv, job_info, command, charp);
+	if(job_info->comment)
+		STORE_FIELD(hv, job_info, comment, charp);
+	STORE_FIELD(hv, job_info, contiguous, uint16_t);
 	avp = newAV();
 	for(j = 0; j < job_info->num_cpu_groups; j ++) {
 		av_store(avp, j, newSVuv(job_info->cpus_per_node[j]));
@@ -58,45 +40,93 @@ job_info_to_hv(job_info_t* job_info, HV* hv)
 		av_store(avp, j, newSVuv(job_info->cpu_count_reps[j]));
 	}
 	hv_store_sv(hv, "cpu_count_reps", newRV_noinc((SV*)avp));
-	STORE_FIELD(hv, job_info, num_procs, uint32_t);
-	STORE_FIELD(hv, job_info, num_nodes, uint32_t);
+	STORE_FIELD(hv, job_info, cpus_per_task, uint16_t);
+	STORE_FIELD(hv, job_info, dependency, charp);
+	STORE_FIELD(hv, job_info, end_time, time_t);
+	if(job_info->exc_nodes)
+		STORE_FIELD(hv, job_info, exc_nodes, charp);
+	avp = newAV();
+	for(j = 0; ; j += 2) {
+		if(job_info->exc_node_inx[j] == -1)
+			break;
+		av_store(avp, j, newSVuv(job_info->exc_node_inx[j]));
+		av_store(avp, j+1, newSVuv(job_info->exc_node_inx[j+1]));
+	}
+	hv_store_sv(hv, "exc_node_inx", newRV_noinc((SV*)avp));
+
+	STORE_FIELD(hv, job_info, exit_code, uint32_t);
+	if(job_info->features)
+		STORE_FIELD(hv, job_info, features, charp);
+	STORE_FIELD(hv, job_info, group_id, uint32_t);
+	STORE_FIELD(hv, job_info, job_id, uint32_t);
+	STORE_FIELD(hv, job_info, job_min_cores, uint16_t);
+	STORE_FIELD(hv, job_info, job_min_memory, uint32_t);
+	STORE_FIELD(hv, job_info, job_min_procs, uint16_t);
+	STORE_FIELD(hv, job_info, job_min_sockets, uint16_t);
+	STORE_FIELD(hv, job_info, job_min_threads, uint16_t);
+	STORE_FIELD(hv, job_info, job_min_tmp_disk, uint32_t);
+	STORE_FIELD(hv, job_info, job_state, uint16_t);
+	if(job_info->licenses)
+		STORE_FIELD(hv, job_info, licenses, charp);
+	STORE_FIELD(hv, job_info, max_cores, uint16_t);
 	STORE_FIELD(hv, job_info, max_nodes, uint32_t);
-	STORE_FIELD(hv, job_info, min_sockets, uint16_t);
 	STORE_FIELD(hv, job_info, max_sockets, uint16_t);
+	STORE_FIELD(hv, job_info, max_threads, uint16_t);
 	STORE_FIELD(hv, job_info, min_cores, uint16_t);
-	STORE_FIELD(hv, job_info, max_cores, uint16_t);
+	STORE_FIELD(hv, job_info, min_sockets, uint16_t);
 	STORE_FIELD(hv, job_info, min_threads, uint16_t);
-	STORE_FIELD(hv, job_info, max_threads, uint16_t);
-	STORE_FIELD(hv, job_info, shared, uint16_t);
-	STORE_FIELD(hv, job_info, contiguous, uint16_t);
-	STORE_FIELD(hv, job_info, cpus_per_task, uint16_t);
+	if(job_info->name)
+		STORE_FIELD(hv, job_info, name, charp);
+	if(job_info->network)
+		STORE_FIELD(hv, job_info, network, charp);
+	if(job_info->nodes)
+		STORE_FIELD(hv, job_info, nodes, charp);
+	avp = newAV();
+	for(j = 0; ; j += 2) {
+		if(job_info->node_inx[j] == -1)
+			break;
+		av_store(avp, j, newSVuv(job_info->node_inx[j]));
+		av_store(avp, j+1, newSVuv(job_info->node_inx[j+1]));
+	}
+	hv_store_sv(hv, "node_inx", newRV_noinc((SV*)avp));
+	STORE_FIELD(hv, job_info, ntasks_per_core, uint16_t);
 	STORE_FIELD(hv, job_info, ntasks_per_node, uint16_t);
 	STORE_FIELD(hv, job_info, ntasks_per_socket, uint16_t);
-	STORE_FIELD(hv, job_info, ntasks_per_core, uint16_t);
-	STORE_FIELD(hv, job_info, job_min_procs, uint16_t);
-	STORE_FIELD(hv, job_info, job_min_sockets, uint16_t);
-	STORE_FIELD(hv, job_info, job_min_cores, uint16_t);
-	STORE_FIELD(hv, job_info, job_min_threads, uint16_t);
-	STORE_FIELD(hv, job_info, job_min_memory, uint32_t);
-	STORE_FIELD(hv, job_info, job_min_tmp_disk, uint32_t);
+	STORE_FIELD(hv, job_info, num_cpu_groups, uint32_t);
+	STORE_FIELD(hv, job_info, num_nodes, uint32_t);
+	STORE_FIELD(hv, job_info, num_procs, uint32_t);
+	if(job_info->partition)
+		STORE_FIELD(hv, job_info, partition, charp);
+	STORE_FIELD(hv, job_info, pre_sus_time, time_t);
+	STORE_FIELD(hv, job_info, priority, uint32_t);
 	if(job_info->req_nodes)
 		STORE_FIELD(hv, job_info, req_nodes, charp);
-	/* TODO: req_node_inx */
-	if(job_info->exc_nodes)
-		STORE_FIELD(hv, job_info, exc_nodes, charp);
-	/* TODO: exc_node_inx */
-	if(job_info->features)
-		STORE_FIELD(hv, job_info, features, charp);
-	STORE_FIELD(hv, job_info, dependency, charp);
-	STORE_FIELD(hv, job_info, exit_code, uint32_t);
-	if(job_info->account)
-		STORE_FIELD(hv, job_info, account, charp);
-	STORE_FIELD(hv, job_info, state_reason, uint16_t);
-	if(job_info->network)
-		STORE_FIELD(hv, job_info, network, charp);
-	if(job_info->comment)
-		STORE_FIELD(hv, job_info, comment, charp);
+	avp = newAV();
+	for(j = 0; ; j += 2) {
+		if(job_info->req_node_inx[j] == -1)
+			break;
+		av_store(avp, j, newSVuv(job_info->req_node_inx[j]));
+		av_store(avp, j+1, newSVuv(job_info->req_node_inx[j+1]));
+	}
+	hv_store_sv(hv, "req_node_inx", newRV_noinc((SV*)avp));
+	STORE_FIELD(hv, job_info, requeue, uint16_t);
+	STORE_FIELD(hv, job_info, restart_cnt, uint16_t);
+	if(job_info->resv_name)
+		STORE_FIELD(hv, job_info, resv_name, charp);
 	/* TODO: select_jobinfo */
+	STORE_FIELD(hv, job_info, shared, uint16_t);
+	STORE_FIELD(hv, job_info, start_time, time_t);
+	if(job_info->state_desc)
+		STORE_FIELD(hv, job_info, state_desc, charp);
+	STORE_FIELD(hv, job_info, state_reason, uint16_t);
+	STORE_FIELD(hv, job_info, submit_time, time_t);
+	STORE_FIELD(hv, job_info, suspend_time, time_t);
+	STORE_FIELD(hv, job_info, time_limit, uint32_t);
+	STORE_FIELD(hv, job_info, user_id, uint32_t);
+	if(job_info->wckey)
+		STORE_FIELD(hv, job_info, wckey, charp);
+	if(job_info->work_dir)
+		STORE_FIELD(hv, job_info, work_dir, charp);
 			
 	return 0;
 }
@@ -133,22 +163,37 @@ job_info_msg_to_hv(job_info_msg_t* job_info_msg, HV* hv)
 int
 job_step_info_to_hv(job_step_info_t* step_info, HV* hv)
 {
-	STORE_FIELD(hv, step_info, job_id, uint32_t);
-	STORE_FIELD(hv, step_info, step_id, uint16_t);
-	STORE_FIELD(hv, step_info, user_id, uint32_t);
-	STORE_FIELD(hv, step_info, num_tasks, uint32_t);
-	STORE_FIELD(hv, step_info, start_time, time_t);
-	STORE_FIELD(hv, step_info, run_time, time_t);
+	int j;
+	AV* avp;
 
-	if(step_info->partition)
-		STORE_FIELD(hv, step_info, partition, charp);
-	if(step_info->nodes)
-		STORE_FIELD(hv, step_info, nodes, charp);
+	if(step_info->ckpt_dir)
+		STORE_FIELD(hv, step_info, ckpt_dir, charp);
+	STORE_FIELD(hv, step_info, ckpt_interval, uint16_t);
+	STORE_FIELD(hv, step_info, job_id, uint32_t);
 	if(step_info->name)
 		STORE_FIELD(hv, step_info, name, charp);
 	if(step_info->network)
 		STORE_FIELD(hv, step_info, network, charp);
-	/* TODO: node_inx */
+	if(step_info->nodes)
+		STORE_FIELD(hv, step_info, nodes, charp);
+	avp = newAV();
+	for(j = 0; ; j += 2) {
+		if(step_info->node_inx[j] == -1)
+			break;
+		av_store(avp, j, newSVuv(step_info->node_inx[j]));
+		av_store(avp, j+1, newSVuv(step_info->node_inx[j+1]));
+	}
+	hv_store_sv(hv, "node_inx", newRV_noinc((SV*)avp));
+	STORE_FIELD(hv, step_info, num_tasks, uint32_t);
+	if(step_info->partition)
+		STORE_FIELD(hv, step_info, partition, charp);
+	if(step_info->resv_ports)
+		STORE_FIELD(hv, step_info, resv_ports, charp);
+	STORE_FIELD(hv, step_info, run_time, time_t);
+	STORE_FIELD(hv, step_info, start_time, time_t);
+	STORE_FIELD(hv, step_info, step_id, uint16_t);
+	STORE_FIELD(hv, step_info, user_id, uint32_t);
+
 	return 0;
 }
 
@@ -188,19 +233,19 @@ slurm_step_layout_to_hv(slurm_step_layout_t* step_layout, HV* hv)
 	int i, j;
 
 	STORE_FIELD(hv, step_layout, node_cnt, uint16_t);
-	STORE_FIELD(hv, step_layout, task_cnt, uint32_t);
 	if (step_layout->node_list)
 		STORE_FIELD(hv, step_layout, node_list, charp);
 	else {
 		Perl_warn(aTHX_ "node_list missing in slurm_step_layout_t");
 		return -1;
 	}
-	
+	STORE_FIELD(hv, step_layout, plane_size, uint16_t);
 	avp = newAV();
 	for(i = 0; i < step_layout->node_cnt; i ++)
 		av_store(avp, i, newSVuv(step_layout->tasks[i]));
 	hv_store_sv(hv, "tasks", newRV_noinc((SV*)avp));
-	
+	STORE_FIELD(hv, step_layout, task_cnt, uint32_t);
+	STORE_FIELD(hv, step_layout, task_dist, uint16_t);
 	avp = newAV();
 	for(i = 0; i < step_layout->node_cnt; i ++) {
 		avp2 = newAV();
@@ -210,7 +255,5 @@ slurm_step_layout_to_hv(slurm_step_layout_t* step_layout, HV* hv)
 	}
 	hv_store_sv(hv, "tids", newRV_noinc((SV*)avp));
 	
-	STORE_FIELD(hv, step_layout, task_dist, uint16_t);
-	STORE_FIELD(hv, step_layout, plane_size, uint16_t);
 	return 0;
 }
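
The node_inx loops added above follow SLURM's convention of encoding node
index ranges as inclusive (start, end) pairs terminated by -1. A standalone
sketch of decoding that layout (the array contents are invented):

    #include <stdio.h>

    int main(void)
    {
        int node_inx[] = { 0, 3, 8, 9, -1 };    /* two ranges: 0-3, 8-9 */
        int j;

        /* walk the pairs until the -1 sentinel, as the XS code does */
        for (j = 0; node_inx[j] != -1; j += 2)
            printf("nodes %d through %d\n", node_inx[j], node_inx[j + 1]);
        return 0;
    }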
diff --git a/contribs/perlapi/libslurm-perl/launch.c b/contribs/perlapi/libslurm-perl/launch.c
index e5901cb97b8561fc33a22938931a73627ab818c5..3191c2bf3792ef179a8f0cca71dc78dddf59cab9 100644
--- a/contribs/perlapi/libslurm-perl/launch.c
+++ b/contribs/perlapi/libslurm-perl/launch.c
@@ -113,14 +113,20 @@ hv_to_slurm_step_launch_params(HV* hv, slurm_step_launch_params_t* params)
 	FETCH_FIELD(hv, params, cpu_bind, charp, FALSE);
 	FETCH_FIELD(hv, params, mem_bind_type, uint16_t, FALSE);
 	FETCH_FIELD(hv, params, mem_bind, charp, FALSE);
+
+	FETCH_FIELD(hv, params, max_sockets, uint16_t, FALSE);
+	FETCH_FIELD(hv, params, max_cores, uint16_t, FALSE);
+	FETCH_FIELD(hv, params, max_threads, uint16_t, FALSE);
 	FETCH_FIELD(hv, params, cpus_per_task, uint16_t, FALSE);
-	FETCH_FIELD(hv, params, ntasks_per_node, uint16_t, FALSE);
-	FETCH_FIELD(hv, params, ntasks_per_socket, uint16_t, FALSE);
-	FETCH_FIELD(hv, params, ntasks_per_core, uint16_t, FALSE);
 	FETCH_FIELD(hv, params, task_dist, uint16_t, FALSE);
-	FETCH_FIELD(hv, params, plane_size, uint16_t, FALSE);
-	FETCH_FIELD(hv, params, mpi_plugin_name, charp, FALSE);
+	FETCH_FIELD(hv, params, preserve_env, bool, FALSE);
 
+	FETCH_FIELD(hv, params, mpi_plugin_name, charp, FALSE);
+	FETCH_FIELD(hv, params, open_mode, uint8_t, FALSE);
+	FETCH_FIELD(hv, params, acctg_freq, uint16_t, FALSE);
+	FETCH_FIELD(hv, params, pty, bool, FALSE);
+	FETCH_FIELD(hv, params, ckpt_dir, charp, FALSE);
+	
 	return 0;
 }
 
diff --git a/contribs/perlapi/libslurm-perl/msg.h b/contribs/perlapi/libslurm-perl/msg.h
index 2f36413fff89b2788718d0a165a6c4d3c015a898..6dc418d109ec5d9270d59924df0c478b1ddf1c26 100644
--- a/contribs/perlapi/libslurm-perl/msg.h
+++ b/contribs/perlapi/libslurm-perl/msg.h
@@ -14,7 +14,16 @@ typedef char* charp;
  */
 inline static int av_store_uint16_t(AV* av, int index, uint16_t val)
 {
-	SV* sv = newSVuv(val);
+	SV* sv = NULL;
+	/* Perl has a hard time figuring out that an unsigned int is
+	   equal to INFINITE or NO_VAL since they are treated as
+	   signed ints, so we handle this here. */
+	if(val == (uint16_t)INFINITE)
+		sv = newSViv(INFINITE);
+	else if(val == (uint16_t)NO_VAL)
+		sv = newSViv(NO_VAL);
+	else
+		sv = newSViv(val);
 	
 	if (av_store(av, (I32)index, sv) == NULL) {
 		SvREFCNT_dec(sv);
@@ -28,7 +37,16 @@ inline static int av_store_uint16_t(AV* av, int index, uint16_t val)
  */
 inline static int av_store_uint32_t(AV* av, int index, uint32_t val)
 {
-	SV* sv = newSVuv(val);
+	SV* sv = NULL;
+	/* Perl has a hard time figuring out that an unsigned int is
+	   equal to INFINITE or NO_VAL since they are treated as
+	   signed ints, so we handle this here. */
+	if(val == (uint32_t)INFINITE)
+		sv = newSViv(INFINITE);
+	else if(val == (uint32_t)NO_VAL)
+		sv = newSViv(NO_VAL);
+	else
+		sv = newSViv(val);
 	
 	if (av_store(av, (I32)index, sv) == NULL) {
 		SvREFCNT_dec(sv);
@@ -37,7 +55,6 @@ inline static int av_store_uint32_t(AV* av, int index, uint32_t val)
 	return 0;
 }
 
-
 /*
  * store an int into AV 
  */
@@ -74,8 +91,17 @@ inline static int hv_store_charp(HV* hv, const char *key, charp val)
  */
 inline static int hv_store_uint32_t(HV* hv, const char *key, uint32_t val)
 {
-	SV* sv = newSVuv(val);
-	
+	SV* sv = NULL;
+	/* Perl has a hard time figuring out that an unsigned int is
+	   equal to INFINITE or NO_VAL since they are treated as
+	   signed ints, so we handle this here. */
+	if(val == (uint32_t)INFINITE)
+		sv = newSViv(INFINITE);
+	else if(val == (uint32_t)NO_VAL)
+		sv = newSViv(NO_VAL);
+	else
+		sv = newSVuv(val);
+
 	if (!key || hv_store(hv, key, (I32)strlen(key), sv, 0) == NULL) {
 		SvREFCNT_dec(sv);
 		return -1;
@@ -88,7 +114,16 @@ inline static int hv_store_uint32_t(HV* hv, const char *key, uint32_t val)
  */
 inline static int hv_store_uint16_t(HV* hv, const char *key, uint16_t val)
 {
-	SV* sv = newSVuv(val);
+	SV* sv = NULL;
+	/* Perl has a hard time figuring out that an unsigned int is
+	   equal to INFINITE or NO_VAL since they are treated as
+	   signed ints, so we handle this here. */
+	if(val == (uint16_t)INFINITE)
+		sv = newSViv(INFINITE);
+	else if(val == (uint16_t)NO_VAL)
+		sv = newSViv(NO_VAL);
+	else
+		sv = newSVuv(val);
 	
 	if (!key || hv_store(hv, key, (I32)strlen(key), sv, 0) == NULL) {
 		SvREFCNT_dec(sv);
@@ -102,7 +137,16 @@ inline static int hv_store_uint16_t(HV* hv, const char *key, uint16_t val)
  */
 inline static int hv_store_uint8_t(HV* hv, const char *key, uint8_t val)
 {
-	SV* sv = newSVuv(val);
+	SV* sv = NULL;
+	/* Perl has a hard time figuring out that an unsigned int is
+	   equal to INFINITE or NO_VAL since they are treated as
+	   signed ints, so we handle this here. */
+	if(val == (uint8_t)INFINITE)
+		sv = newSViv(INFINITE);
+	else if(val == (uint8_t)NO_VAL)
+		sv = newSViv(NO_VAL);
+	else
+		sv = newSVuv(val);
 	
 	if (!key || hv_store(hv, key, (I32)strlen(key), sv, 0) == NULL) {
 		SvREFCNT_dec(sv);
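
The casts in the msg.h helpers above matter because INFINITE and NO_VAL are
32-bit constants that get truncated when stored in narrower fields. A
standalone sketch of the comparison problem, with the constants hard-coded
to the values slurm.h defines:

    #include <stdio.h>
    #include <stdint.h>

    #define INFINITE 0xffffffff    /* values as defined in slurm.h */
    #define NO_VAL   0xfffffffe

    int main(void)
    {
        uint16_t limit = (uint16_t)INFINITE;    /* truncated to 0xffff */

        /* Without the cast the 16-bit value is compared against the
         * full 32-bit constant and never matches. */
        printf("naive:  %d\n", limit == INFINITE);              /* 0 */
        printf("casted: %d\n", limit == (uint16_t)INFINITE);    /* 1 */
        return 0;
    }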
diff --git a/contribs/perlapi/libslurm-perl/node.c b/contribs/perlapi/libslurm-perl/node.c
index 7b2b46ca8641a7394942e963c041fd7b84def73b..bda3f0eae8f38e87982956a7277cbe3b34bf3732 100644
--- a/contribs/perlapi/libslurm-perl/node.c
+++ b/contribs/perlapi/libslurm-perl/node.c
@@ -15,6 +15,12 @@
 int
 node_info_to_hv(node_info_t* node_info, HV* hv)
 {
+	if(node_info->arch)
+		STORE_FIELD(hv, node_info, arch, charp);
+	STORE_FIELD(hv, node_info, cores, uint16_t);
+	STORE_FIELD(hv, node_info, cpus, uint16_t);
+	if(node_info->features)
+		STORE_FIELD(hv, node_info, features, charp);
 	if (node_info->name)
 		STORE_FIELD(hv, node_info, name, charp);
 	else {
@@ -22,17 +28,16 @@ node_info_to_hv(node_info_t* node_info, HV* hv)
 		return -1;
 	}
 	STORE_FIELD(hv, node_info, node_state, uint16_t);
-	STORE_FIELD(hv, node_info, cpus, uint16_t);
-	STORE_FIELD(hv, node_info, used_cpus, uint16_t);
-	STORE_FIELD(hv, node_info, sockets, uint16_t);
-	STORE_FIELD(hv, node_info, cores, uint16_t);
+	if(node_info->os)
+		STORE_FIELD(hv, node_info, os, charp);
 	STORE_FIELD(hv, node_info, real_memory, uint32_t);
-	STORE_FIELD(hv, node_info, tmp_disk, uint32_t);
-	STORE_FIELD(hv, node_info, weight, uint32_t);
-	if(node_info->features)
-		STORE_FIELD(hv, node_info, features, charp);
 	if(node_info->reason)
 		STORE_FIELD(hv, node_info, reason, charp);
+	STORE_FIELD(hv, node_info, sockets, uint16_t);
+	STORE_FIELD(hv, node_info, threads, uint16_t);
+	STORE_FIELD(hv, node_info, tmp_disk, uint32_t);
+	STORE_FIELD(hv, node_info, used_cpus, uint16_t);
+	STORE_FIELD(hv, node_info, weight, uint32_t);
 	return 0;
 }
 /*
@@ -71,11 +76,13 @@ hv_to_update_node_msg(HV* hv, update_node_msg_t *update_msg)
 	update_msg->features = NULL;
 	update_msg->reason = NULL;
 	update_msg->node_state = (uint16_t) NO_VAL;
+	update_msg->weight = (uint32_t) NO_VAL;
 
 	FETCH_FIELD(hv, update_msg, node_names, charp, TRUE);
 	FETCH_FIELD(hv, update_msg, node_state, uint16_t, FALSE);
 	FETCH_FIELD(hv, update_msg, reason, charp, FALSE);
 	FETCH_FIELD(hv, update_msg, features, charp, FALSE);
+	FETCH_FIELD(hv, update_msg, weight, uint32_t, FALSE);
 	return 0;
 }
 
diff --git a/contribs/perlapi/libslurm-perl/partition.c b/contribs/perlapi/libslurm-perl/partition.c
index f0f2b30deb24a6645a93bff3e616b7c5094a1f8b..d0e9ac6e5b0c5d37e5f5f0634559c3bcf309efda 100644
--- a/contribs/perlapi/libslurm-perl/partition.c
+++ b/contribs/perlapi/libslurm-perl/partition.c
@@ -15,28 +15,45 @@
 int
 part_info_to_hv(partition_info_t* part_info, HV* hv)
 {
+	if (part_info->allow_alloc_nodes)
+		STORE_FIELD(hv, part_info, allow_alloc_nodes, charp);
+	if (part_info->allow_groups)
+		STORE_FIELD(hv, part_info, allow_groups, charp);
+	STORE_FIELD(hv, part_info, default_part, uint16_t);
+	STORE_FIELD(hv, part_info, default_time, uint32_t);
+	STORE_FIELD(hv, part_info, disable_root_jobs, uint16_t);
+	STORE_FIELD(hv, part_info, hidden, uint16_t);
+	STORE_FIELD(hv, part_info, max_nodes, uint32_t);
+	STORE_FIELD(hv, part_info, max_share, uint16_t);
+	STORE_FIELD(hv, part_info, max_time, uint32_t);
+	STORE_FIELD(hv, part_info, min_nodes, uint32_t);
 	if (part_info->name)
 		STORE_FIELD(hv, part_info, name, charp);
 	else {
 		Perl_warn(aTHX_ "partition name missing in partition_info_t");
 		return -1;
 	}
-	STORE_FIELD(hv, part_info, max_time, uint32_t);
-	STORE_FIELD(hv, part_info, max_nodes, uint32_t);
-	STORE_FIELD(hv, part_info, min_nodes, uint32_t);
-	STORE_FIELD(hv, part_info, total_nodes, uint32_t);
-	STORE_FIELD(hv, part_info, total_cpus, uint32_t);
+	/* no store for int pointers yet */
+	if (part_info->node_inx) {
+		int j;
+		AV* avp = newAV();
+		for(j = 0; ; j += 2) {
+			if(part_info->node_inx[j] == -1)
+				break;
+			av_store(avp, j, newSVuv(part_info->node_inx[j]));
+			av_store(avp, j+1, newSVuv(part_info->node_inx[j+1]));
+		}
+		hv_store_sv(hv, "node_inx", newRV_noinc((SV*)avp));
+	}
 	STORE_FIELD(hv, part_info, node_scaling, uint16_t);
-	STORE_FIELD(hv, part_info, default_part, uint16_t);
-	STORE_FIELD(hv, part_info, hidden, uint16_t);
-	STORE_FIELD(hv, part_info, root_only, uint16_t);
-	STORE_FIELD(hv, part_info, max_share, uint16_t);
-	STORE_FIELD(hv, part_info, state_up, uint16_t);
 	if (part_info->nodes)
 		STORE_FIELD(hv, part_info, nodes, charp);
-	/* TODO: node_inx */
-	if (part_info->allow_groups)
-		STORE_FIELD(hv, part_info, allow_groups, charp);
+	STORE_FIELD(hv, part_info, priority, uint16_t);
+	STORE_FIELD(hv, part_info, root_only, uint16_t);
+	STORE_FIELD(hv, part_info, state_up, uint16_t);
+	STORE_FIELD(hv, part_info, total_cpus, uint32_t);
+	STORE_FIELD(hv, part_info, total_nodes, uint32_t);
+
 	return 0;
 }
 
@@ -55,7 +72,8 @@ partition_info_msg_to_hv(partition_info_msg_t* part_info_msg, HV* hv)
 	avp = newAV();
 	for(i = 0; i < part_info_msg->record_count; i ++) {
 		hvp = newHV();
-		if (part_info_to_hv(part_info_msg->partition_array + i, hvp) < 0) {
+		if (part_info_to_hv(part_info_msg->partition_array + i, hvp)
+		    < 0) {
 			SvREFCNT_dec(hvp);
 			SvREFCNT_dec(avp);
 			return -1;
@@ -71,20 +89,24 @@ hv_to_update_part_msg(HV* hv, update_part_msg_t* part_msg)
 {
 	slurm_init_part_desc_msg(part_msg);
 	
-	FETCH_FIELD(hv, part_msg, name, charp, TRUE);
-	FETCH_FIELD(hv, part_msg, max_time, uint32_t, FALSE);
+	FETCH_FIELD(hv, part_msg, allow_alloc_nodes, charp, FALSE);
+	FETCH_FIELD(hv, part_msg, allow_groups, charp, FALSE);
+	FETCH_FIELD(hv, part_msg, default_part, uint16_t, FALSE);
+	FETCH_FIELD(hv, part_msg, default_time, uint32_t, FALSE);
+	FETCH_FIELD(hv, part_msg, disable_root_jobs, uint16_t, FALSE);
+	FETCH_FIELD(hv, part_msg, hidden, uint16_t, FALSE);
 	FETCH_FIELD(hv, part_msg, max_nodes, uint32_t, FALSE);
+	FETCH_FIELD(hv, part_msg, max_share, uint16_t, FALSE);
+	FETCH_FIELD(hv, part_msg, max_time, uint32_t, FALSE);
 	FETCH_FIELD(hv, part_msg, min_nodes, uint32_t, FALSE);
-	FETCH_FIELD(hv, part_msg, total_nodes, uint32_t, FALSE);
-	FETCH_FIELD(hv, part_msg, total_cpus, uint32_t, FALSE);
+	FETCH_FIELD(hv, part_msg, name, charp, TRUE);
+	/* node_inx not used */
 	FETCH_FIELD(hv, part_msg, node_scaling, uint16_t, FALSE);
-	FETCH_FIELD(hv, part_msg, default_part, uint16_t, FALSE);
-	FETCH_FIELD(hv, part_msg, hidden, uint16_t, FALSE);
+	FETCH_FIELD(hv, part_msg, nodes, charp, FALSE);
+	FETCH_FIELD(hv, part_msg, priority, uint16_t, FALSE);
 	FETCH_FIELD(hv, part_msg, root_only, uint16_t, FALSE);
-	FETCH_FIELD(hv, part_msg, max_share, uint16_t, FALSE);
 	FETCH_FIELD(hv, part_msg, state_up, uint16_t, FALSE);
-	FETCH_FIELD(hv, part_msg, nodes, charp, FALSE);
-	/* node_inx not used */
-	FETCH_FIELD(hv, part_msg, allow_groups, charp, FALSE);
+	FETCH_FIELD(hv, part_msg, total_cpus, uint32_t, FALSE);
+	FETCH_FIELD(hv, part_msg, total_nodes, uint32_t, FALSE);
 	return 0;
 }
diff --git a/contribs/phpext/Makefile.in b/contribs/phpext/Makefile.in
index 59dd797713988a07b1efab000868d8a805b1c837..0ca5a5277662306ecf866e342a4bf15e14d77b3d 100644
--- a/contribs/phpext/Makefile.in
+++ b/contribs/phpext/Makefile.in
@@ -40,14 +40,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -77,6 +81,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
diff --git a/contribs/phpext/slurm_php/slurm_php.c b/contribs/phpext/slurm_php/slurm_php.c
index 14e9690bdb3b08fc2f2ca0dbe6eac5098618dda4..d09b9fcd3254c3cfeb18491d079ac104652d0be8 100644
--- a/contribs/phpext/slurm_php/slurm_php.c
+++ b/contribs/phpext/slurm_php/slurm_php.c
@@ -9,7 +9,8 @@
  *  Written by Danny Auble <da@llnl.gov>
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/contribs/phpext/slurm_php/slurm_php.h b/contribs/phpext/slurm_php/slurm_php.h
index d857934aef413efe655d502d2615113217df5e84..d3be22f8cd36d1b41de4321abd198d274202857e 100644
--- a/contribs/phpext/slurm_php/slurm_php.h
+++ b/contribs/phpext/slurm_php/slurm_php.h
@@ -9,7 +9,8 @@
  *  Written by Danny Auble <da@llnl.gov>
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/contribs/python/Makefile.in b/contribs/python/Makefile.in
index f1944ed811986de8ca4522076c9b76c20c5e27ee..0c7827d9911f29d8e5627400f730ccca735008f2 100644
--- a/contribs/python/Makefile.in
+++ b/contribs/python/Makefile.in
@@ -40,14 +40,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -89,6 +93,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
diff --git a/contribs/python/hostlist/Makefile.in b/contribs/python/hostlist/Makefile.in
index 16b843e8d3b740935b7092fd151de091c3cb4e29..bc62bece60aa7177f1d9e940ca11f68a538d3025 100644
--- a/contribs/python/hostlist/Makefile.in
+++ b/contribs/python/hostlist/Makefile.in
@@ -41,14 +41,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -90,6 +94,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
diff --git a/contribs/python/hostlist/test/Makefile.in b/contribs/python/hostlist/test/Makefile.in
index 49426141dca935e58a14ec158583b441e3854779..a531d4c9430cc6458e6a8925483f26b6a1efa6ea 100644
--- a/contribs/python/hostlist/test/Makefile.in
+++ b/contribs/python/hostlist/test/Makefile.in
@@ -40,14 +40,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -77,6 +81,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
diff --git a/contribs/skilling.c b/contribs/skilling.c
new file mode 100644
index 0000000000000000000000000000000000000000..a6ff3b0deea07a66c686194928e5ec05476c2e35
--- /dev/null
+++ b/contribs/skilling.c
@@ -0,0 +1,126 @@
+//+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+// Filename:  hilbert.c
+// 
+// Purpose:   Hilbert and Linked-list utility procedures for BayeSys3.
+// 
+// History:   TreeSys.c   17 Apr 1996 - 31 Dec 2002
+//            Peano.c     10 Apr 2001 - 11 Jan 2003
+//            merged       1 Feb 2003
+//            Arith debug 28 Aug 2003
+//            Hilbert.c   14 Oct 2003
+//                         2 Dec 2003
+//-----------------------------------------------------------------------------
+/*
+    Copyright (c) 1996-2003 Maximum Entropy Data Consultants Ltd,
+                            114c Milton Road, Cambridge CB4 1XE, England
+
+    This library is free software; you can redistribute it and/or
+    modify it under the terms of the GNU Lesser General Public
+    License as published by the Free Software Foundation; either
+    version 2.1 of the License, or (at your option) any later version.
+
+    This library is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+    Lesser General Public License for more details.
+
+    You should have received a copy of the GNU Lesser General Public
+    License along with this library; if not, write to the Free Software
+    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+
+#include "license.txt"
+*/
+
+#include <stdio.h>
+#include <stdlib.h>	/* atoi(), exit() */
+typedef unsigned int coord_t; // char,short,int for up to 8,16,32 bits per word
+
+static void TransposetoAxes(
+coord_t* X,            // I O  position   [n]
+int      b,            // I    # bits
+int      n)            // I    dimension
+{
+    coord_t  M, P, Q, t;
+    int      i;
+
+// Gray decode by  H ^ (H/2)
+    t = X[n-1] >> 1;
+    for( i = n-1; i; i-- )
+        X[i] ^= X[i-1];
+    X[0] ^= t;
+
+// Undo excess work
+    M = 2 << (b - 1);
+    for( Q = 2; Q != M; Q <<= 1 )
+    {
+        P = Q - 1;
+        for( i = n-1; i; i-- )
+            if( X[i] & Q ) X[0] ^= P;                              // invert
+            else{ t = (X[0] ^ X[i]) & P;  X[0] ^= t;  X[i] ^= t; } // exchange
+        if( X[0] & Q ) X[0] ^= P;                                  // invert
+    }
+} 
+static void AxestoTranspose(
+coord_t* X,            // I O  position   [n]
+int      b,            // I    # bits
+int      n)            // I    dimension
+{
+    coord_t  P, Q, t;
+    int      i;
+
+// Inverse undo
+    for( Q = 1 << (b - 1); Q > 1; Q >>= 1 )
+    {
+        P = Q - 1;
+        if( X[0] & Q ) X[0] ^= P;                                  // invert
+        for( i = 1; i < n; i++ )
+            if( X[i] & Q ) X[0] ^= P;                              // invert
+            else{ t = (X[0] ^ X[i]) & P;  X[0] ^= t;  X[i] ^= t; } // exchange
+    }
+
+// Gray encode (inverse of decode)
+    for( i = 1; i < n; i++ )
+        X[i] ^= X[i-1];
+    t = X[n-1];
+    for( i = 1; i < b; i <<= 1 )
+        X[n-1] ^= X[n-1] >> i;
+    t ^= X[n-1];
+    for( i = n-2; i >= 0; i-- )
+        X[i] ^= t;
+}
+
+/* This is a sample use of Skilling's functions above.
+ * You will need to modify the code if the value of BITS or DIMS is changed.
+ * The output of this can be used to order the node name entries in slurm.conf. */
+#define BITS 5	/* number of bits used to store the axis values, size of Hilbert space */
+#define DIMS 3	/* number of dimensions in the Hilbert space */
+int main(int argc, char **argv)
+{
+	int i, H;
+	coord_t X[DIMS]; // any position in 32x32x32 cube for BITS=5
+	if (argc != (DIMS + 1)) {
+		printf("Usage %s X Y Z\n", argv[0]);
+		exit(1);
+	}
+	for (i=0; i<DIMS; i++)
+		X[i] = atoi(argv[i+1]);
+	printf("Axis coordinates = %d %d %d\n", X[0], X[1], X[2]);
+
+	AxestoTranspose(X, BITS, DIMS); // Hilbert transpose for 5 bits and 3 dimensions
+	H = ((X[2]>>0 & 1) <<  0) + ((X[1]>>0 & 1) <<  1) + ((X[0]>>0 & 1) <<  2) +
+	    ((X[2]>>1 & 1) <<  3) + ((X[1]>>1 & 1) <<  4) + ((X[0]>>1 & 1) <<  5) +
+	    ((X[2]>>2 & 1) <<  6) + ((X[1]>>2 & 1) <<  7) + ((X[0]>>2 & 1) <<  8) +
+	    ((X[2]>>3 & 1) <<  9) + ((X[1]>>3 & 1) << 10) + ((X[0]>>3 & 1) << 11) +
+	    ((X[2]>>4 & 1) << 12) + ((X[1]>>4 & 1) << 13) + ((X[0]>>4 & 1) << 14);
+	printf("Hilbert integer  = %d (%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d)\n", H,
+		X[0]>>4 & 1, X[1]>>4 & 1, X[2]>>4 & 1, X[0]>>3 & 1, X[1]>>3 & 1,
+		X[2]>>3 & 1, X[0]>>2 & 1, X[1]>>2 & 1, X[2]>>2 & 1, X[0]>>1 & 1,
+		X[1]>>1 & 1, X[2]>>1 & 1, X[0]>>0 & 1, X[1]>>0 & 1, X[2]>>0 & 1);
+
+#if 0
+	/* Used for validation purposes */
+	TransposetoAxes(X, BITS, DIMS); // Hilbert transpose for 5 bits and 3 dimensions
+	printf("Axis coordinates = %d %d %d\n", X[0], X[1], X[2]);
+#endif
+}
+
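
The comment in skilling.c warns that main() must be edited whenever BITS or
DIMS changes, because the Hilbert integer is assembled with a hard-coded
bit-interleave expression. A sketch of a generic helper that builds the same
integer for any BITS/DIMS; it assumes X[] already holds the AxestoTranspose()
result and reuses skilling.c's coord_t typedef:

    #include <stdio.h>

    typedef unsigned int coord_t;    /* same typedef as skilling.c */

    /* Interleave the transposed coordinate bits into one Hilbert
     * integer, matching the hard-coded expression for BITS=5, DIMS=3. */
    static unsigned long hilbert_integer(const coord_t *X, int bits, int dims)
    {
        unsigned long h = 0;
        int b, d, pos = 0;

        for (b = 0; b < bits; b++)              /* low-order bits first */
            for (d = dims - 1; d >= 0; d--)     /* X[2], X[1], X[0] order */
                h |= (unsigned long)((X[d] >> b) & 1) << pos++;
        return h;
    }

    int main(void)
    {
        coord_t X[3] = { 5, 10, 20 };    /* made-up transposed values */
        printf("%lu\n", hilbert_integer(X, 5, 3));
        return 0;
    }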
diff --git a/contribs/slurmdb-direct/Makefile.in b/contribs/slurmdb-direct/Makefile.in
index 7ecb82e2dd7808603d8f04f6233f2d58c4bd8dc3..7c9d9c9f4ea89cd6c1dc6871a7f359198cce677f 100644
--- a/contribs/slurmdb-direct/Makefile.in
+++ b/contribs/slurmdb-direct/Makefile.in
@@ -44,14 +44,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -84,6 +88,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
diff --git a/contribs/slurmdb-direct/moab_2_slurmdb.pl b/contribs/slurmdb-direct/moab_2_slurmdb.pl
index 650663efd1d2858d743e86c14442602e4c6e52ba..cef174b5bdde536ccd825a5393dbcf8eb2d21cbd 100755
--- a/contribs/slurmdb-direct/moab_2_slurmdb.pl
+++ b/contribs/slurmdb-direct/moab_2_slurmdb.pl
@@ -159,6 +159,19 @@ foreach my $line (<STDIN>) {
 		$partition = $1;
 	}
 
+	# Only pick out a number at the beginning; if it is something
+	# else we skip it and make comp_code 0.  If we want something
+	# else, just change this to whatever would be best.  Despite
+	# the Moab documentation, the comp code can contain characters
+	# such as ",SID=" afterwards; without knowing what that means
+	# we just skip it.  We haven't seen a case where comp_code
+	# doesn't start with an int, so the 0 "should" never happen.
+	if($comp_code =~ /^(\d+)/) {
+		$comp_code = $1;
+	} else {
+		$comp_code = 0;
+	}
+
 	#figure out the cluster
 	if($cluster eq "ALL") {
 		if ($node_features =~ /\[(\w*)\]/) {
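
The comp_code match above keeps only the leading digits and falls back to 0.
An equivalent extraction in C, as a sketch (the sample strings are invented,
mirroring the ",SID=" trailers the comment mentions):

    #include <stdio.h>
    #include <stdlib.h>

    /* Return the leading integer of s, or 0 if there are no digits. */
    static int comp_code(const char *s)
    {
        char *end;
        long v = strtol(s, &end, 10);

        return (end == s) ? 0 : (int)v;    /* no leading digits -> 0 */
    }

    int main(void)
    {
        printf("%d\n", comp_code("0,SID=123"));    /* 0   */
        printf("%d\n", comp_code("271"));          /* 271 */
        printf("%d\n", comp_code("SID=5"));        /* 0   */
        return 0;
    }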
diff --git a/contribs/time_login.c b/contribs/time_login.c
index 4c44f8d8228455ce10b823534fc1421fec5e1fa7..4c532179328c2a422d13c38e6b9bf3794b126511 100644
--- a/contribs/time_login.c
+++ b/contribs/time_login.c
@@ -14,10 +14,11 @@
  *  Copyright (C) 2007 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/contribs/torque/Makefile.in b/contribs/torque/Makefile.in
index f5bcc752a6f4a2f31307343da27a8a9a0bd5ec41..8a745decc07a0ca308362024b0ff9fb9f2ca1a82 100644
--- a/contribs/torque/Makefile.in
+++ b/contribs/torque/Makefile.in
@@ -44,14 +44,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -84,6 +88,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
diff --git a/contribs/torque/mpiexec.pl b/contribs/torque/mpiexec.pl
index 37614a7fab0aa263f5ae9c1a3f332ba68453d558..badbd43d97aaff02322a47527b7a27d4769f33c5 100755
--- a/contribs/torque/mpiexec.pl
+++ b/contribs/torque/mpiexec.pl
@@ -8,10 +8,11 @@
 #  Copyright (C) 2007 The Regents of the University of California.
 #  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 #  Written by Danny Auble <auble1@llnl.gov>.
-#  LLNL-CODE-402394.
+#  CODE-OCEC-09-009. All rights reserved.
 #  
 #  This file is part of SLURM, a resource management program.
-#  For details, see <http://www.llnl.gov/linux/slurm/>.
+#  For details, see <https://computing.llnl.gov/linux/slurm/>.
+#  Please also read the included file: DISCLAIMER.
 #  
 #  SLURM is free software; you can redistribute it and/or modify it under
 #  the terms of the GNU General Public License as published by the Free
diff --git a/contribs/torque/pbsnodes.pl b/contribs/torque/pbsnodes.pl
index f02637f060555339bf3a633b26c08c4223197068..3992f1a64cb6280f2eb015c49332dddf408b9a50 100755
--- a/contribs/torque/pbsnodes.pl
+++ b/contribs/torque/pbsnodes.pl
@@ -8,10 +8,11 @@
 #  Copyright (C) 2007 The Regents of the University of California.
 #  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 #  Written by Danny Auble <auble1@llnl.gov>.
-#  LLNL-CODE-402394.
+#  CODE-OCEC-09-009. All rights reserved.
 #  
 #  This file is part of SLURM, a resource management program.
-#  For details, see <http://www.llnl.gov/linux/slurm/>.
+#  For details, see <https://computing.llnl.gov/linux/slurm/>.
+#  Please also read the included file: DISCLAIMER.
 #  
 #  SLURM is free software; you can redistribute it and/or modify it under
 #  the terms of the GNU General Public License as published by the Free
diff --git a/contribs/torque/qdel.pl b/contribs/torque/qdel.pl
index 79d248706d995dddc36f9ac81a6a1a0686375994..49a991cc512d77ccff40f2dcc978c9fd7246457c 100755
--- a/contribs/torque/qdel.pl
+++ b/contribs/torque/qdel.pl
@@ -8,10 +8,11 @@
 #  Copyright (C) 2007 The Regents of the University of California.
 #  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 #  Written by Danny Auble <auble1@llnl.gov>.
-#  LLNL-CODE-402394.
+#  CODE-OCEC-09-009. All rights reserved.
 #  
 #  This file is part of SLURM, a resource management program.
-#  For details, see <http://www.llnl.gov/linux/slurm/>.
+#  For details, see <https://computing.llnl.gov/linux/slurm/>.
+#  Please also read the included file: DISCLAIMER.
 #  
 #  SLURM is free software; you can redistribute it and/or modify it under
 #  the terms of the GNU General Public License as published by the Free
diff --git a/contribs/torque/qhold.pl b/contribs/torque/qhold.pl
index 731a7c0c528292435460ad1405adc7b19e718185..4d3e2d5957775b65e4bfc28489e99f1f39c2eb18 100755
--- a/contribs/torque/qhold.pl
+++ b/contribs/torque/qhold.pl
@@ -9,10 +9,11 @@
 #  Copyright (C) 2007 The Regents of the University of California.
 #  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 #  Written by Danny Auble <auble1@llnl.gov>.
-#  LLNL-CODE-402394.
+#  CODE-OCEC-09-009. All rights reserved.
 #  
 #  This file is part of SLURM, a resource management program.
-#  For details, see <http://www.llnl.gov/linux/slurm/>.
+#  For details, see <https://computing.llnl.gov/linux/slurm/>.
+#  Please also read the included file: DISCLAIMER.
 #  
 #  SLURM is free software; you can redistribute it and/or modify it under
 #  the terms of the GNU General Public License as published by the Free
diff --git a/contribs/torque/qrls.pl b/contribs/torque/qrls.pl
index fd24b29fb170e9cb5de82dc888409aa3557e8544..8d44fef2b7b57ad172c83226cd07992970e79d9a 100755
--- a/contribs/torque/qrls.pl
+++ b/contribs/torque/qrls.pl
@@ -8,10 +8,11 @@
 #  Copyright (C) 2007 The Regents of the University of California.
 #  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 #  Written by Danny Auble <auble1@llnl.gov>.
-#  LLNL-CODE-402394.
+#  CODE-OCEC-09-009. All rights reserved.
 #  
 #  This file is part of SLURM, a resource management program.
-#  For details, see <http://www.llnl.gov/linux/slurm/>.
+#  For details, see <https://computing.llnl.gov/linux/slurm/>.
+#  Please also read the included file: DISCLAIMER.
 #  
 #  SLURM is free software; you can redistribute it and/or modify it under
 #  the terms of the GNU General Public License as published by the Free
diff --git a/contribs/torque/qstat.pl b/contribs/torque/qstat.pl
index 7e302cba487c178a35b88bfa472762d8082551aa..9a35c0ddc1cff21f3da41e18bf778232194a79d6 100755
--- a/contribs/torque/qstat.pl
+++ b/contribs/torque/qstat.pl
@@ -8,10 +8,11 @@
 #  Copyright (C) 2007 The Regents of the University of California.
 #  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 #  Written by Danny Auble <auble1@llnl.gov>.
-#  LLNL-CODE-402394.
+#  CODE-OCEC-09-009. All rights reserved.
 #  
 #  This file is part of SLURM, a resource management program.
-#  For details, see <http://www.llnl.gov/linux/slurm/>.
+#  For details, see <https://computing.llnl.gov/linux/slurm/>.
+#  Please also read the included file: DISCLAIMER.
 #  
 #  SLURM is free software; you can redistribute it and/or modify it under
 #  the terms of the GNU General Public License as published by the Free
@@ -144,6 +145,32 @@ if(defined($queueList)) {
 		}	
 		$rc = 0;
 	}
+} elsif($queueStatus) {
+	my $jresp = Slurm->load_jobs(1);
+	die "Problem loading jobs.\n" if(!$jresp);
+	my $resp = Slurm->load_partitions(1);
+	die "Problem loading partitions.\n" if(!$resp);
+	my $total_running = 0;
+	my $total_queued = 0;
+	my $line = 0;
+	foreach my $part (@{$resp->{partition_array}}) {
+		$part->{'running_jobs'} = 0;
+		$part->{'queued_jobs'} = 0;
+		foreach my $job (@{$jresp->{job_array}}) {
+			next if($job->{'partition'} ne $part->{'name'});
+			$part->{'running_jobs'}++
+				if($job->{'job_state'} == JOB_RUNNING);
+			$part->{'queued_jobs'}++
+				if($job->{'job_state'} == JOB_PENDING);
+		}
+		$total_running += $part->{'running_jobs'};
+		$total_queued += $part->{'queued_jobs'};
+		print_part_limits($part, $line);
+		$line++;
+	}
+	printf("                                               ----- -----\n");
+	printf("                                               %5d %5d\n",
+	       $total_running, $total_queued);
 } else {
 	my @jobIds = @ARGV;
 	my @userIds = split(/,/, $userList) if $userList;
@@ -360,6 +387,20 @@ sub yes_no
 	return "no";
 }
 
+sub en_dis
+{
+	my ($query) = @_;
+	return "E" if $query;
+	return "D";
+}
+
+sub running_stopped
+{
+	my ($query) = @_;
+	return "R" if $query;
+	return "S";
+}
+
 sub get_exec_host 
 {
 	my ($job) = @_;
@@ -510,17 +551,18 @@ sub print_part_brief
 	my ($part, $line_num) = @_;
 
 	if(!$line_num) {
-		printf("%-16s %5s %5s %5s %5s %5s %5s %5s %5s %5s %5s %1s\n\n",
+		printf("%-16s %5s %5s %5s %5s %5s %5s %5s %5s %5s %5s %1s\n",
 		       "Queue", "Max", "Tot", "Ena",  "Str", "Que", "Run",
 		       "Hld", "Wat", "Trn", "Ext", "T");
-		printf("%-16s %5s %5s %5s %5s %5s %5s %5s %5s %5s %5s %1s\n\n",
+		printf("%-16s %5s %5s %5s %5s %5s %5s %5s %5s %5s %5s %1s\n",
 		       "----------------", "---", "---", "---",  "---", "---", 
 		       "---", "---", "---", "---", "---", "-");
 	}
 	printf("%-16.16s %5.5s %5.5s %5.5s %5.5s %5.5s %5.5s %5.5s " .
 	       "%5.5s %5.5s %5.5s %1.1s\n",
 	       $part->{'name'}, '?', '?', yes_no($part->{'state_up'}),
-	       yes_no($part->{'state_up'}), '?', '?', '?', '?', '?', '?', 'E');
+	       yes_no($part->{'state_up'}), '?', '?', '?', '?', '?', '?',
+	       en_dis($part->{'state_up'}));
 }
 
 sub print_part_full
@@ -538,6 +580,40 @@ sub print_part_full
 	print "\n";
 }
 
+sub print_part_limits 
+{
+	my ($part, $line_num) = @_;
+
+	if(!$line_num) {
+		printf("%-16s %6s %8s %8s %4s  %3s %3s %2s %5s\n",
+		       "Queue", "Memory", "CPU Time", "Walltime",
+		       "Node", "Run", "Que", "Lm", "State");
+		printf("%-16s %6s %8s %8s %4s  %3s %3s %2s %5s\n",
+		       "----------------", "------", "--------", "--------",
+		       "----", "---", "---", "--", "-----");
+	}
+
+	printf("%-16.16s   --      --    ", $part->{'name'});
+	if($part->{'max_time'} != INFINITE) {
+		printf("%8u ", $part->{'max_time'});
+	} else {
+		printf("   --    ");
+		
+	}
+
+	if($part->{'max_nodes'} != INFINITE) {
+		printf("%4u  ", $part->{'max_nodes'});
+	} else {
+		printf("  --  ");
+	}
+
+	printf("%3u %3u --  %1.1s %1.1s \n", $part->{'running_jobs'},
+	       $part->{'queued_jobs'}, en_dis($part->{'state_up'}),
+	       running_stopped($part->{'state_up'}));
+}
+
+
 ##############################################################################
 
 __END__
@@ -550,6 +626,10 @@ B<qstat> - display job/partition information in a familiar pbs format
 
 B<qstat> [B<-f>] [B<-a>|B<-i>|B<-r>] [B<-n> [B<-1>]] [B<-G>|B<-M>] [B<-u> I<user_list>] [B<-? | --help>] [B<--man>] [I<job_id>...]
 
+B<qstat> -Q [-f]
+
+B<qstat> -q
+
 =head1 DESCRIPTION
 
 The B<qstat> command displays information about jobs.
diff --git a/contribs/torque/qsub.pl b/contribs/torque/qsub.pl
index cee2d06d961f040f259f46290b6906f76ba9a7cb..ff800bfb04333b25a18f1d0a1504c455ea2150c6 100755
--- a/contribs/torque/qsub.pl
+++ b/contribs/torque/qsub.pl
@@ -8,10 +8,11 @@
 #  Copyright (C) 2007 The Regents of the University of California.
 #  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 #  Written by Danny Auble <auble1@llnl.gov>.
-#  LLNL-CODE-402394.
+#  CODE-OCEC-09-009. All rights reserved.
 #  
 #  This file is part of SLURM, a resource management program.
-#  For details, see <http://www.llnl.gov/linux/slurm/>.
+#  For details, see <https://computing.llnl.gov/linux/slurm/>.
+#  Please also read the included file: DISCLAIMER.
 #  
 #  SLURM is free software; you can redistribute it and/or modify it under
 #  the terms of the GNU General Public License as published by the Free
diff --git a/doc/Makefile.in b/doc/Makefile.in
index 1564fa7ecc03b4cdcba3f3426fb1c08d6acef324..a165a62f6b9da09cddc24e9103f7eb2c05d23413 100644
--- a/doc/Makefile.in
+++ b/doc/Makefile.in
@@ -40,14 +40,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -89,6 +93,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
diff --git a/doc/html/AllocationPies.gif b/doc/html/AllocationPies.gif
new file mode 100644
index 0000000000000000000000000000000000000000..8ee39222f5c76cba6a0299be89510fc1280d6ec4
Binary files /dev/null and b/doc/html/AllocationPies.gif differ
diff --git a/doc/html/ExampleUsage.gif b/doc/html/ExampleUsage.gif
new file mode 100644
index 0000000000000000000000000000000000000000..692cd5a125c7b56d8937b42f0caadfba0225fd44
Binary files /dev/null and b/doc/html/ExampleUsage.gif differ
diff --git a/doc/html/Makefile.am b/doc/html/Makefile.am
index c8c30c466e2cc8e5222155a32a8d85a38d221df7..30416c2469db69b57e6b9bd23e0666dbbc8863f3 100644
--- a/doc/html/Makefile.am
+++ b/doc/html/Makefile.am
@@ -7,9 +7,11 @@ generated_html = \
 	authplugins.html \
 	big_sys.html \
 	bluegene.html \
+	checkpoint_blcr.html \
 	checkpoint_plugins.html \
 	cons_res.html \
 	cons_res_share.html \
+	cray.html \
 	crypto_plugins.html \
 	dist_plane.html \
 	documentation.html \
@@ -18,6 +20,8 @@ generated_html = \
 	gang_scheduling.html \
 	help.html \
 	ibm.html \
 	jobacct_gatherplugins.html \
 	accounting_storageplugins.html \
 	jobcompplugins.html \
@@ -25,6 +29,7 @@ generated_html = \
 	maui.html \
 	mc_support.html \
 	moab.html \
+	mpi_guide.html \
 	mpiplugins.html \
 	news.html \
 	overview.html \
@@ -33,25 +38,34 @@ generated_html = \
 	power_save.html \
 	preempt.html \
 	proctrack_plugins.html \
+	priority_multifactor.html \
+	priority_plugins.html \
 	programmer_guide.html \
 	publications.html \
 	quickstart_admin.html \
 	quickstart.html \
+	reservations.html \
+	resource_limits.html \
 	schedplugins.html \
 	selectplugins.html \
 	slurm.html \
+	sun_const.html \
 	switchplugins.html \
 	taskplugins.html \
 	team.html \
 	testimonials.html \
+	topology.html \
+	topology_plugin.html \
 	troubleshoot.html
 
 html_DATA = \
 	${generated_html} \
+	AllocationPies.gif \
 	arch.gif \
 	configurator.html \
 	coding_style.pdf \
 	entities.gif \
+	ExampleUsage.gif \
 	lci.7.tutorial.pdf \
 	lll.gif \
 	mc_support.gif \
@@ -66,7 +80,10 @@ html_DATA = \
 	slurm_design.pdf \
 	slurmstyles.css \
 	sponsors.gif \
-	linuxstyles.css
+	topo_ex1.gif \
+	topo_ex2.gif \
+	linuxstyles.css \
+	UsagePies.gif
 
 MOSTLYCLEANFILES = ${generated_html}
 
diff --git a/doc/html/Makefile.in b/doc/html/Makefile.in
index fc5184132ed1ad0c88e5849c635ba2f4cff2626c..67defa3e1624b21e8cd1c603a599cd5e63adc2d6 100644
--- a/doc/html/Makefile.in
+++ b/doc/html/Makefile.in
@@ -42,14 +42,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -88,6 +92,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
@@ -253,9 +261,11 @@ generated_html = \
 	authplugins.html \
 	big_sys.html \
 	bluegene.html \
+	checkpoint_blcr.html \
 	checkpoint_plugins.html \
 	cons_res.html \
 	cons_res_share.html \
+	cray.html \
 	crypto_plugins.html \
 	dist_plane.html \
 	documentation.html \
@@ -264,6 +274,8 @@ generated_html = \
 	gang_scheduling.html \
 	help.html \
 	ibm.html \
 	jobacct_gatherplugins.html \
 	accounting_storageplugins.html \
 	jobcompplugins.html \
@@ -271,6 +283,7 @@ generated_html = \
 	maui.html \
 	mc_support.html \
 	moab.html \
+	mpi_guide.html \
 	mpiplugins.html \
 	news.html \
 	overview.html \
@@ -279,25 +292,34 @@ generated_html = \
 	power_save.html \
 	preempt.html \
 	proctrack_plugins.html \
+	priority_multifactor.html \
+	priority_plugins.html \
 	programmer_guide.html \
 	publications.html \
 	quickstart_admin.html \
 	quickstart.html \
+	reservations.html \
+	resource_limits.html \
 	schedplugins.html \
 	selectplugins.html \
 	slurm.html \
+	sun_const.html \
 	switchplugins.html \
 	taskplugins.html \
 	team.html \
 	testimonials.html \
+	topology.html \
+	topology_plugin.html \
 	troubleshoot.html
 
 html_DATA = \
 	${generated_html} \
+	AllocationPies.gif \
 	arch.gif \
 	configurator.html \
 	coding_style.pdf \
 	entities.gif \
+	ExampleUsage.gif \
 	lci.7.tutorial.pdf \
 	lll.gif \
 	mc_support.gif \
@@ -312,7 +334,10 @@ html_DATA = \
 	slurm_design.pdf \
 	slurmstyles.css \
 	sponsors.gif \
-	linuxstyles.css
+	topo_ex1.gif \
+	topo_ex2.gif \
+	linuxstyles.css \
+	UsagePies.gif
 
 MOSTLYCLEANFILES = ${generated_html}
 EXTRA_DIST = $(html_DATA)
diff --git a/doc/html/UsagePies.gif b/doc/html/UsagePies.gif
new file mode 100644
index 0000000000000000000000000000000000000000..512e08363d46835f5724cb16aaba58e8923b98ee
Binary files /dev/null and b/doc/html/UsagePies.gif differ
diff --git a/doc/html/accounting.shtml b/doc/html/accounting.shtml
index c860b34c3b7b6392660ad2a60af8a8d22c659584..b1c1031a3309761751356536c77758b62ee68ae1 100644
--- a/doc/html/accounting.shtml
+++ b/doc/html/accounting.shtml
@@ -631,7 +631,6 @@ If the cluster doesn't have the limit set no limit will be enforced.
 
 <li><b>QOS=</b> comma separated list of QOS's this association is
   able to run.
-
 </li>
 </ul>
 
diff --git a/doc/html/accounting_storageplugins.shtml b/doc/html/accounting_storageplugins.shtml
index 2423c6955ce0db05c88fc82465618ee28184d4ad..7bdac1ae5329c866c607cf7f2ffa0ddfc7f3a719 100644
--- a/doc/html/accounting_storageplugins.shtml
+++ b/doc/html/accounting_storageplugins.shtml
@@ -11,7 +11,7 @@ their own SLURM Job Accounting Storage plugins. This is version 1 of the API.
 SLURM Plugin API with the following specifications:
 
 <p><span class="commandline">const char
-plugin_name[]="<i>full&nbsp;text&nbsp;name</i>"
+plugin_name[]="<i>full&nbsp;text&nbsp;name</i>"</span>
 <p style="margin-left:.2in">
 A free-formatted ASCII text string that identifies the plugin.
 
@@ -58,7 +58,7 @@ acct_storage_p_get_connection() is called to get a connection to the
 <span class="commandline">make_agent</span> (input) to make an agent
 thread or not.  This is primarily used in the slurmdbd plugin.<br>
 <span class="commandline">conn_num</span> (input) connection number to
-the plugin.  In many cases you should plan on multiple simultanious
+the plugin.  In many cases you should plan on multiple simultaneous
 connections to the plugin.  This number is useful since the debug
 messages can print this out to determine which connection the message
 is from.<br>
@@ -77,7 +77,7 @@ the storage type.
 <p style="margin-left:.2in"><b>Arguments</b>: <br>
 <span class="commandline">db_conn</span> (input/output) connection to
 the storage type, all memory will be freed inside this function and
-NULLed out. 
+set to NULL. 
 <p style="margin-left:.2in"><b>Returns</b>: <br>
 <span class="commandline">SLURM_SUCCESS</span> on success, or<br>
 <span class="commandline">SLURM_ERROR</span> on failure.
@@ -198,12 +198,25 @@ acct_wckey_rec_t *'s containing information about the wckeys to add. <br>
 <span class="commandline">SLURM_SUCCESS</span> on success, or<br>
 <span class="commandline">SLURM_ERROR</span> on failure.
 
+<p class="commandline">
+int acct_storage_p_add_reservation(void *db_conn,
+acct_reservation_rec_t *resv)
+<p style="margin-left:.2in"><b>Description</b>:<br>
+Called to add reservations to the storage type.
+<p style="margin-left:.2in"><b>Arguments</b>: <br>
+<span class="commandline">db_conn</span> (input) connection to
+the storage type. <br>
+<span class="commandline">resv</span> (input) Reservation to be added. <br>
+<p style="margin-left:.2in"><b>Returns</b>: <br>
+<span class="commandline">SLURM_SUCCESS</span> on success, or<br>
+<span class="commandline">SLURM_ERROR</span> on failure.
+
 <p class="commandline">
 List acct_storage_p_modify_users(void *db_conn, uint32_t uid,
 acct_user_cond_t *user_cond, acct_user_rec_t *user)
 <p style="margin-left:.2in"><b>Description</b>:<br>
 Used to modify existing users in the storage type.  The condition
-  could include very vaque information about the user, so this
+  could include very vague information about the user, so this
   function should be robust in the ability to give everything the user
   is asking for.  This is the reason a list of modified users is
   returned so the caller knows what has been changed, sometimes by mistake.
@@ -225,7 +238,7 @@ List acct_storage_p_modify_accounts(void *db_conn, uint32_t uid,
 acct_account_cond_t *acct_cond, acct_account_rec_t *acct)
 <p style="margin-left:.2in"><b>Description</b>:<br>
 Used to modify existing accounts in the storage type.  The condition
-  could include very vaque information about the account, so this
+  could include very vague information about the account, so this
   function should be robust in the ability to give everything the account
   is asking for.  This is the reason a list of modified accounts is
   returned so the caller knows what has been changed, sometimes by mistake.
@@ -247,7 +260,7 @@ List acct_storage_p_modify_clusters(void *db_conn, uint32_t uid,
 acct_cluster_cond_t *cluster_cond, acct_cluster_rec_t *cluster)
 <p style="margin-left:.2in"><b>Description</b>:<br>
 Used to modify existing clusters in the storage type.  The condition
-  could include very vaque information about the cluster, so this
+  could include very vague information about the cluster, so this
   function should be robust in the ability to give everything the cluster
   is asking for.  This is the reason a list of modified clusters is
   returned so the caller knows what has been changed, sometimes by mistake.
@@ -269,7 +282,7 @@ List acct_storage_p_modify_associations(void *db_conn, uint32_t uid,
 acct_association_cond_t *assoc_cond, acct_association_rec_t *assoc)
 <p style="margin-left:.2in"><b>Description</b>:<br>
 Used to modify existing associations in the storage type.  The condition
-  could include very vaque information about the association, so this
+  could include very vague information about the association, so this
   function should be robust in the ability to give everything the association
   is asking for.  This is the reason a list of modified associations is
   returned so the caller knows what has been changed, sometimes by mistake.
@@ -291,7 +304,7 @@ List acct_storage_p_modify_qos(void *db_conn, uint32_t uid,
 acct_qos_cond_t *qos_cond, acct_qos_rec_t *qos)
 <p style="margin-left:.2in"><b>Description</b>:<br>
 Used to modify existing qos in the storage type.  The condition
-  could include very vaque information about the qos, so this
+  could include very vague information about the qos, so this
   function should be robust in the ability to give everything the qos
   is asking for.  This is the reason a list of modified qos is
   returned so the caller knows what has been changed, sometimes by mistake.
@@ -313,7 +326,7 @@ List acct_storage_p_modify_wckeys(void *db_conn, uint32_t uid,
 acct_wckey_cond_t *wckey_cond, acct_wckey_rec_t *wckey)
 <p style="margin-left:.2in"><b>Description</b>:<br>
 Used to modify existing wckeys in the storage type.  The condition
-  could include very vaque information about the wckeys, so this
+  could include very vague information about the wckeys, so this
   function should be robust in the ability to give everything the wckey
   is asking for.  This is the reason a list of modified wckey is
   returned so the caller knows what has been changed, sometimes by mistake.
@@ -330,6 +343,20 @@ should be on the wckey identified by the conditional.<br>
 modified on success, or<br>
 <span class="commandline">NULL</span> on failure.
 
+<p class="commandline">
+int acct_storage_p_modify_reservation(void *db_conn,
+acct_reservation_rec_t *resv)
+<p style="margin-left:.2in"><b>Description</b>:<br>
+Called to modify reservations in the storage type.
+<p style="margin-left:.2in"><b>Arguments</b>: <br>
+<span class="commandline">db_conn</span> (input) connection to
+the storage type. <br>
+<span class="commandline">resv</span> (input) Reservation to be
+modified; the (id) must be set in the structure. <br>
+<p style="margin-left:.2in"><b>Returns</b>: <br>
+<span class="commandline">SLURM_SUCCESS</span> on success, or<br>
+<span class="commandline">SLURM_ERROR</span> on failure.
+
 <p class="commandline">
 List acct_storage_p_remove_users(void *db_conn, uint32_t uid,
 acct_user_cond_t *user_cond)
@@ -453,6 +480,20 @@ which wckeys to be removed.  Wckey names should not need to be stated.<br>
 removed on success, or<br>
 <span class="commandline">NULL</span> on failure.
 
+<p class="commandline">
+int acct_storage_p_remove_reservation(void *db_conn,
+acct_reservation_rec_t *resv)
+<p style="margin-left:.2in"><b>Description</b>:<br>
+Called to remove reservations in the storage type.
+<p style="margin-left:.2in"><b>Arguments</b>: <br>
+<span class="commandline">db_conn</span> (input) connection to
+the storage type. <br>
+<span class="commandline">resv</span> (input) Reservation to be
+removed; the (id) must be set in the structure. <br>
+<p style="margin-left:.2in"><b>Returns</b>: <br>
+<span class="commandline">SLURM_SUCCESS</span> on success, or<br>
+<span class="commandline">SLURM_ERROR</span> on failure.
+
 <p class="commandline">
 List acct_storage_p_get_users(void *db_conn, uint32_t uid,
 acct_user_cond_t *user_cond)
@@ -471,7 +512,7 @@ be stated.<br>
 on success, or<br>
 <span class="commandline">NULL</span> on failure.
 
-		<p class="commandline">
+<p class="commandline">
 List acct_storage_p_get_accts(void *db_conn, uint32_t uid,
 acct_account_cond_t *acct_cond)
 <p style="margin-left:.2in"><b>Description</b>:<br>
@@ -590,7 +631,7 @@ the storage type.<br>
 <span class="commandline">uid</span> (input) uid of user calling the
 function.<br> 
 <span class="commandline">in</span> (input/out) can be anything that
-gathers usage like acct_associaiton_rec_t * or acct_wckey_rec_t *.<br> 
+gathers usage like acct_association_rec_t * or acct_wckey_rec_t *.<br> 
 <span class="commandline">type</span> (input) really
 slurmdbd_msg_type_t should let the plugin know what the structure is
 that was sent in some how.<br> 
@@ -650,13 +691,15 @@ structure marked up.<br>
 
 <p class="commandline">
 int clusteracct_storage_p_cluster_procs(void *db_conn, char *cluster,
-uint32_t procs, time_t event_time)
+char *cluster_nodes, uint32_t procs, time_t event_time)
 <p style="margin-left:.2in"><b>Description</b>:<br>
 Update storage type with the current number of processors on a given cluster.
 <p style="margin-left:.2in"><b>Arguments</b>: <br>
 <span class="commandline">db_conn</span> (input) connection to
 the storage type. <br>
 <span class="commandline">cluster</span> (input) name of cluster.<br>
+<span class="commandline">cluster_nodes</span> (input) ranged list of
+nodes on system.<br>
 <span class="commandline">procs</span> (input) number of processors on
 system.<br>
 <span class="commandline">event_time</span> (input) time event happened.<br>
@@ -867,7 +910,7 @@ database with.
 <dt><span class="commandline">AccountingStoragePass</span>
 <dd>Let the plugin know the password of the user connecting to the database.
 <dt><span class="commandline">AccountingStorageEnforce</span>
-<dd>Specifies if we should enforce certain things be in existance
+<dd>Specifies if we should enforce certain things be in existence
   before allowing job submissions and such. Valid options are
   "associations, limits, and wckeys". You can use any combination of
   those listed.
@@ -880,6 +923,6 @@ ability to implement a particular API version using the mechanism outlined
 for SLURM plugins.
 <p class="footer"><a href="#top">top</a>
 
-<p style="text-align:center;">Last modified 10 February 2009</p>
+<p style="text-align:center;">Last modified 2 March 2009</p>
 
 <!--#include virtual="footer.txt"-->
diff --git a/doc/html/api.shtml b/doc/html/api.shtml
index 3742713a0dc56c73d6af3cebf1dba9350c75ef78..68d83d0b01034efd1b50f68baeb5b857f3e6a419 100644
--- a/doc/html/api.shtml
+++ b/doc/html/api.shtml
@@ -1,9 +1,9 @@
 <!--#include virtual="header.txt"-->
 
-<h2><a name="top">SLURM Switch Plugin API</a></h2>
+<h2><a name="top">SLURM APIs</a></h2>
 
 <h3>Overview</h3>
-<p>All of the SLURM commands utilize a collection of Application Progamming 
+<p>All of the SLURM commands utilize a collection of Application Programming 
 Interfaces (APIs). 
 User and system applications can directly use these APIs as desired to 
 achieve tighter integration with SLURM.
@@ -207,7 +207,7 @@ created by <i>slurm_step_ctx_create</i>.</li>
 <li><b>slurm_jobinfo_ctx_get</b>&#151;Get values from a <i>jobinfo</i>
 field as returned by <i>slurm_step_ctx_get</i>.</li>
 
-<li><b>slurm_spawn</b>&#151;Spawn tasks and establish communcations.</li>
+<li><b>slurm_spawn</b>&#151;Spawn tasks and establish communications.</li>
 
 <li><b>slurm_spawn_kill</b>&#151;Signal spawned tasks.</li>
 
@@ -243,7 +243,7 @@ job step.</li>
 <ul>
 
 <li><b>slurm_checkpoint_able</b>&#151;Note that a specific job or 
-job step is elligible for checkpoint.</li>
+job step is eligible for checkpoint.</li>
 
 <li><b>slurm_checkpoint_complete</b>&#151;Note that a requested 
 checkpoint has completed.</li>
@@ -273,7 +273,7 @@ checkpointed job resume execution.</li>
 
 
 <h3>Administrative Functions</h3>
-<p>Most of these functions can only be exected by user <i>root</i>.</p>
+<p>Most of these functions can only be executed by user <i>root</i>.</p>
 <ul>
 
 <li><b>slurm_reconfigure</b>&#151;Update slurm daemons 
@@ -326,6 +326,6 @@ allocated by <i>slurm_hostlist_create</i>.
 </ul>
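Of the functions above, the hostlist set is the easiest to exercise
standalone. A short sketch, assuming only the declarations in
<slurm/slurm.h> and linking with -lslurm:

    #include <stdio.h>
    #include <stdlib.h>
    #include <slurm/slurm.h>

    int main(void)
    {
            /* Expand a ranged node expression into individual host names. */
            hostlist_t hl = slurm_hostlist_create("linux[01-03]");
            char *host;

            while ((host = slurm_hostlist_shift(hl))) {
                    printf("%s\n", host);   /* linux01, linux02, linux03 */
                    free(host);
            }
            slurm_hostlist_destroy(hl);
            return 0;
    }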
 <p class="footer"><a href="#top">top</a></p>
 
-<p style="text-align:center;">Last modified 25 October 2005</p>
+<p style="text-align:center;">Last modified 13 November 2005</p>
 
 <!--#include virtual="footer.txt"-->
diff --git a/doc/html/authplugins.shtml b/doc/html/authplugins.shtml
index ff5accf674b9e8d83289a4ba4e96517bbace89b6..8d04e00f8e6c2787cbf388b430771426ae311839 100644
--- a/doc/html/authplugins.shtml
+++ b/doc/html/authplugins.shtml
@@ -17,8 +17,8 @@ abbreviation for the type of authentication. We recommend, for example:</p>
 authentication service. This may be used for testing purposes, but is not suitable for 
 production use due to lack of effective security.</li>
 <li><b>authd</b>&#151;Brett Chun's Linux authd.</li>
-<li><b>munge</b>&#151;LLNL's munge protocol (recommended plugin for production use).</li>
-<li><b>krb5</b>&#151;Kerberos 5 (not implemented as of 8 June 2004).</li>
+<li><b>munge</b>&#151;LLNL's Munge protocol (recommended plugin for production use).</li>
+<li><b>krb5</b>&#151;Kerberos 5 (expected to be available in 2009).</li>
 </ul>
 <p>The <span class="commandline">plugin_name</span> and <span class="commandline">plugin_version</span> 
 symbols required by the SLURM Plugin API require no specialization for authentication. 
diff --git a/doc/html/big_sys.shtml b/doc/html/big_sys.shtml
index b33a9e915e4b273cfedb918ee5d1a9c931b68515..23f11c18ad7dc3ef77bc0eb8b84dc654bd21882c 100644
--- a/doc/html/big_sys.shtml
+++ b/doc/html/big_sys.shtml
@@ -13,7 +13,7 @@ for this material.</p>
 
 <h2>Performance Results</h2>
 
-<p>SLURM has acutally been used on clusters containing up to 4,184 nodes. 
+<p>SLURM has been used on clusters containing up to 4,184 nodes. 
 At that scale, the total time to execute a simple program (resource 
 allocation, task launch, I/O processing, and cleanup, e.g. 
 "time srun -N4184 -n8368 uname") at 8,368 tasks 
diff --git a/doc/html/bluegene.shtml b/doc/html/bluegene.shtml
index 799cbde1a9546e4edcd8c7c235e5628c278282d0..d01449dce76230dc98cf42c34644e0f15e677dcf 100644
--- a/doc/html/bluegene.shtml
+++ b/doc/html/bluegene.shtml
@@ -90,20 +90,20 @@ configured with <i>small blocks</i> (any block less than a full base partition)
 into the base partition notation.  For example, if there were 64 psets in the
 configuration, bg012[0-15] represents
 the first quarter or first 16 ionodes of a midplane.  In BlueGene/L
-this would be 128 c-node block.  To represent the first nodecar in the
+this would be a 128 c-node block.  To represent the first nodecard in the
 second quarter or ionodes 16-19 the notation would be bg012[16-19], or
 a 32 c-node block.
 Since jobs must allocate consecutive base partitions in all three dimensions, we have developed 
 an abbreviated format for describing the base partitions in one of these three-dimensional blocks. 
 The base partition has a prefix determined from the system which is followed by the end-points 
-of the block enclosed in square-brackets. 
+of the block enclosed in square-brackets and separated by an "x".
 For example, "bg[620x731]" is used to represent the eight base partitions enclosed in a block 
-with endpoints bg620 and bg731 (bg620, bg621, bg630, bg631, bg720, bg721, 
+with end-points bg620 and bg731 (bg620, bg621, bg630, bg631, bg720, bg721, 
 bg730 and bg731).</p></a>
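The abbreviated form is simply a three-dimensional box: each digit of the two
end-points bounds one dimension, inclusive. A standalone C sketch of the
expansion (illustration only, not SLURM code):

    #include <stdio.h>

    /* Expand bg[620x731]: iterate every coordinate between the two
     * end-points in each of the X, Y and Z dimensions. */
    int main(void)
    {
            int x, y, z;

            for (x = 6; x <= 7; x++)
                    for (y = 2; y <= 3; y++)
                            for (z = 0; z <= 1; z++)
                                    printf("bg%d%d%d\n", x, y, z);
            return 0;   /* the eight base partitions bg620 through bg731 */
    }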
 
 <p>
-<b>IMPORTANT:</b> As of SLURM version 1.2 SLURM can handle a bluegene
-system of size 36x36x36.  To try to keep with the 'three-digit suffix  
+<b>IMPORTANT:</b> SLURM version 1.2 or higher can handle a bluegene system of 
+size up to 36x36x36.  To keep with the 'three-digit suffix  
 representing its coordinates in the X, Y and Z dimensions with a
 zero origin', we now support A-Z as valid numbers.  This makes it so
 the prefix <b>must always be lower case</b>, and any letters in the 
@@ -120,7 +120,7 @@ invalid: BGL[000xC44] BglC00 bglb00 Bglzzz
 </p>
 
 <p>One new tool provided is <i>smap</i>. 
-As of SLURM verison 1.2, <i>sview</i> is
+As of SLURM version 1.2, <i>sview</i> is
 another new tool offering even more viewing and configuring options.
 Smap is aware of system topography and provides a map of what base partitions 
 are allocated to jobs, partitions, etc. 
@@ -215,7 +215,7 @@ calls only available on the SN.</p>
 to configure and build two sets of files for installation. 
 One set will be for the Service Node (SN), which has direct access to the 
 Bridge APIs. 
-The second set will be for the Front End Nodes (FEN), whick lack access to the 
+The second set will be for the Front End Nodes (FEN), which lack access to the 
 Bridge APIs and interact using Remote Procedure Calls to the slurmctld 
 daemon.
 You should see "#define HAVE_BG 1" and "#define HAVE_FRONT_END 1" in the "config.h" 
@@ -264,7 +264,7 @@ etc.).  Sample prolog and epilog scripts follow. </p>
 #
 # Cancel job to start the termination process for this job
 # and release the bgblock
-/usr/bin/scancel $SLURM_JOBID
+/usr/bin/scancel $SLURM_JOB_ID
 #
 # Wait for bgblock to be released from this job's use
 /usr/sbin/slurm_epilog
@@ -317,7 +317,7 @@ so as to maximize its performance and minimize other risk factors.</p>
 
 <a name="bluegene-conf"><h2>Bluegene.conf File Creation</h2></a>
 <p>In addition to the normal <i>slurm.conf</i> file, a new 
-<i>bluegene.conf</i> configuration file is required with information pertainate 
+<i>bluegene.conf</i> configuration file is required with information pertinent 
 to the system.
 Put <i>bluegene.conf</i> into the SLURM configuration directory with
 <i>slurm.conf</i>.
@@ -372,18 +372,18 @@ Dynamic partitioning was developed primarily for smaller BlueGene systems,
 but can be used on larger systems.
 Dynamic partitioning may introduce fragmentation of resources.
 This fragmentation may be severe since SLURM will run a job anywhere 
-resources are avaliable with little thought of the future.  
+resources are available with little thought of the future.  
 As with overlap partitioning, <b>use dynamic partitioning with 
 caution!</b>  
 This mode can result in job starvation since smaller jobs will run 
-if resources are avaliable and prevent larger jobs from running.
+if resources are available and prevent larger jobs from running.
 Bgblocks need not be assigned in the <i>bluegene.conf</i> file 
 for this mode.</p>
 
 <p>Blocks can be freed or set in an error state with scontrol,
 (i.e. "<i>scontrol update BlockName=RMP0 state=error</i>").
 This will end any job on the block and set the state of the block to ERROR
-making it so no job will run on the block.  To set it back to a usuable 
+making it so no job will run on the block.  To set it back to a useable 
 state set the state to free (i.e. 
 "<i>scontrol update BlockName=RMP0 state=free</i>"). 
 
@@ -393,7 +393,7 @@ need, you can set a set of ionodes into an error state with scontrol,
 (i.e. "<i>scontrol update subbpname=bg000[0-3] state=error</i>").
 This will end any job on the nodes listed, create a block there, and set 
 the state of the block to ERROR making it so no job will run on the
-block.  To set it back to a usuable state set the state to free (i.e. 
+block.  To set it back to a useable state set the state to free (i.e. 
 "<i>scontrol update BlockName=RMP0 state=free</i>" or
  "<i>scontrol update subbpname=bg000[0-3] state=free</i>"). This is
  helpful to allow other jobs to run on the unaffected nodes in
@@ -464,6 +464,8 @@ A sample <i>bluegene.conf</i> file is shown below.
 #                    2: Log level 1 and basic debug messages
 #                    3: Log level 2 and more debug message
 #                    4: Log all messages
+# DenyPassthrough:   Prevents use of passthrough ports in specific
+#                    dimensions, X, Y, and/or Z, plus ALL
 #
 # NOTE: The bgl_serial value is set at configuration time using the 
 #       "--with-bgl-serial=" option. Its default value is "BGL".
@@ -501,6 +503,8 @@ NumPsets=64	# An I/O rich environment
 BridgeAPILogFile=/var/log/slurm/bridgeapi.log
 BridgeAPIVerbose=0
 
+#DenyPassthrough=X,Y,Z
+
 ###############################################################################
 # Define the static/overlap partitions (bgblocks)
 #
diff --git a/doc/html/checkpoint_blcr.shtml b/doc/html/checkpoint_blcr.shtml
new file mode 100644
index 0000000000000000000000000000000000000000..83835e1403099dd6089e3021fef9895c2bef33a3
--- /dev/null
+++ b/doc/html/checkpoint_blcr.shtml
@@ -0,0 +1,166 @@
+<!--#include virtual="header.txt"-->
+
+<h1><a name="top">SLURM Checkpoint/Restart with BLCR</a></h1>
+
+<h2>Overview</h2>
+<p>SLURM version 2.0 has been integrated with 
+<a href="https://ftg.lbl.gov/CheckpointRestart/CheckpointRestart.shtml">
+Berkeley Lab Checkpoint/Restart (BLCR)</a> in order to provide automatic
+job checkpoint/restart support.
+Functionality provided includes:
+<ol>
+<li>Checkpoint of whole batch jobs in addition to job steps</li>
+<li>Periodic checkpoint of batch jobs and job steps</li>
+<li>Restart execution of batch jobs and job steps from checkpoint files</li>
+<li>Automatically requeue and restart the execution of batch jobs upon 
+node failure</li>
+</ol></p>
+
+<h2>User Commands</h2>
+
+<p>The following documents SLURM changes specific to BLCR support.
+Basic familiarity with SLURM commands is assumed.</p>
+
+<h3>srun</h3>
+
+<p>Several options have been added to support checkpoint restart:</p>
+<ul>
+<li><b>--checkpoint</b>: Specify the interval between periodic checkpoint
+of a job step, in seconds</li>
+<li><b>--checkpoint-dir</b>: Specify the directory where the checkpoint image
+files of a job step will be stored.
+The default value is the current working directory.
+Checkpoint files will be of the form <i>"&lt;job_id&gt;.ckpt"</i> for jobs
+and <i>"&lt;job_id&gt;.&lt;step_id&gt;.ckpt"</i> for job steps.</li>
+<li><b>--restart-dir</b>: Specify the directory where the checkpoint image
+files of a job step will be read from.</li>
+</ul>
+
+<p>Environment variables are available for all of these options:</p>
+<ul>
+<li><b>SLURM_CHECKPOINT</b> is equivalent to <b>--checkpoint</b></li>
+<li><b>SLURM_CHECKPOINT_DIR</b> is equivalent to <b>--checkpoint-dir</b></li>
+<li><b>SLURM_RESTART_DIR</b> is equivalent to <b>--restart-dir</b></li>
+</ul>
+<p>The environment variable <b>SLURM_SRUN_CR_SOCKET</b> is used for job step 
+logic to interact with the <b>srun_cr</b> command.</p>
+
+<h3>srun_cr</h3>
+
+<p>This is a wrapper program for use with SLURM's <b>checkpoint/blcr</b>
+plugin to checkpoint/restart tasks launched by srun.
+The design of <b>srun_cr</b> is inspired by <b>mpiexec_cr</b> from MVAPICH2 and
+<b>cr_restart</b> from BLCR.
+It is a wrapper around the <b>srun</b> command to enable batch job 
+checkpoint/restart support when used with SLURM's <b>checkpoint/blcr</b> plugin.</p>
+
+<p>The <b>srun_cr</b> execute line options are identical to those of the 
+<b>srun</b> command.
+See "man srun" for details.</p>
+
+<p>After initialization, <b>srun_cr</b> registers a thread context callback
+function.
+Then it forks a process and executes "cr_run --omit srun" with its arguments.
+<b>cr_run</b> is employed to exclude the <b>srun</b> process from being dumped 
+upon checkpoint.
+All catchable signals except SIGCHLD sent to <b>srun_cr</b> will be forwarded 
+to the child <b>srun</b> process.
+SIGCHLD will be captured to mimic the exit status of <b>srun</b> when it exits.
+Then <b>srun_cr</b> loops waiting for termination of tasks being launched 
+from <b>srun</b>.</p>
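In BLCR terms the registration step looks roughly like the sketch below. The
libcr calls and flags shown (cr_init, cr_register_callback, cr_checkpoint,
CR_THREAD_CONTEXT, CR_CHECKPOINT_READY) are given from the BLCR client API as
best understood here and should be checked against libcr.h;
forward_request_to_tasks() is a hypothetical stand-in for the checkpoint
forwarding described below.

    #include <libcr.h>      /* BLCR client API; link with -lcr */

    /* Hypothetical stand-in for forwarding the checkpoint request to
     * launched tasks (slurm_checkpoint_tasks() in the real srun_cr). */
    static void forward_request_to_tasks(void)
    {
            /* slurm_checkpoint_tasks() would be called here. */
    }

    static int my_cr_callback(void *unused)
    {
            int rc;

            forward_request_to_tasks();     /* before our own dump */
            rc = cr_checkpoint(CR_CHECKPOINT_READY); /* let the dump proceed */
            if (rc > 0) {
                    /* Positive return: we are restarting from an image. */
            }
            return 0;
    }

    void register_cr_callback(void)
    {
            (void) cr_init();
            (void) cr_register_callback(my_cr_callback, NULL,
                                        CR_THREAD_CONTEXT);
    }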
+
+<p>The step launch logic of SLURM is augmented to check if <b>srun</b> is 
+running under <b>srun_cr</b>.
+If true, the environment variable <b>SLURM_SRUN_CR_SOCKET</b> should be present,
+the value of which is the address of a Unix domain socket created and listened
+to by <b>srun_cr</b>.
+After launching the tasks, <b>srun</b> tries to connect to the socket and sends
+the job ID, step ID and the nodes allocated to the step to <b>srun_cr</b>.</p>
+
+<p>Upon checkpoint, <b>srun_cr</b> checks to see if the tasks have been launched.
+If so, <b>srun_cr</b> first forwards the checkpoint request to the tasks by 
+calling the SLURM API <b>slurm_checkpoint_tasks()</b> before dumping its process
+context.</p>
+
+<p>Upon restart, <b>srun_cr</b> checks to see if the tasks have been previously 
+launched and checkpointed. 
+If true, the environment variable <b>SLURM_RESTART_DIR</b> is set to the 
+directory of the checkpoint image files of the tasks.
+Then <b>srun</b> is forked and executed again. 
+The environment variable will be used by the <b>srun</b> command to restart 
+execution of the tasks from the previous checkpoint.</p>
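The restart half needs no BLCR calls from the wrapper itself: it only has to
export SLURM_RESTART_DIR and re-exec srun. In POSIX terms, roughly (a sketch;
the directory and argument vector would come from srun_cr's saved state):

    #include <stdlib.h>
    #include <unistd.h>

    /* Sketch of the restart path: point srun at the checkpoint images
     * and run it again with the original arguments. */
    static void restart_srun(const char *image_dir, char *const argv[])
    {
            setenv("SLURM_RESTART_DIR", image_dir, 1);
            if (fork() == 0)
                    execvp("srun", argv);   /* child becomes srun */
    }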
+
+<h3>sbatch</h3>
+
+<p>Several options have been added to support checkpoint restart:</p>
+<ul>
+<li><b>--checkpoint</b>: Specify the interval between periodic checkpoint
+of a batch job, in seconds</li>
+<li><b>--checkpoint-dir</b>: Specify the directory where the checkpoint image
+files of a batch job will be stored.
+The default value is the current working directory.
+Checkpoint files will be of the form <i>"&lt;job_id&gt;.ckpt"</i> for jobs
+and <i>"&lt;job_id&gt;.&lt;step_id&gt;.ckpt"</i> for job steps.</li>
+</ul>
+
+<p>Environment variables are available for all of these options:</p>
+<ul>
+<li><b>SLURM_CHECKPOINT</b> is equivalent to <b>--checkpoint</b></li>
+<li><b>SLURM_CHECKPOINT_DIR</b> is equivalent to <b>--checkpoint-dir</b></li>
+</ul>
+
+<h3>scontrol</h3>
+
+<p><b>scontrol</b> is used to initiate checkpoint/restart requests.</p>
+<ul>
+<li><b>scontrol checkpoint create <i>jobid</i> [ImageDir=<i>dir</i>] 
+[MaxWait=<i>seconds</i>]</b><br>
+Requests a checkpoint on a specific job.
+For backward compatibility, if a job id is specified, all job steps of
+it are checkpointed. 
+If a batch job id is specified, the entire job is checkpointed including
+the batch shell and all running tasks of all job steps.
+Upon checkpoint, the task launch command must forward the requests to 
+tasks it launched.
+<ul>
+<li><b>ImageDir</b> specifies the directory in which to save the checkpoint 
+image files. If specified, this takes precedence over any <b>--checkpoint-dir</b>
+option specified when the job or job step was submitted.</li>
+<li><b>MaxWait</b> specifies the maximum time permitted for a checkpoint 
+request to complete. The request will be considered failed if not 
+completed in this time period.</li>
+</ul></li>
+
+<li><b>scontrol checkpoint create <i>jobid.stepid</i> [ImageDir=<i>dir</i>] 
+[MaxWait=<i>seconds</i>]</b><br>
+Requests a checkpoint on a specific job step.</li>
+
+<li><b>scontrol checkpoint restart <i>jobid</i> [ImageDir=<i>dir</i>] 
+[StickToNodes]</b><br>
+Restart a previously checkpointed batch job.
+<ul>
+<li><b>ImageDir</b> specifies the directory from which to read the checkpoint 
+image files.</li>
+<li><b>StickToNodes</b> specifies that the job should be restarted on the
+same set of nodes from which it was previously checkpointed.</li>
+</ul></li>
+</ul>
+
+<h2>Configuration</h2>
+
+<p>The following SLURM configuration parameter has been added:</p>
+<ul>
+<li><b>JobCheckpointDir</b> specifies the default directory for storing 
+or reading job checkpoint files.</li>
+</ul>
+
+<p class="footer"><a href="#top">top</a></p>
+
+<p style="text-align:center;">Last modified 11 March 2009</p>
+
+<!--#include virtual="footer.txt"-->
diff --git a/doc/html/checkpoint_plugins.shtml b/doc/html/checkpoint_plugins.shtml
index 286b631b6134542c00f42781d48ba5ea5925c0c6..d458732d43dd1cdb78c1a23c49be4a6ee5928615 100644
--- a/doc/html/checkpoint_plugins.shtml
+++ b/doc/html/checkpoint_plugins.shtml
@@ -17,8 +17,12 @@ abbreviation for the type of checkpoint mechanism.
 We recommend, for example:</p>
 <ul>
 <li><b>aix</b>&#151;AIX system checkpoint.</li>
+<li><b>blcr</b>&#151;
+<a href="https://ftg.lbl.gov/CheckpointRestart/CheckpointRestart.shtml">
+Berkeley Lab Checkpoint/Restart (BLCR)</a></li>
 <li><b>none</b>&#151;No job checkpoint.</li>
 <li><b>ompi</b>&#151;OpenMPI checkpoint (requires OpenMPI version 1.3 or higher).</li>
+<li><b>xlch</b>&#151;XLCH</li>
 </ul></p>
 
 <p>The <span class="commandline">plugin_name</span> and 
@@ -92,12 +96,19 @@ from a buffer.</p>
 the plugin should return SLURM_ERROR and set the errno to an appropriate value
 to indicate the reason for failure.</p>
 
-<p class="commandline">int slurm_ckpt_op ( uint16_t op, uint16_t data,
-struct step_record * step_ptr, time_t * event_time,
+<p class="commandline">int slurm_ckpt_op ( uint32_t job_id, uint32_t step_id,
+struct step_record *step_ptr, uint16_t op, uint16_t data,
+char *image_dir, time_t *event_time, 
 uint32_t *error_code, char **error_msg );</p>
 <p style="margin-left:.2in"><b>Description</b>: Perform some checkpoint operation on a 
 specific job step.</p>
 <p style="margin-left:.2in"><b>Arguments</b>:<br>
+<b>job_id</b> (input) identifies the job to be operated upon.
+May be SLURM_BATCH_SCRIPT for a batch job or NO_VAL for all steps of the 
+specified job.<br>
+<b>step_id</b> (input) identifies the job step to be operated upon.<br>
+<b>step_ptr</b> (input) pointer to the job step to be operated upon.
+Used by checkpoint/aix only.<br>
 <b>op</b> (input) specifies the operation to be performed. 
 Currently supported operations include 
 CHECK_ABLE (is job step currently able to be checkpointed),
@@ -107,8 +118,8 @@ CHECK_CREATE (create a checkpoint for this job step and continue its execution),
 CHECK_VACATE (create a checkpoint for this job step and terminate it),
 CHECK_RESTART (restart this previously checkpointed job step), and
 CHECK_ERROR (return checkpoint-specific error information for this job step).<br>
-<b>data</b> (input) operation-specific data.</br>
-<b>step_ptr</b> (input/output) identifies the job step to be operated upon.</br>
+<b>data</b> (input) operation-specific data.<br>
+<b>image_dir</b> (input) directory to be used to save or restore state.<br>
 <b>event_time</b> (output) identifies the time of a checkpoint or restart 
 operation.</br>
 <b>error_code</b> (output) returns checkpoint-specific error code 
@@ -117,8 +128,8 @@ associated with an operation.</br>
 associated with an operation.</p>
 <p style="margin-left:.2in"><b>Returns</b>: <br>
 SLURM_SUCCESS if successful. On failure,
-the plugin should return SLURM_ERROR and set the error_code and error_msg to an 
-appropriate value to indicate the reason for failure.</p>
+the plugin should return SLURM_ERROR and set the error_code and error_msg 
+to an appropriate value to indicate the reason for failure.</p>
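Because op multiplexes several operations, a plugin's slurm_ckpt_op() is in
practice a switch over the CHECK_* values listed above. A skeletal sketch
using the documented signature (the case bodies are placeholders; a real
plugin needs the slurmctld headers for struct step_record):

    extern int slurm_ckpt_op(uint32_t job_id, uint32_t step_id,
                             struct step_record *step_ptr, uint16_t op,
                             uint16_t data, char *image_dir,
                             time_t *event_time, uint32_t *error_code,
                             char **error_msg)
    {
            int rc = SLURM_SUCCESS;

            switch (op) {
            case CHECK_ABLE:        /* can this step checkpoint right now? */
                    break;
            case CHECK_CREATE:      /* write state under image_dir, set
                                     * *event_time, keep the step running */
                    break;
            case CHECK_VACATE:      /* checkpoint, then terminate the step */
                    break;
            case CHECK_RESTART:     /* restart from a previous image */
                    break;
            case CHECK_ERROR:       /* report via *error_code / *error_msg */
                    break;
            default:
                    rc = SLURM_ERROR;
            }
            return rc;
    }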
 
 <p class="commandline">int slurm_ckpt_comp ( struct step_record * step_ptr, time_t event_time,
 uint32_t error_code, char *error_msg );</p>
@@ -135,14 +146,47 @@ with an operation.</p>
 the plugin should return SLURM_ERROR and set the error_code and error_msg to an
 appropriate value to indicate the reason for failure.</p>
                                                                                                                        
+<p class="commandline">int slurm_ckpt_stepd_prefork ( void *slurmd_job );</p>
+<p style="margin-left:.2in"><b>Description</b>: Do preparation work for
+the checkpoint/restart support. This function is called by <b>slurmstepd</b>
+before forking the user tasks.</p>
+<p style="margin-left:.2in"><b>Arguments</b>:<br>
+<b>slurmd_job</b> (input) pointer to job structure internal to slurmstepd.</p>
+<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. 
+On failure, the plugin should return SLURM_ERROR and set the error_code 
+and error_msg to an appropriate value to indicate the reason for failure.</p>
+
+<p class="commandline">int slurm_ckpt_signal_tasks ( void *slurmd_job,
+char *image_dir );</p>
+<p style="margin-left:.2in"><b>Description</b>: Forward the checkpoint
+request to tasks managed by <b>slurmstepd</b>.</p>
+<p style="margin-left:.2in"><b>Arguments</b>:<br>
+<b>slurmd_job</b> (input) pointer to job structure internal to slurmstepd.<br>
+<b>image_dir</b> (input) directory to be used to save or restore state.</p>
+<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. 
+On failure, the plugin should return SLURM_ERROR and set the error_code 
+and error_msg to an appropriate value to indicate the reason for failure.</p>
+
+<p class="commandline">int slurm_ckpt_restart_task ( void *slurmd_job,
+char *image_dir, int gtid);</p>
+<p style="margin-left:.2in"><b>Description</b>: Restart the execution
+of a task from a checkpoint image, called by <b>slurmstepd</b>.</p>
+<p style="margin-left:.2in"><b>Arguments</b>:<br>
+<b>slurmd_job</b> (input) pointer to job structure internal to slurmstepd.<br>
+<b>image_dir</b> (input) directory to be used to save or restore state.<br>
+<b>gtid</b> (input) global task ID to be restarted.</p>
+<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. 
+On failure, the plugin should return SLURM_ERROR and set the error_code 
+and error_msg to an appropriate value to indicate the reason for failure.</p>
+
 
 <h2>Versioning</h2>
-<p> This document describes version 0 of the SLURM checkpoint API. 
+<p> This document describes version 100 of the SLURM checkpoint API. 
 Future releases of SLURM may revise this API. 
 A checkpoint plugin conveys its ability to implement a particular API 
 version using the mechanism outlined for SLURM plugins.</p>
 <p class="footer"><a href="#top">top</a></p>
 
-<p style="text-align:center;">Last modified 21 August 2007</p>
+<p style="text-align:center;">Last modified 10 March 2009</p>
 
 <!--#include virtual="footer.txt"-->
diff --git a/doc/html/configurator.html.in b/doc/html/configurator.html.in
index 50decd03b73028608800b61f23e71213d334052f..1c7c823e4a277f90cebcc5ab2f79b451a699a522 100644
--- a/doc/html/configurator.html.in
+++ b/doc/html/configurator.html.in
@@ -1,6 +1,6 @@
 <!--
 Copyright (C) 2005-2007 The Regents of the University of California.
-Copyright (C) 2008 Lawrence Livermore National Security.
+Copyright (C) 2008-2009 Lawrence Livermore National Security.
 Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 Written by Morris Jette <jette1@llnl.gov> and Danny Auble <da@llnl.gov>
 
@@ -150,24 +150,30 @@ function displayfile()
    "#DisableRootJobs=NO <br>" +
    "#EnforcePartLimits=NO <br>" +
    get_field("Epilog",document.config.epilog) + "<br>" +
+   "#PrologSlurmctld= <br>" +
    "#FirstJobId=1 <br>" +
+   "#JobCheckpointDir=/var/slurm/checkpoint <br>" +
    get_field("JobCredentialPrivateKey", document.config.private_key) + "<br>" +
    get_field("JobCredentialPublicCertificate", document.config.public_key) + "<br>" +
    "#JobFileAppend=0 <br>" +
    "#JobRequeue=1 <br>" +
+   "#KillOnBadExit=0 <br>" +
    "#Licenses=foo*4,bar <br>" +
    "#MailProg=/bin/mail <br>" +
    "#MaxJobCount=5000 <br>" +
    "MpiDefault=" + get_radio_value(document.config.mpi_default) + "<br>" +
+   "#MpiParams=ports:#-# <br>" +
    "#PluginDir= <br>" +
    "#PlugStackConfig= <br>" +
    "#PrivateData=jobs <br>" +
    "ProctrackType=proctrack/" + get_radio_value(document.config.proctrack_type) + "<br>" +
    get_field("Prolog",document.config.prolog) + "<br>" +
+   "#PrologSlurmctld= <br>" +
    "#PropagatePrioProcess=0 <br>" +
    "#PropagateResourceLimits= <br>" +
    "#PropagateResourceLimitsExcept= <br>" +
    "ReturnToService=" + get_radio_value(document.config.return_to_service) + "<br>" +
+   "#SallocDefaultCommand= <br>" +
    "SlurmctldPidFile=" + document.config.slurmctld_pid_file.value + "<br>" +
    "SlurmctldPort=" + document.config.slurmctld_port.value + "<br>" +
    "SlurmdPidFile=" + document.config.slurmd_pid_file.value + "<br>" +
@@ -182,6 +188,7 @@ function displayfile()
    "TaskPlugin=task/" + get_radio_value(document.config.task_plugin) + "<br>" +
    get_task_plugin_param() + "<br>" +
    get_field("TaskProlog",document.config.task_prolog) + "<br>" +
+   "#TopologyPlugin=topology/tree <br>" +
    "#TmpFs=/tmp <br>" +
    "#TrackWCKey=no <br>" +
    "#TreeWidth= <br>" +
@@ -192,14 +199,17 @@ function displayfile()
    "# <br>" +
    "# TIMERS <br>" +
    "#BatchStartTimeout=10 <br>" +
+   "#CompleteWait=0 <br>" +
    "#EpilogMsgTime=2000 <br>" +
    "#GetEnvTimeout=2 <br>" +
    "#HealthCheckInterval=0 <br>" +
    "#HealthCheckProgram= <br>" +
    "InactiveLimit=" + document.config.inactive_limit.value + "<br>" +
-   "MinJobAge=" + document.config.min_job_age.value + "<br>" +
    "KillWait=" + document.config.kill_wait.value + "<br>" +
    "#MessageTimeout=10 <br>" +
+   "#ResvOverRun=0 <br>" +
+   "MinJobAge=" + document.config.min_job_age.value + "<br>" +
+   "#OverTimeLimit=0 <br>" +
    "SlurmctldTimeout=" + document.config.slurmctld_timeout.value + "<br>" +
    "SlurmdTimeout=" + document.config.slurmd_timeout.value + "<br>" +
    "#UnkillableStepProgram= <br>" +
@@ -219,6 +229,19 @@ function displayfile()
    get_select_type_params() + "<br>" +
    "# <br>" +
    "# <br>" +
+   "# JOB PRIORITY <br>" +
+   "#PriorityType=priority/basic <br>" +
+   "#PriorityDecayHalfLife= <br>" +
+   "#PriorityFavorSmall= <br>" +
+   "#PriorityMaxAge= <br>" +
+   "#PriorityUsageResetPeriod= <br>" +
+   "#PriorityWeightAge= <br>" +
+   "#PriorityWeightFairshare= <br>" +
+   "#PriorityWeightJobSize= <br>" +
+   "#PriorityWeightPartition= <br>" +
+   "#PriorityWeightQOS= <br>" +
+   "# <br>" +
+   "# <br>" +
    "# LOGGING AND ACCOUNTING <br>" +
    "#AccountingStorageEnforce=0 <br>" +
    get_field("AccountingStorageHost",document.config.accounting_storage_host) + "<br>" +
@@ -228,6 +251,7 @@ function displayfile()
    "AccountingStorageType=accounting_storage/" + get_accounting_storage_type_field(get_radio_value(document.config.job_acct_gather_type), document.config.accounting_storage_type) + "<br>" +
    get_field("AccountingStorageUser",document.config.accounting_storage_user) + "<br>" +
    get_field("ClusterName",document.config.cluster_name) + "<br>" +
+   "#DebugFlags= <br>" +
    get_field("JobCompHost",document.config.job_comp_host) + "<br>" +
    get_field("JobCompLoc",document.config.job_comp_loc) + "<br>" +
    get_field("JobCompPass",document.config.job_comp_pass) + "<br>" +
@@ -245,6 +269,8 @@ function displayfile()
    "# POWER SAVE SUPPORT FOR IDLE NODES (optional) <br>" +
    "#SuspendProgram= <br>" +
    "#ResumeProgram= <br>" +
+   "#SuspendTimeout= <br>" +
+   "#ResumeTimeout= <br>" +
    "#ResumeRate= <br>" +
    "#SuspendExcNodes= <br>" +
    "#SuspendExcParts= <br>" +
@@ -256,7 +282,7 @@ function displayfile()
    "NodeName=" + document.config.node_name.value +
    get_field2(" NodeAddr",document.config.node_addr) +
    get_field2(" Procs",document.config.procs) +
-   get_field2(" Memory",document.config.memory) +
+   get_field2(" RealMemory",document.config.memory) +
    get_field2(" Sockets",document.config.sockets) +
    get_field2(" CoresPerSocket",document.config.cores_per_socket) +
    get_field2(" ThreadsPerCore",document.config.threads_per_core) +
@@ -367,8 +393,9 @@ Name of the one partition to be created
 <input type="text" name="max_time" value="INFINITE"> <B>MaxTime</B>: 
 Maximum time limit of jobs in minutes or INFINITE
 <P>
-The following parameters are optional,
-but can be specified when using FastSchedule=1:
+The following parameters describe a node's configuration.
+Set a value for <B>Procs</B>.
+The other parameters are optional, but provide more control over scheduled resources:
 <P>
 <input type="text" name="procs" value="1"> <B>Procs</B>: Count of processors 
 on each compute node.
@@ -391,7 +418,7 @@ the logical number of processors per socket.
 <B>ThreadsPerCore</B>:
 Number of logical threads in a single physical core.
 <P>
-<input type="text" name="memory" value=""> <B>Memory</B>: Amount 
+<input type="text" name="memory" value=""> <B>RealMemory</B>: Amount 
 of real memory. This parameter is required when specifying Memory as a 
 consumable resource with the select/cons_res plug-in. See below 
 under Resource Selection. 
@@ -454,8 +481,8 @@ Select one value for <B>CryptoType</B>:<BR>
 <A href="http://www.openssl.org/">OpenSSL</A> 
 <P>
 Define the location of public and private keys used by SLURM's 
-cryptographic signature generation plugin (CryptoType).
-These values are only used if CryptoType=OpenSSL.
+cryptographic signature generation plugin (CryptoType).<br>
+<b>These values are only used if CryptoType=OpenSSL.</b><br>
 These files need to be generated by the SLURM administrator.
 Specify fully qualified pathnames.
 <P>
@@ -814,6 +841,6 @@ before terminating all remaining tasks. A value of zero indicates unlimited wait
 </FORM>
 <HR>
 <P class="footer">LLNL-WEB-402631<BR>
-Last modified 17 July 2008</P>
+Last modified 13 May 2009</P>
 </BODY>
 
diff --git a/doc/html/cons_res.shtml b/doc/html/cons_res.shtml
index 368810a9ebc241aab3849330d9073e34b8b017e7..db690ac339cc3e17b1739cb967115ce43d1be60f 100644
--- a/doc/html/cons_res.shtml
+++ b/doc/html/cons_res.shtml
@@ -459,7 +459,7 @@ JOBID PARTITION   NAME   USER  ST   TIME  NODES NODELIST(REASON)
     5       lsf  sleep   root   R   1:52      3 linux[01-03]
 </pre>
 
-<p>Job 3 and Job 4 have finshed and Job 5 is still running on nodes linux[01-03].</p>
+<p>Job 3 and Job 4 have finished and Job 5 is still running on nodes linux[01-03].</p>
 
 <p>The advantage of the consumable resource scheduling policy
 is that the job throughput can increase dramatically. The overall job
diff --git a/doc/html/cons_res_share.shtml b/doc/html/cons_res_share.shtml
index 2221f4a2e586d6f672b40f385f4cdca1cf7f1dd6..84e6db3fa4b143cf1025bc5f088682b3e1c68edf 100644
--- a/doc/html/cons_res_share.shtml
+++ b/doc/html/cons_res_share.shtml
@@ -41,8 +41,9 @@ The following table describes this new functionality in more detail:
 <TD>Whole nodes are allocated to jobs. No node will run more than one job.</TD>
 </TR><TR>
 <TD>Shared=YES</TD>
-<TD>Same as Shared=FORCE if job request specifies --shared option.
-Otherwise same as Shared=NO.</TD>
+<TD>By default same as Shared=NO. Nodes allocated to a job may be shared with
+other jobs if each job allows sharing via the <CODE>srun --shared</CODE>
+option.</TD>
 </TR><TR>
 <TD>Shared=FORCE</TD>
 <TD>Whole nodes are allocated to jobs. A node may run more than one job.</TD>
@@ -55,8 +56,9 @@ SelectTypeParameters=<B>CR_Core_Memory</B></TD>
 <TD>Cores are allocated to jobs. No core will run more than one job.</TD>
 </TR><TR>
 <TD>Shared=YES</TD>
-<TD>Allocate whole nodes if job request specifies --exclusive option.
-Otherwise same as Shared=FORCE.</TD>
+<TD>By default same as Shared=NO. Cores allocated to a job may be shared with
+other jobs if each job allows sharing via the <CODE>srun --shared</CODE>
+option.</TD>
 </TR><TR>
 <TD>Shared=FORCE</TD>
 <TD>Cores are allocated to jobs. A core may run more than one job.</TD>
@@ -69,8 +71,9 @@ SelectTypeParameters=<B>CR_CPU_Memory</B></TD>
 <TD>CPUs are allocated to jobs. No CPU will run more than one job.</TD>
 </TR><TR>
 <TD>Shared=YES</TD>
-<TD>Allocate whole nodes if job request specifies --exclusive option.
-Otherwise same as Shared=FORCE.</TD>
+<TD>By default same as Shared=NO. CPUs allocated to a job may be shared with
+other jobs if each job allows sharing via the <CODE>srun --shared</CODE>
+option.</TD>
 </TR><TR>
 <TD>Shared=FORCE</TD>
 <TD>CPUs are allocated to jobs. A CPU may run more than one job.</TD>
@@ -83,8 +86,9 @@ SelectTypeParameters=<B>CR_Socket_Memory</B></TD>
 <TD>Sockets are allocated to jobs. No socket will run more than one job.</TD>
 </TR><TR>
 <TD>Shared=YES</TD>
-<TD>Allocate whole nodes if job request specifies --exclusive option.
-Otherwise same as Shared=FORCE.</TD>
+<TD>By default same as Shared=NO. Sockets allocated to a job may be shared with
+other jobs if each job allows sharing via the <CODE>srun --shared</CODE>
+option.</TD>
 </TR><TR>
 <TD>Shared=FORCE</TD>
 <TD>Sockets are allocated to jobs. A socket may run more than one job.</TD>
@@ -110,9 +114,9 @@ busy nodes that have more than half of the CPUs available for use. The
 <CODE>select/linear</CODE> plugin simply counts jobs on nodes, and does not
 track the CPU usage on each node.
 </P><P>
-This new functionality also supports the new
-<CODE>Shared=FORCE:&lt;num&gt;</CODE> syntax. If <CODE>Shared=FORCE:3</CODE> is
-configured with <CODE>select/cons_res</CODE> and <CODE>CR_Core</CODE> or
+This new sharing functionality in the select/cons_res plugin also supports the
+new <CODE>Shared=FORCE:&lt;num&gt;</CODE> syntax. If <CODE>Shared=FORCE:3</CODE>
+is configured with <CODE>select/cons_res</CODE> and <CODE>CR_Core</CODE> or
 <CODE>CR_Core_Memory</CODE>, then the <CODE>select/cons_res</CODE> plugin will
 run up to 3 jobs on each <U>core</U> of each node in the partition. If
 <CODE>CR_Socket</CODE> or <CODE>CR_Socket_Memory</CODE> is configured, then the
@@ -122,10 +126,28 @@ of each node in the partition.
 <H3>Nodes in Multiple Partitions</H3>
 <P>
 SLURM has supported configuring nodes in more than one partition since version
-0.7.0. The <CODE>Shared=FORCE</CODE> support in the <CODE>select/cons_res</CODE>
-plugin accounts for this "multiple partition" support. Here are several
-scenarios with the <CODE>select/cons_res</CODE> plugin enabled to help
-understand how all of this works together:
+0.7.0. The following table describes how nodes configured in two partitions with
+different <CODE>Shared</CODE> settings will be allocated to jobs. Note that
+"shared" jobs are jobs that are submitted to partitions configured with
+<CODE>Shared=FORCE</CODE> or with <CODE>Shared=YES</CODE> and the job requested
+sharing with the <CODE>srun --shared</CODE> option. Conversely, "non-shared"
+jobs are jobs that are submitted to partitions configured with
+<CODE>Shared=NO</CODE> or <CODE>Shared=YES</CODE> and the job did <U>not</U>
+request sharable resources.
+</P>
+<TABLE CELLPADDING=3 CELLSPACING=1 BORDER=1>
+<TR><TH>&nbsp;</TH><TH>First job "sharable"</TH><TH>First job not
+"sharable"</TH></TR>
+<TR><TH>Second job "sharable"</TH><TD>Both jobs can run on the same nodes and may
+share resources</TD><TD>Jobs do not run on the same nodes</TD></TR>
+<TR><TH>Second job not "sharable"</TH><TD>Jobs do not run on the same nodes</TD>
+<TD>Jobs can run on the same nodes but will not share resources</TD></TR>
+</TABLE>
+<P>
+The next table contains several
+scenarios with the <CODE>select/cons_res</CODE> plugin enabled to further
+clarify how a node is used when it is configured in more than one partition and
+the partitions have different "Shared" policies:
 </P>
 <TABLE CELLPADDING=3 CELLSPACING=1 BORDER=1>
 <TR><TH>SLURM configuration</TH>
@@ -185,6 +207,12 @@ having memory pages swapped out and severely degraded performance.
 <TD>Memory allocation is not tracked. Jobs are allocated to nodes without
 considering if there is enough free memory. Swapping could occur!</TD>
 </TR><TR>
+<TD>SelectType=<B>select/linear</B> plus<BR>
+SelectTypeParameters=<B>CR_Memory</B></TD>
+<TD>Memory allocation is tracked. Nodes that do not have enough available
+memory to meet the job's memory requirement will not be allocated to the job.
+</TD>
+</TR><TR>
 <TD>SelectType=<B>select/cons_res</B><BR>
 Plus one of the following:<BR>
 SelectTypeParameters=<B>CR_Core</B><BR>
@@ -200,32 +228,38 @@ SelectTypeParameters=<B>CR_Core_Memory</B><BR>
 SelectTypeParameters=<B>CR_CPU_Memory</B><BR>
 SelectTypeParameters=<B>CR_Socket_Memory</B></TD>
 <TD>Memory allocation for all jobs are tracked. Nodes that do not have enough
 available memory to meet the job's memory requirement will not be allocated to
 the job.</TD>
 </TR>
 </TABLE>
-<P>Users can specify their job's memory requirements one of two ways.
-<CODE>--mem=&lt;num&gt;</CODE> can be used to specify the job's memory 
-requirement on a per allocated node basis. This option is probably best 
-suited for use with the <CODE>select/linear</CODE> plugin, which allocates 
-whole nodes to jobs. 
-<CODE>--mem-per-cpu=&lt;num&gt;</CODE> can be used to specify the job's 
-memory requirement on a per allocated CPU basis. This is probably best
-suited for use with the <CODE>select/cons_res</CODE> plugin which can 
+<P>Users can specify their job's memory requirements in one of two ways. The
+<CODE>srun --mem=&lt;num&gt;</CODE> option can be used to specify the job's
+memory requirement on a per allocated node basis. This option is recommended 
+for use with the <CODE>select/linear</CODE> plugin, which allocates 
+whole nodes to jobs. The
+<CODE>srun --mem-per-cpu=&lt;num&gt;</CODE> option can be used to specify the
+job's memory requirement on a per allocated CPU basis. This is recommended
+for use with the <CODE>select/cons_res</CODE> plugin, which can 
 allocate individual CPUs to jobs.</P>
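+<P>For example (a sketch; the option values below are illustrative only):</P>
+<pre>
+# Per-node memory limit (MB), suited to select/linear:
+srun --mem=2048 -N2 a.out
+
+# Per-CPU memory limit (MB), suited to select/cons_res:
+srun --mem-per-cpu=512 -n16 a.out
+</pre>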
 
 <P>Default and maximum values for memory on a per node or per CPU basis can 
-be configued using the following options: <CODE>DefMemPerCPU</CODE>,
-<CODE>DefMemPerNode</CODE>, <CODE>MaxMemPerCPU</CODE> and <CODE>MaxMemPerNode</CODE>.
+be configured by the system administrator using the following
+<CODE>slurm.conf</CODE> options: <CODE>DefMemPerCPU</CODE>,
+<CODE>DefMemPerNode</CODE>, <CODE>MaxMemPerCPU</CODE> and
+<CODE>MaxMemPerNode</CODE>.
 Users can use the <CODE>--mem</CODE> or <CODE>--mem-per-cpu</CODE> option
-at job submission time to specify their memory requirements.
-Enforcement of a job's memory allocation is performed by the accounting 
-plugin, which periodically gathers data about running jobs. Set 
+at job submission time to override the default value, but they cannot exceed
+the maximum value.
+</P><P>
+Enforcement of a job's memory allocation is performed by setting the "maximum
+data segment size" and the "maximum virtual memory size" system limits to the
+appropriate values before launching the tasks. Enforcement is also managed by
+the accounting plugin, which periodically gathers data about running jobs. Set 
 <CODE>JobAcctGather</CODE> and <CODE>JobAcctFrequency</CODE> to 
 values suitable for your system.</P>
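+<P>A minimal <CODE>slurm.conf</CODE> sketch combining these options
+(parameter names per the slurm.conf man page; the values shown are
+illustrative only):</P>
+<pre>
+SelectType=select/cons_res
+SelectTypeParameters=CR_Core_Memory
+# Default and maximum memory per allocated CPU (in MB)
+DefMemPerCPU=512
+MaxMemPerCPU=2048
+# Gather job memory usage every 30 seconds
+JobAcctGatherType=jobacct_gather/linux
+JobAcctGatherFrequency=30
+</pre>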
 
 <p class="footer"><a href="#top">top</a></p>
 
-<p style="text-align:center;">Last modified 8 July 2008</p>
+<p style="text-align:center;">Last modified 2 December 2008</p>
 
 <!--#include virtual="footer.txt"-->
diff --git a/doc/html/cray.shtml b/doc/html/cray.shtml
new file mode 100644
index 0000000000000000000000000000000000000000..0d92a64b4ae903590b6b4c981bd1099e60beab46
--- /dev/null
+++ b/doc/html/cray.shtml
@@ -0,0 +1,145 @@
+<!--#include virtual="header.txt"-->
+
+<h1>SLURM User and Administrator Guide for Cray systems</h1>
+
+<b>NOTE: As of January 2009, the SLURM interface to Cray systems is incomplete.</b>
+
+<h2>User Guide</h2>
+
+<p>This document describes the unique features of SLURM on
+Cray computers.
+You should be familiar with SLURM's mode of operation on Linux clusters 
+before studying the relatively few differences in Cray system 
+operation described in this document.</p>
+
+<p>SLURM's primary mode of operation is designed for use on clusters with
+nodes configured in a one-dimensional space. 
+Minor changes were required for the <i>smap</i> and <i>sview</i> tools 
+to map nodes in a three-dimensional space. 
+Some changes are also desirable to optimize job placement in three-dimensional 
+space.</p>
+
+<p>SLURM has added an interface to Cray's Application Level Placement Scheduler
+(ALPS). The ALPS <i>aprun</i> command must be used for task launch rather than SLURM's 
+<i>srun</i> command. You should create a resource reservation using SLURM's
+<i>salloc</i> or <i>sbatch</i> command and execute <i>aprun</i> from within
+that allocation.</p>
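+<p>For example (a sketch; the node count, task count and program name
+are illustrative only):</p>
+<pre>
+salloc -N 2 bash
+aprun -n 16 ./a.out
+</pre>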
+
+<h2>Administrator Guide</h2>
+
+<h3>Cray/ALPS configuration</h3>
+
+<p>Node names must have a three-digit suffix describing their 
+zero-origin position in the X-, Y- and Z-dimension respectively (e.g. 
+"tux000" for X=0, Y=0, Z=0; "tux123" for X=1, Y=2, Z=3). 
+Rectangular prisms of nodes can be specified in SLURM commands and
+configuration files using the system name prefix with the end-points 
+enclosed in square brackets and separated by an "x". 
+For example "tux[620x731]" is used to represent the eight nodes in a 
+block with endpoints at "tux620" and "tux731" (tux620, tux621, tux630, 
+tux631, tux720, tux721, tux730, tux731).
+<b>NOTE:</b> We anticipate that Cray will provide node coordinate
+information via the ALPS interface in the future, which may result
+in a more flexible node naming convention.</p>
+
+<p>In ALPS, configure each node to be scheduled using SLURM as type
+BATCH.</p>
+
+<h3>SLURM configuration</h3>
+
+<p>Four variables must be defined in the <i>config.h</i> file: 
+<i>APBASIL_LOC</i> (location of the <i>apbasil</i> command), 
+<i>HAVE_FRONT_END</i>, <i>HAVE_CRAY_XT</i> and <i>HAVE_3D</i>.
+The <i>apbasil</i> command should automatically be found. 
+If that is not the case, please notify us of its location on your system
+and we will add that to the search paths tested at configure time.
+The other variable definitions can be set in several different 
+ways depending upon how SLURM is being built:
+<ol>
+<li>Execute the <i>configure</i> command with the option 
+<i>--enable-cray-xt</i> <b>OR</b></li>
+<li>Execute the <i>rpmbuild</i> command with the option 
+<i>--with cray_xt</i> <b>OR</b></li>
+<li>Add <i>%with_cray_xt 1</i> to your <i>~/.rpmmacros</i> file.</li>
+</ol></p>
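+<p>For example, a build from source might look like the following sketch
+(installation paths and other configure options omitted):</p>
+<pre>
+./configure --enable-cray-xt
+make
+make install
+</pre>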
+
+<p>One <i>slurmd</i> will be used to run all of the batch jobs on
+the system. It is from here that users will execute <i>aprun</i>
+commands to launch tasks.
+This is specified in the <i>slurm.conf</i> file by using the
+<i>NodeName</i> field to identify the compute nodes and both the
+<i>NodeAddr</i> and <i>NodeHostname</i> fields to identify the 
+computer where <i>slurmd</i> runs (normally some sort of front-end node)
+as seen in the examples below.</p>
+
+<p>Next you need to select from two options for the resource selection 
+plugin (the <i>SelectType</i> option in SLURM's <i>slurm.conf</i> configuration
+file):
+<ol>
+<li><b>select/cons_res</b> - Performs a best-fit algorithm based upon a 
+one-dimensional space to allocate whole nodes, sockets, or cores to jobs
+based upon other configuration parameters.</li>
+<li><b>select/linear</b> - Performs a best-fit algorithm based upon a 
+one-dimensional space to allocate whole nodes to jobs.</li>
+</ol>
+
+<p>In order for <i>select/cons_res</i> or <i>select/linear</i> to 
+allocate resources physically nearby in three-dimensional space, the 
+nodes must be specified in SLURM's <i>slurm.conf</i> configuration file in 
+such a fashion that those nearby in <i>slurm.conf</i> (one-dimensional
+space) are also nearby in the physical three-dimensional space. 
+If the nodes in SLURM's <i>slurm.conf</i> configuration 
+file are defined on one line (e.g. <i>NodeName=tux[000x333]</i>),
+SLURM will automatically perform that conversion using a 
+<a href="http://en.wikipedia.org/wiki/Hilbert_curve">Hilbert curve</a>.
+Otherwise you may construct your own node name ordering and list them
+one node per line in <i>slurm.conf</i>.
+Note that each node must be listed exactly once and consecutive
+nodes should be nearby in three-dimensional space. 
+Also note that each node must be defined individually rather than using 
+a hostlist expression in order to preserve the ordering (there is no 
+problem using a hostlist expression in the partition specification after
+the nodes have already been defined).
+The open source code used by SLURM to generate the Hilbert curve is 
+included in the distribution at <i>contribs/skilling.c</i> in the event
+that you wish to experiment with it to generate your own node ordering.
+Two examples of SLURM configuration files are shown below:</p>
+
+<pre>
+# slurm.conf for Cray XT system of size 4x4x4
+# Parameters removed here
+SelectType=select/linear
+NodeName=DEFAULT Procs=8 RealMemory=2048 State=Unknown
+NodeName=tux[000x333] NodeAddr=front_end NodeHostname=front_end
+PartitionName=debug Nodes=tux[000x333] Default=Yes State=UP
+</pre>
+
+<pre>
+# slurm.conf for Cray XT system of size 4x4x4
+# Parameters removed here
+SelectType=select/linear
+NodeName=DEFAULT Procs=8 RealMemory=2048 State=Unknown
+NodeName=tux000 NodeAddr=front_end NodeHostname=front_end
+NodeName=tux100 NodeAddr=front_end NodeHostname=front_end
+NodeName=tux110 NodeAddr=front_end NodeHostname=front_end
+NodeName=tux010 NodeAddr=front_end NodeHostname=front_end
+NodeName=tux011 NodeAddr=front_end NodeHostname=front_end
+NodeName=tux111 NodeAddr=front_end NodeHostname=front_end
+NodeName=tux101 NodeAddr=front_end NodeHostname=front_end
+NodeName=tux001 NodeAddr=front_end NodeHostname=front_end
+PartitionName=debug Nodes=tux[000x111] Default=Yes State=UP
+</pre>
+
+<p>In both of the examples above, the node names output by the
+<i>scontrol show nodes</i> command will be ordered as defined (sequentially 
+along the Hilbert curve or per the ordering in the <i>slurm.conf</i> file)
+rather than in numeric order (e.g. "tux001" follows "tux101" rather 
+than "tux000"). 
+SLURM partitions should contain nodes which are defined sequentially
+by that ordering for optimal performance.</p>
+
+<p class="footer"><a href="#top">top</a></p>
+
+<p style="text-align:center;">Last modified 9 January 2009</p></td>
+
+<!--#include virtual="footer.txt"-->
diff --git a/doc/html/crypto_plugins.shtml b/doc/html/crypto_plugins.shtml
index 6de9f151245acbf652d8fd2232b788e687b66b66..86b04f8ac4377cb0f5c813d1e5187fa89aae4fa3 100644
--- a/doc/html/crypto_plugins.shtml
+++ b/doc/html/crypto_plugins.shtml
@@ -12,7 +12,7 @@ This is version 0 of the API.</p>
 <p>SLURM cryptographic plugins are SLURM plugins that implement 
 a digital signature mechanism. 
 The slurmctld daemon generates a job step credential, signs it, 
-and tranmits it to an srun program. 
+and transmits it to an srun program. 
 The srun program then transmits it to the slurmd daemons directly. 
 The slurmctld daemon does not communicate directly with the slurmd 
 daemons at this time for performance reasons, but the job step 
diff --git a/doc/html/dist_plane.shtml b/doc/html/dist_plane.shtml
index 72bedd988cde9486cfae063cb8c84119bc3bcae2..c910cb27e6a446842f6e72e7e032ffc9f88281e2 100644
--- a/doc/html/dist_plane.shtml
+++ b/doc/html/dist_plane.shtml
@@ -14,7 +14,7 @@ where <i>plane_size</i> is the requested plane/block size.
 <p>In the examples below we assume we have 21 tasks and that the
 task list is: 0, 1, 2, 3, 4, ..., 19, 20.
 
-<p>On <u>One (1)</u> node: <i>srun -N 1-1 -n 21 -m plane=4 -s <...></i>.
+<p>On <u>One (1)</u> node: <i>srun -N 1-1 -n 21 -m plane=4 <...></i>.
 
 <p>The distribution results in a plane distribution with plane_size 21.
 Even thought the user specified a plane_size of 4 the final plane
@@ -23,10 +23,10 @@ distribution results in a plane_size of 21.
 <p>
 <center>
 <img src="plane_ex1.gif">
-<p>Figure 1: Process layout for <i>srun -N 1-1 -n 21 -m plane=4 -s <...></i>
+<p>Figure 1: Process layout for <i>srun -N 1-1 -n 21 -m plane=4 <...></i>
 </center>
 
-<p>On <u>four (4)</u> nodes: <i>srun -N 4-4 -n 21 -m plane=4 -s <...></i>.
+<p>On <u>four (4)</u> nodes: <i>srun -N 4-4 -n 21 -m plane=4 <...></i>.
 
 <p>The plane distribution with a plane_size of 4 results in the
 following allocation of the task ids:
@@ -34,10 +34,10 @@ following allocation of the task ids:
 <p>
 <center>
 <img src="plane_ex2.gif">
-<p>Figure 2: Process layout for <i>srun -N 4-4 -n 21 -m plane=4 -s <...> </i>
+<p>Figure 2: Process layout for <i>srun -N 4-4 -n 21 -m plane=4 <...> </i>
 </center>
 
-<p>On <u>four (4)</u> nodes: <i>srun -N 4-4 -n 21 -m plane=2 -s <...>
+<p>On <u>four (4)</u> nodes: <i>srun -N 4-4 -n 21 -m plane=2 <...>
 </i>. 
 
 <p>The plane distribution with a plane_size of 2 results in the
@@ -46,7 +46,7 @@ following allocation of the task ids:
 <p>
 <center>
 <img src="plane_ex3.gif">
-<p>Figure 3: Process layout for <i>srun -N 4-4 -n 21 -m plane=2 -s <...></i>
+<p>Figure 3: Process layout for <i>srun -N 4-4 -n 21 -m plane=2 <...></i>
 </center>
 
 <p class="footer"><a href="#top">top</a></p>
@@ -112,38 +112,44 @@ affinity enabled</h3>
 <p>In the examples below we assume we have 21 tasks and that the
 task list is: 0, 1, 2, 3, 4, ..., 19, 20.
 
-<p>On <u>One (1)</u> node: <i>srun -N 1-1 -n 21 -m plane=4 -s <...></i>.
+<p>On <u>One (1)</u> node: 
+<i>srun -N 1-1 -n 21 -m plane=4 --cpu_bind=core <...></i>.
 Even thought the user specified a plane_size of 4 the final plane
 distribution results in a plane distribution with plane_size=8.
 
 <p>
 <center>
 <img src="plane_ex5.gif">
-<p>Figure 5: Process layout for <i>srun -N 1-1 -n 21 -m plane=4 -s <...></i>.
+<p>Figure 5: Process layout for 
+<i>srun -N 1-1 -n 21 -m plane=4 --cpu_bind=core <...></i>.
 </center>
 
-<p>On <u>four (4)</u> nodes: <i>srun -N 4-4 -n 21 -m plane=4 -s <...>
-</i>. The plane distribution with a plane_size of 4 results in the
+<p>On <u>four (4)</u> nodes: 
+<i>srun -N 4-4 -n 21 -m plane=4 --cpu_bind=core <...></i>. 
+The plane distribution with a plane_size of 4 results in the
 following allocation of the task ids:
 
 <p>
 <center>
-<img src="plane_ex6.gif">
-<p>Figure 6: Process layout for <i>srun -N 4-4 -n 21 -m plane=4 -s <...></i>.
+<img src="plane_ex6.gif" width=600>
+<p>Figure 6: Process layout for 
+<i>srun -N 4-4 -n 21 -m plane=4 --cpu_bind=core <...></i>.
 </center>
 
-<p>On <u>four (4)</u> nodes: <i>srun -N 4-4 -n 21 -m plane=2 -s <...>
+<p>On <u>four (4)</u> nodes: 
+<i>srun -N 4-4 -n 21 -m plane=2 --cpu_bind=core <...>
 </i>. The plane distribution with a plane_size of 2 results in the
 following allocation of the task ids:
 
 <p>
 <center>
-<img src="plane_ex7.gif">
-<p>Figure 7: Process layout for <i>srun -N 4-4 -n 21 -m plane=2 -s <...></i>.
+<img src="plane_ex7.gif" width=600>
+<p>Figure 7: Process layout for 
+<i>srun -N 4-4 -n 21 -m plane=2 --cpu_bind=core <...></i>.
 </center>
 
 <p class="footer"><a href="#top">top</a></p>
 
-<p style="text-align:center;">Last modified 12 October 2006</p>
+<p style="text-align:center;">Last modified 1 April 2009</p>
 
 <!--#include virtual="footer.txt"-->
diff --git a/doc/html/documentation.shtml b/doc/html/documentation.shtml
index 0570953900c5fcb90556e6e308e0c0eb6fb06d98..0d575993a305aa3b11ff75b617a592eebf1b0288 100644
--- a/doc/html/documentation.shtml
+++ b/doc/html/documentation.shtml
@@ -6,51 +6,76 @@ Also see <a href="publications.html">Publications and Presentations</a>.
 
 <h2>SLURM Users</h2>
 <ul>
-<li><a href="quickstart.shtml">Quick Start User Guide</a></li>
-<li><a href="mc_support.shtml">Support for Multi-core/Multi-threaded Architectures</a></li>
-<li><a href="quickstart.shtml#mpi">Guide to MPI Use</a></li>
-<li><a href="bluegene.shtml">Blue Gene User and Administrator Guide</a></li>
-<li><a href="ibm.shtml">IBM AIX User and Administrator Guide</a></li>
+<li><a href="quickstart.html">Quick Start User Guide</a></li>
+<li><a href="mpi_guide.html">MPI Use Guide</a></li>
+<li><a href="mc_support.html">Support for Multi-core/Multi-threaded Architectures</a></li>
+<li><a href="checkpoint_blcr.html">SLURM Checkpoint/Restart with BLCR</a></li>
+<li>Specific Systems</li>
+<ul>
+<li><a href="bluegene.html">Blue Gene User and Administrator Guide</a></li>
+<li><a href="cray.html">Cray User and Administrator Guide</a></li>
+<li><a href="ibm.html">IBM AIX User and Administrator Guide</a></li>
+</ul>
 </ul>
 
 <h2>SLURM Administrators</h2>
 <ul>
-<li><a href="quickstart_admin.shtml">Quick Start Administrator Guide</a></li>
+<li><a href="quickstart_admin.html">Quick Start Administrator Guide</a></li>
 <li><a href="configurator.html">Configuration Tool</a></li>
-<li><a href="troubleshoot.shtml">Troubleshooting Guide</a></li>
-<li><a href="big_sys.shtml">Large Cluster Administration Guide</a></li>
-<li><a href="cons_res.shtml">Consumable Resources Guide</a></li>
-<li><a href="cons_res_share.shtml">Sharing Consumable Resources</a></li>
-<li><a href="accounting.shtml">Accounting</a></li>
-<li><a href="gang_scheduling.shtml">Gang Scheduling</a></li>
-<li><a href="preempt.shtml">Preemption</a></li>
-<li><a href="maui.shtml">Maui Scheduler Integration Guide</a></li>
-<li><a href="moab.shtml">Moab Cluster Suite Integration Guide</a></li>
-<li><a href="http://docs.hp.com/en/5991-4847/ch09s02.html">Submitting Jobs throuh LSF</a></li>
-<li><a href="bluegene.shtml">Blue Gene User and Administrator Guide</a></li>
-<li><a href="ibm.shtml">IBM AIX User and Administrator Guide</a></li>
-<li><a href="power_save.shtml">Power Saving Guide</a></li>
+<li><a href="troubleshoot.html">Troubleshooting Guide</a></li>
+<li><a href="big_sys.html">Large Cluster Administration Guide</a></li>
+<li><a href="accounting.html">Accounting</a></li>
+<li><a href="power_save.html">Power Saving Guide</a></li>
+<li>SLURM Scheduling</li>
+<ul>
+<li><a href="cons_res.html">Consumable Resources Guide</a></li>
+<li><a href="gang_scheduling.html">Gang Scheduling</a></li>
+<li><a href="priority_multifactor.html">Multifactor Job Priority</a></li>
+<li><a href="preempt.html">Preemption</a></li>
+<li><a href="resource_limits.html">Resource Limits</a></li>
+<li><a href="reservations.html">Resource Reservation Gude</a></li>
+<li><a href="cons_res_share.html">Sharing Consumable Resources</a></li>
+<li><a href="topology.html">Topology</a></li>
+</ul>
+<li>External Schedulers</li>
+<ul>
+<li><a href="maui.html">Maui Scheduler Integration Guide</a></li>
+<li><a href="moab.html">Moab Cluster Suite Integration Guide</a></li>
+<li><a href="http://docs.hp.com/en/5991-4847/ch09s02.html">Submitting Jobs through LSF</a></li>
+</ul>
+<li>Specific Systems</li>
+<ul>
+<li><a href="bluegene.html">Blue Gene User and Administrator Guide</a></li>
+<li><a href="cray.html">Cray User and Administrator Guide</a></li>
+<li><a href="ibm.html">IBM AIX User and Administrator Guide</a></li>
+<li><a href="sun_const.html">Sun Constellation Aministrator Guide</a></li>
+</ul>
 </ul>
 
 <h2>SLURM Developers</h2>
 <ul>
-<li><a href="programmer_guide.shtml">Programmer Guide</a></li>
-<li><a href="api.shtml">Application Programmer Interface (API) Guide</a></li>
-<li><a href="plugins.shtml">Plugin Programmer Guide</a></li>
-<li><a href="authplugins.shtml">Authentication Plugin Programmer Guide</a></li>
-<li><a href="crypto_plugins.shtml">Cryptographic Plugin Programmer Guild</a></li>
-<li><a href="jobacct_gatherplugins.shtml">Job Accounting Gather Plugin Programmer Guide</a></li>
-<li><a href="accounting_storageplugins.shtml">Accounting Storage Plugin Programmer Guide</a></li>
-<li><a href="checkpoint_plugins.shtml">Job Checkpoint Plugin Programmer Guide</a></li>
-<li><a href="jobcompplugins.shtml">Job Completion Logging Plugin Programmer Guide</a></li>
-<li><a href="mpiplugins.shtml">MPI Plugin Programmer Guide</a></li>
-<li><a href="proctrack_plugins.shtml">Process Tracking Plugin Programmer Guide</a></li>
-<li><a href="schedplugins.shtml">Scheduler Plugin Programmer Guide</a></li>
-<li><a href="selectplugins.shtml">Node Selection Plugin Programmer Guide</a></li>
-<li><a href="switchplugins.shtml">Switch (Interconnect) Plugin Programmer Guide</a></li>
-<li><a href="taskplugins.shtml">Task Plugin Programmer Guide</a></li>
+<li><a href="programmer_guide.html">Programmer Guide</a></li>
+<li><a href="api.html">Application Programmer Interface (API) Guide</a></li>
+<li><a href="plugins.html">Plugin Programmer Guide</a></li>
+<li>Plugin Interface Details</li>
+<ul>
+<li><a href="authplugins.html">Authentication Plugin Programmer Guide</a></li>
+<li><a href="crypto_plugins.html">Cryptographic Plugin Programmer Guide</a></li>
+<li><a href="jobacct_gatherplugins.html">Job Accounting Gather Plugin Programmer Guide</a></li>
+<li><a href="accounting_storageplugins.html">Accounting Storage Plugin Programmer Guide</a></li>
+<li><a href="checkpoint_plugins.html">Job Checkpoint Plugin Programmer Guide</a></li>
+<li><a href="jobcompplugins.html">Job Completion Logging Plugin Programmer Guide</a></li>
+<li><a href="mpiplugins.html">MPI Plugin Programmer Guide</a></li>
+<li><a href="priority_plugins.html">Priority Plugin Programmer Guide</a></li>
+<li><a href="proctrack_plugins.html">Process Tracking Plugin Programmer Guide</a></li>
+<li><a href="schedplugins.html">Scheduler Plugin Programmer Guide</a></li>
+<li><a href="selectplugins.html">Resource Selection Plugin Programmer Guide</a></li>
+<li><a href="switchplugins.html">Switch (Interconnect) Plugin Programmer Guide</a></li>
+<li><a href="taskplugins.html">Task Plugin Programmer Guide</a></li>
+<li><a href="topology_plugin.html">Topology Plugin Programmer Guide</a></li>
+</ul>
 </ul>
 
-<p style="text-align:center;">Last modified 3 June 2008</p>
+<p style="text-align:center;">Last modified 24 March 2009</p>
 
 <!--#include virtual="footer.txt"-->
diff --git a/doc/html/download.shtml b/doc/html/download.shtml
index 61bd587d3e57913ab040fe7766ef383622a44965..44c518f9a9431c77000de4c793c4e3132bfea543 100644
--- a/doc/html/download.shtml
+++ b/doc/html/download.shtml
@@ -37,7 +37,7 @@ As assortment of SPANK plugins are available from<br>
 <a href="http://code.google.com/p/slurm-spank-plugins/">
 http://code.google.com/p/slurm-spank-plugins/</a>.<br>
 The current source for the plugins can be checked out of the subversion
-respository with the following command:<br>
+repository with the following command:<br>
 <i>svn checkout http://slurm-spank-plugins.googlecode.com/svn/trunk/ slurm-plugins</i></li>
 
 <li><b>I/O Watchdog</b><br>
@@ -46,7 +46,7 @@ for <i>hangs</i> which typically have a side-effect of ceasing all write
 activity. This faciltiy attempts to monitor all write activity of an 
 application and trigger a set of user-defined actions when write activity 
 as ceased for a configurable period of time. A SPANK plugin is provided
-for use with SLURM. See the README and man page in tha package for more
+for use with SLURM. See the README and man page in the package for more
 details. Download the latest source from:<br>
 <a href="http://io-watchdog.googlecode.com/files/io-watchdog-0.6.tar.bz2">
 http://io-watchdog.googlecode.com/files/io-watchdog-0.6.tar.bz2</a></li>
@@ -71,19 +71,17 @@ See our <a href="accounting.html">Accounting</a> web page for more information.<
 <ul>
 <li><a href="http://www.mysql.com/">MySQL</a> (recommended)</li>
 <li><a href="http://www.postgresql.org/">PostgreSQL</a></li>
-<li><a href="http://www.clusterresources.com/pages/products/gold-allocation-manager.php">Gold</a></li>
 </ul>
 
 <li>Digital signatures (Cypto plugin) are used to insure message are not altered.</li>
 <ul>
+<li><b>Munge</b> (recommended)<br>
+Munge can be used as an alternative to OpenSSL. 
+Munge is available under the GNU General Public License.
+See Munge download information above.</li>
 <li><b>OpenSSL</b><br>
-OpenSSL is recommended for generation of digital signatures.
+OpenSSL may be used as an alternative to Munge for generation of digital signatures.
 Download it from <a href="http://www.openssl.org/">http://www.openssl.org/</a>.</li>
-<li><b>Munge</b><br>
-Munge can be used at an alternative to OpenSSL. 
-Munge is available under the Gnu General Public License, but is slower than 
-OpenSSL for the generation of digital signatures. See Munge download 
-information above.</li>
 </ul> 
 
 <li>Interconnect plugins (Switch plugin)</li>
@@ -114,7 +112,7 @@ https://sourceforge.net/projects/slurm/</a>.
 <li><a href="http://www.quadrics.com/">Quadrics MPI</a></li>
 </ul>
 
-<li>Schedulers offering greater control over the workload</li>
+<li>External schedulers offering control over the workload</li>
 <ul>
 <li><a href="http://www.platform.com/">Load Sharing Facility (LSF)</a></li>
 <li><a href="http://www.clusterresources.com/pages/products/maui-cluster-scheduler.php">
@@ -131,6 +129,6 @@ Portable Linux Processor Affinity (PLPA)</a></li>
 
 </ul>
 
-<p style="text-align:center;">Last modified 30 September 2008</p>
+<p style="text-align:center;">Last modified 13 November 2008</p>
 
 <!--#include virtual="footer.txt"-->
diff --git a/doc/html/faq.shtml b/doc/html/faq.shtml
index 510a8ad4b8d1e03f91b27bcc815f6cb0f63c1431..892c4c3bf661e0c62042c274bde23a31080fcf7c 100644
--- a/doc/html/faq.shtml
+++ b/doc/html/faq.shtml
@@ -31,7 +31,10 @@ allocated to a SLURM job?</a></li>
 killed?</a></li>
 <li><a href="#arbitrary">How do I run specific tasks on certain nodes
 in my allocation?</a></li> 
+<li><a href="#hold">How can I temporarily prevent a job from running 
+(e.g. place it into a <i>hold</i> state)?</a></li>
 </ol>
+
 <h2>For Administrators</h2>
 <ol>
 <li><a href="#suspend">How is job suspend/resume useful?</a></li>
@@ -93,8 +96,25 @@ regular database plugins?</li>
 information between major SLURM updates?</li>
 <li><a href="#health_check">Why doesn't the <i>HealthCheckProgram</i>
 execute on DOWN nodes?</li>
+<li><a href="#batch_lost">What is the meaning of the error 
+&quot;Batch JobId=# missing from master node, killing it&quot;?</a></li>
+<li><a href="#accept_again">What does the messsage
+&quot;srun: error: Unable to accept connection: Resources temporarily unavailable&quot; 
+indicate?</a></li>
+<li><a href="#task_prolog">How could I automatically print a job's 
+SLURM job ID to its standard output?</li>
+<li><a href="#moab_start">I run SLURM with the Moab or Maui scheduler.
+How can I start a job under SLURM without the scheduler?</li>
+<li><a href="#orphan_procs">Why are user processes and <i>srun</i>
+running even though the job is supposed to be completed?</li>
+<li><a href="#slurmd_oom">How can I prevent the <i>slurmd</i> and
+<i>slurmstepd</i> daemons from being killed when a node's memory 
+is exhausted?</li>
+<li><a href="#ubuntu">I see my host of my calling node as 127.0.1.1
+    instead of the correct ip address.  Why is that?</a></li>
 </ol>
 
+
 <h2>For Users</h2>
 <p><a name="comp"><b>1. Why is my job/node in COMPLETING state?</b></a><br>
 When a job is terminating, both the job and its nodes enter the COMPLETING state. 
@@ -111,7 +131,7 @@ for an extended period of time.
 This may be indicative of processes hung waiting for a core file 
 to complete I/O or operating system failure. 
 If this state persists, the system administrator should check for processes 
-associated with the job that can not be terminated then use the 
+associated with the job that cannot be terminated then use the 
 <span class="commandline">scontrol</span> command to change the node's 
 state to DOWN (e.g. &quot;scontrol update NodeName=<i>name</i> State=DOWN Reason=hung_completing&quot;), 
 reboot the node, then reset the node's state to IDLE 
@@ -123,14 +143,14 @@ associated with it terminate before setting it DOWN and re-booting.</p>
 <p>Note that SLURM has two configuration parameters that may be used to 
 automate some of this process.
 <i>UnkillableStepProgram</i> specifies a program to execute when 
-non-killable proceses are identified.
+non-killable processes are identified.
 <i>UnkillableStepTimeout</i> specifies how long to wait for processes
 to terminate. 
 See the "man slurm.conf" for more information about these parameters.</p>
 
 <p><a name="rlimit"><b>2. Why are my resource limits not propagated?</b></a><br>
 When the <span class="commandline">srun</span> command executes, it captures the 
-resource limits in effect at that time. These limits are propagated to the allocated 
+resource limits in effect at submit time. These limits are propagated to the allocated 
 nodes before initiating the user's job. The SLURM daemon running on that node then 
 tries to establish identical resource limits for the job being initiated. 
 There are several possible reasons for not being able to establish those 
@@ -139,17 +159,17 @@ resource limits.
 <li>The hard resource limits applied to SLURM's slurmd daemon are lower 
 than the user's soft resources limits on the submit host. Typically 
 the slurmd daemon is initiated by the init daemon with the operating 
-system default limits. This may be address either through use of the 
+system default limits. This may be addressed either through use of the 
 ulimit command in the /etc/sysconfig/slurm file or enabling
 <a href="#pam">PAM in SLURM</a>.</li>
-<li>The user's hard resource limits on the allocated node sre lower than 
-the same user's soft  hard resource limits on the node from which the 
+<li>The user's hard resource limits on the allocated node are lower than 
+the same user's soft resource limits on the node from which the 
 job was submitted. It is recommended that the system administrator 
 establish uniform hard resource limits for users on all nodes 
 within a cluster to prevent this from occurring.</li>
 </ul></p>
 <p>NOTE: This may produce the error message &quot;Can't propagate RLIMIT_...&quot;.
-The error message is printed only if the user explicity specifies that
+The error message is printed only if the user explicitly specifies that
 the resource limit should be propagated or the srun command is running
 with verbose logging of actions from the slurmd daemon (e.g. "srun -d6 ...").</p>
 
@@ -166,7 +186,7 @@ until no previously submitted job is pending. If the scheduler type is <b>backfi
 then jobs will generally be executed in the order of submission for a given partition 
 with one exception: later submitted jobs will be initiated early if doing so does 
 not delay the expected execution time of an earlier submitted job. In order for 
-backfill scheduling to be effective, users jobs should specify reasonable time 
+backfill scheduling to be effective, users' jobs should specify reasonable time
 limits. If jobs do not specify time limits, then all jobs will receive the same 
 time limit (that associated with the partition), and the ability to backfill schedule 
 jobs will be limited. The backfill scheduler does not alter job specifications 
@@ -208,7 +228,7 @@ more information.</p>
 SLURM has a job purging mechanism to remove inactive jobs (resource allocations)
 before reaching its time limit, which could be infinite.
 This inactivity time limit is configurable by the system administrator. 
-You can check it's value with the command</p>
+You can check its value with the command</p>
 <blockquote>
 <p><span class="commandline">scontrol show config | grep InactiveLimit</span></p>
 </blockquote>
@@ -233,7 +253,7 @@ the command. For example:</p>
 </blockquote>
 <p>srun processes "-N2" as an option to itself. "hostname" is the 
 command to execute and "-pdebug" is treated as an option to the 
-hostname command. Which will change the name of the computer 
+hostname command. This will change the name of the computer 
 on which SLURM executes the command - Very bad, <b>Don't run 
 this command as user root!</b></p>
 
@@ -242,9 +262,10 @@ this command as user root!</b></p>
 There are significant limitations in the current backfill scheduler plugin. 
 It was designed to perform backfill node scheduling for a homogeneous cluster.
 It does not manage scheduling on individual processors (or other consumable 
-resources). It also does not update the required or excluded node list of 
-individual jobs. These are the current limiations. You can use the 
-scontrol show command to check if these conditions apply.</p> 
+resources). It does not update the required or excluded node list of 
+individual jobs. It does support jobs with constraints/features unless 
+the exclusive OR operator is used in the constraint expression. 
+You can use the scontrol show command to check if these conditions apply.</p> 
 <ul>
 <li>Partition: State=UP</li>
 <li>Partition: RootOnly=NO</li>
@@ -287,7 +308,7 @@ that the processes associated with the switch have been terminated
 to avoid the possibility of re-using switch resources for other 
 jobs (even on different nodes).
 SLURM considers jobs COMPLETED when all nodes allocated to the 
-job are either DOWN or confirm termination of all it's processes.
+job are either DOWN or confirm termination of all its processes.
 This enables SLURM to purge job information in a timely fashion 
 even when there are many failing nodes.
 Unfortunately the job step information may persist longer.</p>
@@ -297,7 +318,7 @@ job allocation?</b></a><br>
 There is a srun option <i>--jobid</i> that can be used to specify 
 a job's ID. 
 For a batch job or within an existing resource allocation, the 
-environment variable <i>SLURM_JOBID</i> has already been defined, 
+environment variable <i>SLURM_JOB_ID</i> has already been defined, 
 so all job steps will run within that job allocation unless 
 otherwise specified.
 The one exception to this is when submitting batch jobs. 
@@ -442,7 +463,7 @@ named <i>_interactive</i>.</p>
 # 
 # Simple batch script that starts SCREEN.
 
-exec screen -Dm -S slurm$SLURM_JOBID
+exec screen -Dm -S slurm$SLURM_JOB_ID
 </pre>
 
 <p>The following script named <i>_interactive_screen</i> is also used.</p>
@@ -472,7 +493,7 @@ indicate?</b></a><br>
 The srun command normally terminates when the standard output and 
 error I/O from the spawned tasks end. This does not necessarily 
 happen at the same time that a job step is terminated. For example, 
-a file system problem could render a spawned tasks non-killable 
+a file system problem could render a spawned task non-killable
 at the same time that I/O to srun is pending. Alternately a network 
 problem could prevent the I/O from being transmitted to srun.
 In any event, the srun command is notified when a job step is 
@@ -508,8 +529,8 @@ If the user's resource limit is not propagated, the limit in
 effect for the <i>slurmd</i> daemon will be used for the spawned job.
 A simple way to control this is to insure that user <i>root</i> has a 
 sufficiently large resource limit and insuring that <i>slurmd</i> takes 
-full advantage of this limit. For example, you can set user's root's
-locked memory limit limit to be unlimited on the compute nodes (see
+full advantage of this limit. For example, you can set user root's
+locked memory limit to be unlimited on the compute nodes (see
 <i>"man limits.conf"</i>) and insuring that <i>slurmd</i> takes 
 full advantage of this limit (e.g. by adding something like
 <i>"ulimit -l unlimited"</i> to the <i>/etc/init.d/slurm</i>
@@ -521,7 +542,7 @@ job steps being killed?</b></a><br>
 SLURM has a configuration parameter <i>InactiveLimit</i> intended 
 to kill jobs that do not spawn any job steps for a configurable
 period of time. Your system administrator may modify the <i>InactiveLimit</i>
-to satisfy your needs. Alternatly, you can just spawn a job step
+to satisfy your needs. Alternately, you can just spawn a job step
 at the beginning of your script to execute in the background. It
 will be purged when your script exits or your job otherwise terminates.
 A line of this sort near the beginning of your script should suffice:<br>
@@ -571,9 +592,19 @@ print $layout;
 We can now use this script in our srun line in this fashion.<p>
 <i>srun -m arbitrary -n5 -w `arbitrary.pl 4,1` -l hostname</i><p>
 This will layout 4 tasks on the first node in the allocation and 1
-task on the second node.  
-
-</p>
+task on the second node.</p>
+
+<p><a name="hold"><b>21. How can I temporarily prevent a job from running 
+(e.g. place it into a <i>hold</i> state)?</b></a><br>
+The easiest way to do this is to change a job's earliest begin time
+(optionally set at job submit time using the <i>--begin</i> option).
+The example below places a job into hold state (preventing its initiation
+for 30 days) and later permits it to start immediately.</p>
+<pre>
+$ scontrol update JobId=1234 StartTime=now+30days
+... later ...
+$ scontrol update JobId=1234 StartTime=now
+</pre>
 
 <p class="footer"><a href="#top">top</a></p>
 
@@ -604,11 +635,11 @@ accommodate all jobs allocated to a node, either running or suspended.
 <p><a name="fast_schedule"><b>2. How can I configure SLURM to use 
 the resources actually found on a node rather than what is defined 
 in <i>slurm.conf</i>?</b></a><br>
-SLURM can either base it's scheduling decisions upon the node 
+SLURM can either base its scheduling decisions upon the node
 configuration defined in <i>slurm.conf</i> or what each node 
 actually returns as available resources. 
 This is controlled using the configuration parameter <i>FastSchedule</i>.
-Set it's value to zero in order to use the resources actually 
+Set its value to zero in order to use the resources actually 
 found on each node, but with a higher overhead for scheduling.
 A value of one is the default and results in the node configuration 
 defined in <i>slurm.conf</i> being used. See &quot;man slurm.conf&quot;
@@ -622,7 +653,7 @@ Set its value to one in order for DOWN nodes to automatically be
 returned to service once the <i>slurmd</i> daemon registers 
 with a valid node configuration.
 A value of zero is the default and results in a node staying DOWN 
-until an administrator explicity returns it to service using 
+until an administrator explicitly returns it to service using 
 the command &quot;scontrol update NodeName=whatever State=RESUME&quot;.
 See &quot;man slurm.conf&quot; and &quot;man scontrol&quot; for more 
 details.</p>
@@ -639,7 +670,7 @@ See the slurm.conf and srun man pages for more information.</p>
  
 <p><a name="multi_job"><b>5. How can I control the execution of multiple 
 jobs per node?</b></a><br>
-There are two mechanism to control this. 
+There are two mechanisms to control this.
 If you want to allocate individual processors on a node to jobs, 
 configure <i>SelectType=select/cons_res</i>. 
 See <a href="cons_res.html">Consumable Resources in SLURM</a>
@@ -649,7 +680,7 @@ configure <i>SelectType=select/linear</i>.
 Each partition also has a configuration parameter <i>Shared</i>
 that enables more than one job to execute on each node. 
 See <i>man slurm.conf</i> for more information about these 
-configuration paramters.</p>
+configuration parameters.</p>
 
 <p><a name="inc_plugin"><b>6. When the SLURM daemon starts, it 
 prints &quot;cannot resolve X plugin operations&quot; and exits. 
@@ -663,7 +694,7 @@ for more information (e.g. &quot;slurmctld -Dvvvvv&quot;).
 
 <p><a name="sigpipe"><b>7. Why are user tasks intermittently dying
 at launch with SIGPIPE error messages?</b></a><br>
-If you are using ldap or some other remote name service for
+If you are using LDAP or some other remote name service for
 username and groups lookup, chances are that the underlying
 libc library functions are triggering the SIGPIPE.  You can likely
 work around this problem by setting <i>CacheGroups=1</i> in your slurm.conf
@@ -672,11 +703,8 @@ reconfigure &quot; any time your groups database is updated.
 
 <p><a name="maint_time"><b>8. How can I dry up the workload for a 
 maintenance period?</b></a><br>
-There isn't a mechanism to tell SLURM that all jobs should be 
-completed by a specific time. The best way to address this is 
-to shorten the <i>MaxTime</i> associated with the partitions so 
-as to avoid initiating jobs that will not have completed by 
-the maintenance period.
+Create a resource reservation as described by SLURM's 
+<a href="reservations.html">Resource Reservation Guide</a>.
 
 <p><a name="pam"><b>9. How can PAM be used to control a user's limits on 
 or access to compute nodes?</b></a><br>
@@ -697,7 +725,7 @@ For example, to set the locked memory limit to unlimited for all users:</p>
 </pre>
 <p>Finally, you need to disable SLURM's forwarding of the limits from the 
 session from which the <i>srun</i> initiating the job ran. By default 
-all resource limits are propogated from that session. For example, adding 
+all resource limits are propagated from that session. For example, adding 
 the following line to <i>slurm.conf</i> will prevent the locked memory 
 limit from being propagated:<i>PropagateResourceLimitsExcept=MEMLOCK</i>.</p>
 
@@ -740,7 +768,7 @@ to relocate them. In order to do so, follow this procedure:</p>
 <li>Stop all SLURM daemons</li>
 <li>Modify the <i>ControlMachine</i>, <i>ControlAddr</i>, 
 <i>BackupController</i>, and/or <i>BackupAddr</i> in the <i>slurm.conf</i> file</li>
-<li>Distribute the updated <i>slurm.conf</i> file file to all nodes</li>
+<li>Distribute the updated <i>slurm.conf</i> file to all nodes</li>
 <li>Restart all SLURM daemons</li>
 </ol>
 <p>There should be no loss of any running or pending jobs. Insure that
@@ -778,11 +806,11 @@ cluster?</b></a><br>
 Yes, this can be useful for testing purposes. 
 It has also been used to partition "fat" nodes into multiple SLURM nodes.
 There are two ways to do this.
-The best method for most conditins is to run one <i>slurmd</i> 
+The best method for most conditions is to run one <i>slurmd</i> 
 daemon per emulated node in the cluster as follows.
 <ol>
 <li>When executing the <i>configure</i> program, use the option 
-<i>--multiple-slurmd</i> (or add that option to your <i>~/.rpmmacros</i>
+<i>--enable-multiple-slurmd</i> (or add that option to your <i>~/.rpmmacros</i>
 file).</li>
 <li>Build and install SLURM in the usual manner.</li>
 <li>In <i>slurm.conf</i> define the desired node names (arbitrary 
@@ -797,9 +825,9 @@ slurm.conf. </li>
 of the node that it is supposed to serve on the execute line.</li> 
 </ol>
 <p>It is strongly recommended that SLURM version 1.2 or higher be used 
-for this due to it's improved support for multiple slurmd daemons.
+for this due to its improved support for multiple slurmd daemons.
 See the
-<a href="programmer_guide.shtml#multiple_slurmd_support">Programmers Guide</a>
+<a href="programmer_guide.html#multiple_slurmd_support">Programmers Guide</a>
 for more details about configuring multiple slurmd support.</p>
 
 <p>In order to emulate a really large cluster, it can be more 
@@ -863,7 +891,7 @@ any desired node resource specifications (<i>Procs</i>, <i>Sockets</i>,
 <i>CoresPerSocket</i>, <i>ThreadsPerCore</i>, and/or <i>TmpDisk</i>).
 SLURM will use the resource specification for each node that is 
 given in <i>slurm.conf</i> and will not check these specifications 
-against those actaully found on the node.
+against those actually found on the node.
 
 <p><a name="credential_replayed"><b>16. What does a 
 &quot;credential replayed&quot; 
@@ -958,7 +986,7 @@ This error indicates that a job credential generated by the slurmctld daemon
 corresponds to a job that the slurmd daemon has already revoked. 
 The slurmctld daemon selects job ID values based upon the configured 
 value of <b>FirstJobId</b> (the default value is 1) and each job gets 
-an value one large than the previous job. 
+a value one larger than the previous job.
 On job termination, the slurmctld daemon notifies the slurmd on each 
 allocated node that all processes associated with that job should be 
 terminated. 
@@ -972,7 +1000,7 @@ for jobs that it considers terminated.
 This solution to this problem is to cold-start all slurmd daemons whenever
 the slurmctld daemon is cold-started.
 
-<p><a name="globus"><b>23. Can SLURM be used with Globus?</b><br>
+<p><a name="globus"><b>23. Can SLURM be used with Globus?</b></a><br>
 Yes. Build and install SLURM's Torque/PBS command wrappers along with 
 the Perl APIs from SLURM's <i>contribs</i> directory and configure 
 <a href="http://www-unix.globus.org/">Globus</a> to use those PBS commands.
@@ -980,7 +1008,7 @@ Note there are RPMs available for both of these packages, named
 <i>torque</i> and <i>perlapi</i> respectively.
 
 <p><a name="time_format"><b>24. Can SLURM time output format include the 
-year?</b><br>
+year?</b></a><br>
 The default SLURM time format output is <i>MM/DD-HH:MM:SS</i>. 
 Define &quot;ISO8601&quot; at SLURM build time to get the time format
 <i>YYYY-MM-DDTHH:MM:SS</i>.
@@ -988,14 +1016,14 @@ Note that this change in format will break anything that parses
 SLURM output expecting the old format (e.g. LSF, Maui or Moab).
 
 <p><a name="file_limit"><b>25. What causes the error 
-&quot;Unable to accept new connection: Too many open files&quot;?</b><br>
+&quot;Unable to accept new connection: Too many open files&quot;?</b></a><br>
 The srun command automatically increases its open file limit to 
 the hard limit in order to process all of the standard input and output
 connections to the launched tasks. It is recommended that you set the
 open file hard limit to 8192 across the cluster.
 
 <p><a name="slurmd_log"><b>26. Why does the setting of <i>SlurmdDebug</i> 
-fail to log job step information at the appropriate level?</b><br>
+fail to log job step information at the appropriate level?</b></a><br>
 There are two programs involved here. One is <b>slurmd</b>, which is 
 a persistent daemon running at the desired debug level. The second 
 program is <b>slurmstep</b>, which executed the user job and its
@@ -1005,8 +1033,8 @@ detail being logged in the <i>SlurmdLogFile</i> plus the output
 of the program.
 
 <p><a name="rpm"><b>27. Why isn't the auth_none.so (or other file) in a 
-SLURM RPM?</b><br>
-The auth_none plugin is in a separete RPM and not built by default.
+SLURM RPM?</b></a><br>
+The auth_none plugin is in a separate RPM and not built by default.
 Using the auth_none plugin means that SLURM communications are not 
 authenticated, so you probably do not want to run in this mode of operation 
 except for testing purposes. If you want to build the auth_none RPM then 
@@ -1015,9 +1043,9 @@ add <i>--with auth_none</i> on the rpmbuild command line or add
 in the SLURM distribution for a list of other options.
 
 <p><a name="slurmdbd"><b>28. Why should I use the slurmdbd instead of the
-regular database plugins?</b><br>
+regular database plugins?</b></a><br>
 While the normal storage plugins will work fine without the added
-layer of the slurmdbd there are some great benifits to using the
+layer of the slurmdbd there are some great benefits to using the
 slurmdbd.
 
 1. Added security.  Using the slurmdbd you can have an authenticated
@@ -1032,14 +1060,14 @@ slurmdbd.
    slurmdbd you can also query any cluster using the slurmdbd from any
    other cluster's nodes.
 
-<p><a name="debug"><b>29. How can I build SLURM with debugging symbols?</b></br>
+<p><a name="debug"><b>29. How can I build SLURM with debugging symbols?</b></a></br>
 Set your CFLAGS environment variable before building. 
 You want the "-g" option to produce debugging information and
 "-O0" to set the optimization level to zero (off). For example:<br>
 CFLAGS="-g -O0" ./configure ...
 
 <p><a name="state_preserve"><b>30. How can I easily preserve drained node 
-information between major SLURM updates?</b><br>
+information between major SLURM updates?</b></a><br>
 Major SLURM updates generally have changes in the state save files and 
 communication protocols, so a cold-start (without state) is generally 
 required. If you have nodes in a DRAIN state and want to preserve that
@@ -1052,12 +1080,12 @@ sinfo -t drain -h -o "scontrol update nodename='%N' state=drain reason='%E'"
 </pre>
 
 <p><a name="health_check"><b>31. Why doesn't the <i>HealthCheckProgram</i>
-execute on DOWN nodes?</b><br>
+execute on DOWN nodes?</a></b><br>
 Hierarchical communications are used for sending this message. If there
 are DOWN nodes in the communications hierarchy, messages will need to 
-be re-routed. This limits SLURM's ability to tightly synchroize the 
+be re-routed. This limits SLURM's ability to tightly synchronize the
 execution of the <i>HealthCheckProgram</i> across the cluster, which
-could adversly impact performance of parallel applications. 
+could adversely impact performance of parallel applications. 
 The use of CRON or node startup scripts may be better suited to insure
 that <i>HealthCheckProgram</i> gets executed on nodes that are DOWN
 in SLURM. If you still want to have SLURM try to execute 
@@ -1079,8 +1107,103 @@ Index: src/slurmctld/ping_nodes.c
                         continue;
 </pre>
 
+<p><a name="batch_lost"><b>32. What is the meaning of the error 
+&quot;Batch JobId=# missing from master node, killing it&quot;?</b></a><br>
+A shell is launched on node zero of a job's allocation to execute
+the submitted program. The <i>slurmd</i> daemon executing on each compute
+node will periodically report to the <i>slurmctld</i> what programs it
+is executing. If a batch program is expected to be running on some
+node (i.e. node zero of the job's allocation) and is not found, the
+message above will be logged and the job cancelled. This typically is 
+associated with exhausting memory on the node or some other critical 
+failure that cannot be recovered from. The equivalent message in 
+earlier releases of slurm is 
+&quot;Master node lost JobId=#, killing it&quot;.
+
+<p><a name="accept_again"><b>33. What does the messsage
+&quot;srun: error: Unable to accept connection: Resources temporarily unavailable&quot; 
+indicate?</b></a><br>
+This has been reported on some larger clusters running SUSE Linux when
+a user's resource limits are reached. You may need to increase limits
+for locked memory and stack size to resolve this problem.
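+<p>For example, raising the limits in the shell might look like this
+(a sketch; appropriate values are site-specific):</p>
+<pre>
+ulimit -l unlimited    # locked memory
+ulimit -s unlimited    # stack size
+</pre>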
+
+<p><a name="task_prolog"><b>34. How could I automatically print a job's 
+SLURM job ID to its standard output?</b></a><br>
+The configured <i>TaskProlog</i> is the only thing that can write to 
+the job's standard output or set extra environment variables for a job
+or job step. To write to the job's standard output, precede the message
+with "print ". To export environment variables, output a line of this
+form "export name=value". The example below will print a job's SLURM
+job ID and allocated hosts for a batch job only.
+
+<pre>
+#!/bin/sh
+#
+# Sample TaskProlog script that will print a batch job's
+# job ID and node list to the job's stdout
+#
+
+if [ X"$SLURM_STEP_ID" = "X" -a X"$SLURM_PROCID" = "X"0 ]
+then
+  echo "print =========================================="
+  echo "print SLURM_JOB_ID = $SLURM_JOB_ID"
+  echo "print SLURM_NODELIST = $SLURM_NODELIST"
+  echo "print =========================================="
+fi
+</pre>
+
+<p><a name="moab_start"><b>35. I run SLURM with the Moab or Maui scheduler.
+How can I start a job under SLURM without the scheduler?</b></a><br>
+When SLURM is configured to use the Moab or Maui scheduler, all submitted
+jobs have their priority initialized to zero, which SLURM treats as a held
+job. The job only begins when Moab or Maui decide where and when to start
+the job, setting the required node list and setting the job priority to 
+a non-zero value. To circumvent this, submit your job using a SLURM or
+Moab command then manually set its priority to a non-zero value (must be
+done by user root). For example:</p>
+<pre>
+$ scontrol update jobid=1234 priority=1000000
+</pre>
+<p>Note that changes in the configured value of <i>SchedulerType</i> only
+take effect when the <i>slurmctld</i> daemon is restarted (reconfiguring
+SLURM will not change this parameter). You will also need to manually
+modify the priority of every pending job. 
+When changing to Moab or Maui scheduling, set every job priority to zero. 
+When changing from Moab or Maui scheduling, set every job priority to a
+non-zero value (preferably fairly large, say 1000000).</p>
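+<p>For example, a sketch of how every pending job's priority could be 
+reset to zero using standard SLURM commands (run as user root):</p>
+<pre>
+$ for id in $(squeue -h -t PD -o %i); do
+&gt;    scontrol update jobid=$id priority=0
+&gt; done
+</pre>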
+
+<p><a name="orphan_procs"><b>36. Why are user processes and <i>srun</i>
+running even though the job is supposed to be completed?</b></a><br>
+SLURM relies upon a configurable process tracking plugin to determine
+when all of the processes associated with a job or job step have completed.
+Those plugins relying upon a kernel patch can reliably identify every process.
+Those plugins dependent upon process group IDs or parent process IDs are not 
+reliable. See the <i>ProctrackType</i> description in the <i>slurm.conf</i>
+man page for details. We rely upon the sgi_job plugin for most systems.</p>
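+<p>For example, the corresponding <i>slurm.conf</i> entry on such a 
+system would be:</p>
+<pre>
+ProctrackType=proctrack/sgi_job
+</pre>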
+
+<p><a name="slurmd_oom"><b>37. How can I prevent the <i>slurmd</i> and
+<i>slurmstepd</i> daemons from being killed when a node's memory 
+is exhausted?</b></a><br>
+You can set the value of <i>/proc/self/oom_adj</i> for 
+<i>slurmd</i> and <i>slurmstepd</i> by initiating the <i>slurmd</i>
+daemon with the <i>SLURMD_OOM_ADJ</i> and/or <i>SLURMSTEPD_OOM_ADJ</i>
+environment variables set to the desired values.
+A value of -17 typically will disable killing.</p>
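+<p>For example (a sketch; the daemon path may differ on your system):</p>
+<pre>
+# SLURMD_OOM_ADJ=-17 SLURMSTEPD_OOM_ADJ=-17 /usr/sbin/slurmd
+</pre>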
+
+<p><a name="ubuntu"><b>38. I see my host of my calling node as 127.0.1.1
+    instead of the correct ip address.  Why is that?</b></a></br>
+Some systems by default will put your host in the /etc/hosts file as
+    something like 
+<pre>
+127.0.1.1	snowflake.llnl.gov	snowflake
+</pre>
+This will cause srun and other things to grab 127.0.1.1 as it's
+address instead of the correct address and make it so the
+communication doesn't work.  Solution is to either remove this line or
+set a different nodeaddr that is known by your other nodes.</p>
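+<p>For example, a hypothetical <i>slurm.conf</i> node definition that 
+assigns an explicit address:</p>
+<pre>
+NodeName=snowflake NodeAddr=192.168.1.100
+</pre>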
+
 <p class="footer"><a href="#top">top</a></p>
 
-<p style="text-align:center;">Last modified 24 October 2008</p>
+<p style="text-align:center;">Last modified 12 June 2009</p>
 
 <!--#include virtual="footer.txt"-->
diff --git a/doc/html/footer.txt b/doc/html/footer.txt
index dbed09229de86685e99ebb3e7a88fa755db5407a..c333e9dddfcbfd09a8b4822bb8a57ebaa5ea423c 100644
--- a/doc/html/footer.txt
+++ b/doc/html/footer.txt
@@ -1,7 +1,7 @@
 </div> <!-- closes "content" -->
 
 <div id="footer">
-<div id="left">&nbsp;&nbsp;<span class="ucrlnum">LLNL-WEB-405518 |</span> <a href="https://www.llnl.gov/disclaimer.html" target="_blank" class="privacy">Privacy &amp; Legal Notice</a></div>
+<div id="left">&nbsp;&nbsp;<span class="ucrlnum">LLNL-WEB-411573 |</span> <a href="https://www.llnl.gov/disclaimer.html" target="_blank" class="privacy">Privacy &amp; Legal Notice</a></div>
 <div id="right"><span class="ucrlnum">18 July 2008&nbsp;&nbsp;</span></div>
 </div>
 
diff --git a/doc/html/gang_scheduling.shtml b/doc/html/gang_scheduling.shtml
index e8d37467bb11b663d28df404c56201a00af4227c..248c14022e86b1eb9d649a28c60751e047acdf01 100644
--- a/doc/html/gang_scheduling.shtml
+++ b/doc/html/gang_scheduling.shtml
@@ -3,12 +3,16 @@
 <H1>Gang Scheduling</H1>
 
 <P>
-SLURM version 1.2 and earlier supported dedication of resources
-to jobs.
-Beginning in SLURM version 1.3, gang scheduling is supported. 
-Gang scheduling is when two or more jobs are allocated to the same resources 
-and these jobs are alternately suspended to let all of the tasks of each 
-job have full access to the shared resources for a period of time.
+SLURM version 1.2 and earlier supported dedication of resources to jobs.
+Beginning in SLURM version 1.3, timesliced gang scheduling is supported. 
+Timesliced gang scheduling is when two or more jobs are allocated to the same
+resources and these jobs are alternately suspended to let one job at a time have
+dedicated access to the resources for a configured period of time.
+</P>
+<P>
+Preemptive priority job scheduling is another form of gang-scheduling that is
+supported by SLURM. See the <a href="preempt.html">Preemption</a> document for
+more information.
 </P>
 <P>
 A resource manager that supports timeslicing can improve it's responsiveness
@@ -87,7 +91,8 @@ the overhead of gang scheduling.
 The <I>FORCE</I> option now supports an additional parameter that controls 
 how many jobs can share a resource (FORCE[:max_share]). By default the 
 max_share value is 4. To allow up to 6 jobs from this partition to be 
-allocated to a common resource, set <I>Shared=FORCE:6</I>.
+allocated to a common resource, set <I>Shared=FORCE:6</I>. To let only two
+jobs timeslice on the same resources, set <I>Shared=FORCE:2</I> (see the
+example after this list).
 </LI>
 </UL>
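+<P>
+For example, a <I>slurm.conf</I> partition definition permitting two jobs to
+timeslice on the same resources might look like this (a sketch; the node
+names are hypothetical):
+</P>
+<PRE>
+PartitionName=batch Nodes=lx[0001-0032] Shared=FORCE:2
+</PRE>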
 <P>
@@ -131,7 +136,7 @@ the "active bitmap".
 </P>
 <P>
 This <I>timeslicer thread</I> algorithm for rotating jobs is designed to prevent
-jobs from starving (remaining in the suspended state indefinitly) and to be as
+jobs from starving (remaining in the suspended state indefinitely) and to be as
 fair as possible in the distribution of runtime while still keeping all of the
 resources as busy as possible.
 </P>
@@ -489,10 +494,6 @@ around on the cores to maximize performance. This is different than when
 
 <H2>Future Work</H2>
 
-<P>
-Priority scheduling and preemptive scheduling are other forms of gang
-scheduling that are currently under development for SLURM.
-</P>
 <P>
 <B>Making use of swap space</B>: (note that this topic is not currently
 scheduled for development, unless someone would like to pursue this) It should
@@ -508,6 +509,6 @@ For now this idea could be experimented with by disabling memory support in the
 selector and submitting appropriately sized jobs.
 </P>
 
-<p style="text-align:center;">Last modified 7 July 2008</p>
+<p style="text-align:center;">Last modified 5 December 2008</p>
 
 <!--#include virtual="footer.txt"-->
diff --git a/doc/html/header.txt b/doc/html/header.txt
index 1adedfd3ddcb58b190f617877d53730610ec8a35..fb36083e7dec5b50c546c719c143bbc49a94199f 100644
--- a/doc/html/header.txt
+++ b/doc/html/header.txt
@@ -8,8 +8,8 @@
 <meta http-equiv="Pragma" content="no-cache">
 <meta http-equiv="keywords" content="Simple Linux Utility for Resource Management, SLURM, resource management, 
 Linux clusters, high-performance computing, Livermore Computing">
-<meta name="LLNLRandR" content="LLNL-WEB-405518">
-<meta name="LLNLRandRdate" content="18 July 2008">
+<meta name="LLNLRandR" content="LLNL-WEB-411573">
+<meta name="LLNLRandRdate" content="26 March 2009">
 <meta name="distribution" content="global">
 <meta name="description" content="Simple Linux Utility for Resource Management">
 <meta name="copyright"
diff --git a/doc/html/ibm.shtml b/doc/html/ibm.shtml
index 2d0a7f6d78916b315d955fa52d801d750187f2d8..5eb7b6ac1db514154b0fe15ede927fb9aca2a21c 100644
--- a/doc/html/ibm.shtml
+++ b/doc/html/ibm.shtml
@@ -23,14 +23,14 @@ This architecture insures proper operation of all IBM tools.</p>
 This script should contain one or more invocations of poe to launch 
 the tasks.
 If you want to run a job interactively, just execute poe directly. 
-Poe will recognize that it lacks a SLURM job allocation (the SLURM_JOBID 
+Poe will recognize that it lacks a SLURM job allocation (the SLURM_JOB_ID 
 environment variable will be missing) and create the SLURM allocation 
 prior to launching tasks.</p>
 
 <p>Each poe invocation (or SLURM job step) can have it's own network 
 specification.
 For example one poe may use IP mode communications and the next use
-User Space (US) mode communcations. 
+User Space (US) mode communications. 
 This enhancement to normal poe functionality may be accomplished by 
 setting the SLURM_NETWORK environment variable.
 The format of SLURM_NETWORK is "network.[protocol],[type],[usage],[mode]". 
@@ -46,7 +46,7 @@ One file is written for each node on which the job is executing, plus
 another for the script executing poe.a
 By default, the checkpoint files will be written to the current working
 directory of the job.
-Names and locations of these files can be controled using the 
+Names and locations of these files can be controlled using the 
 environment variables <b>MP_CKPTFILE</b> and <b>MP_CKPTDIR</b>.
 Use the squeue command to identify the job and job step of interest. 
 To initiate a checkpoint in which the job step will continue execution, 
@@ -61,11 +61,11 @@ use the command: <br>
 <p>Three unique components are required to use SLURM on an IBM system.</p>
 <ol>
 <li>The Federation switch plugin is required.  
-This component is packaged with the SLURM distrbution.</li>
+This component is packaged with the SLURM distribution.</li>
 <li>There is a process tracking kernel extension required. 
 This is used to insure that all processes associated with a job 
 are tracked.
-SLURM normatlly uses session ID and process group ID on Linux systems,
+SLURM normally uses session ID and process group ID on Linux systems,
 but these mechanisms can not prevent user processes from establishing 
 their own session or process group and thus "escape" from SLURM 
 tracking.
diff --git a/doc/html/jobacct_gatherplugins.shtml b/doc/html/jobacct_gatherplugins.shtml
index 1dc934d314bdee436329f41a3ea7da2689833b5d..ea779aaefdc964cf789132293af3ad977d9e21fe 100644
--- a/doc/html/jobacct_gatherplugins.shtml
+++ b/doc/html/jobacct_gatherplugins.shtml
@@ -12,7 +12,7 @@ their own SLURM job accounting gather plugins. This is version 1 of the API.
 SLURM Plugin API with the following specifications:
 
 <p><span class="commandline">const char
-plugin_name[]="<i>full&nbsp;text&nbsp;name</i>"
+plugin_name[]="<i>full&nbsp;text&nbsp;name</i>"</span>
 <p style="margin-left:.2in">
 A free-formatted ASCII text string that identifies the plugin.
 
@@ -257,6 +257,6 @@ ability to implement a particular API version using the mechanism outlined
 for SLURM plugins.
 <p class="footer"><a href="#top">top</a>
 
-<p style="text-align:center;">Last modified 11 Sep 2007</p>
+<p style="text-align:center;">Last modified 11 September 2007</p>
 
 <!--#include virtual="footer.txt"-->
diff --git a/doc/html/jobcompplugins.shtml b/doc/html/jobcompplugins.shtml
index e208eb51195aa37294ac1b8df30598aec011fcf9..7246c74599bb7b2ee913d3ce746f9c57fa419c56 100644
--- a/doc/html/jobcompplugins.shtml
+++ b/doc/html/jobcompplugins.shtml
@@ -62,15 +62,15 @@ SLURM_SUCCESS. </p>
 <p style="margin-left:.2in"><b>Description</b>: Specify the location to be used for job logging.</p>
 <p style="margin-left:.2in"><b>Argument</b>:<span class="commandline"> location</span>&nbsp; 
 &nbsp;&nbsp;(input) specification of where logging should be done. The interpretation of 
-this string is at the discression of the plugin implementation.</p>
+this string is at the discretion of the plugin implementation.</p>
 <p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure, 
 the plugin should return SLURM_ERROR and set the errno to an appropriate value 
 to indicate the reason for failure.</p>
 <p class="footer"><a href="#top">top</a></p>
 
 <p class="commandline">int slurm_jobcomp_log_record (struct job_record *job_ptr);</p>
-<p style="margin-left:.2in"><b>Description</b>: Note termation of a job with the specified 
-characteristics.</p>
+<p style="margin-left:.2in"><b>Description</b>: Note termin ation of a job 
+with the specified characteristics.</p>
 <p style="margin-left:.2in"><b>Argument</b>: <br>
 <span class="commandline"> job_ptr</span>&nbsp;&nbsp;&nbsp;(input) Pointer to job record as defined
 in <i>src/slurmctld/slurmctld.h</i></p>
@@ -129,6 +129,6 @@ releases of SLURM may revise this API. A job completion plugin conveys its abili
 to implement a particular API version using the mechanism outlined for SLURM plugins.</p>
 <p class="footer"><a href="#top">top</a></p>
 
-<p style="text-align:center;">Last modified 10 Sep 2007</p>
+<p style="text-align:center;">Last modified 10 September 2007</p>
 
 <!--#include virtual="footer.txt"-->
diff --git a/doc/html/maui.shtml b/doc/html/maui.shtml
index 51667d14574a747e53faa1726e3b1a34b2bf8143..b83dafd3a8bc441d7fca45764f387338a0d93e3d 100644
--- a/doc/html/maui.shtml
+++ b/doc/html/maui.shtml
@@ -9,6 +9,12 @@ online documents at Cluster Resources Inc.:
 <a href="http://www.clusterresources.com/products/maui/docs/mauiadmin.shtml">
 http://www.clusterresources.com/products/maui/docs/mauiadmin.shtml</a>.
 
+<p>Maui uses SLURM commands and a wiki interface to communicate. See the 
+<a href="http://www.clusterresources.com/products/mwm/docs/wiki/wikiinterface.shtml">
+Wiki Interface Specification</a> and
+<a href="http://www.clusterresources.com/products/mwm/docs/wiki/socket.shtml">
+Wiki Socket Protocol Description</a> for more information.</p>
+
 <h2>Configuration</h2>
 <p>First, download the Maui scheduler kit from their web site
 <a href="http://www.clusterresources.com/pages/products/maui-cluster-scheduler.php">
@@ -32,12 +38,12 @@ Then build Maui from its source distribution. This is a two step process:</p>
 <p>The key of 42 is arbitrary. You can use any value, but it will need to 
 be a number no larger than 4,294,967,295 (2^32) and specify the same 
 value as a SLURM configuration parameter described below.
-Maui developers have assured us the authenticaion key will eventually be 
+Maui developers have assured us the authentication key will eventually be 
 set in a configuration file rather than at build time.</p>
 
 <p>Update the Maui configuration file <i>maui.conf</i> (Copy the file
 maui-3.2.6p9/maui.cfg.dist to maui.conf). Add the following configuration 
-paramters to maui.conf:</p>
+parameters to maui.conf:</p>
 <pre>
 RMCFG[host]       TYPE=WIKI
 RMPORT            7321            # selected port
@@ -94,10 +100,10 @@ SchedulerPort=7321
 SchedulerAuth=42 (for Slurm version 1.1 and earlier only)
 </pre>
 <p>In this case, "SchedulerAuth" has been set to 42, which was the 
-authenticaiton key specified when Maui was configured above. 
+authentication key specified when Maui was configured above. 
 Just make sure the numbers match.</p>
 
-<p>For Slurm version 1.2 or higher, the authentication key 
+<p>For SLURM version 1.2 or higher, the authentication key 
 is stored in a file specific to the wiki-plugin named 
 <i>wiki.conf</i>.
 This file should be protected from reading by users.
@@ -110,6 +116,18 @@ includes a description of keywords presently only
 supported by the sched/wiki2 plugin for use with the 
 Moab Scheduler.</p>
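+
+<p>For example, a minimal <i>wiki.conf</i> might contain nothing more 
+than the authentication key (a sketch using the key from the example 
+above):</p>
+<pre>
+AuthKey=42
+</pre>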
 
+<p>SLURM versions 2.0 and higher have internal scheduling capabilities
+that are not compatible with Maui.
+<ol>
+<li>Do not configure SLURM to use the "priority/multifactor" plugin 
+as it would set job priorities which conflict with those set by Maui.</li>
+<li>Do not use SLURM's <a href="reservations.html">reservation</a> 
+mechanism, but use that offered by Maui.</li>
+<li>Do not use SLURM's <a href="resource_limits.html">resource limits</a>
+as those may conflict with those managed by Maui.</li>
+</ol></p>
+
 <p>The wiki.conf keywords currently supported by Maui include:</p>
 
 <p><b>AuthKey</b> is a DES based encryption key used to sign 
@@ -162,6 +180,6 @@ HidePartitionJobs=debug
 
 <p class="footer"><a href="#top">top</a></p>
 
-<p style="text-align:center;">Last modified 4 November 2008</p>
+<p style="text-align:center;">Last modified 8 May 2009</p>
 
 <!--#include virtual="footer.txt"-->
diff --git a/doc/html/mc_support.shtml b/doc/html/mc_support.shtml
index 74197368b58a0b7d7eb9b5c862689c889d7dc137..0944446c7b7146bf7bb0d207cc0517b00f492e00 100644
--- a/doc/html/mc_support.shtml
+++ b/doc/html/mc_support.shtml
@@ -79,7 +79,7 @@ to dedicate to a job (minimum or range)
 </td></tr>
 <tr>
     <td> -B <i>S[:C[:T]]</i></td>
-    <td> Combined shorcut option for --sockets-per-node, --cores-per_cpu, --threads-per_core
+    <td> Combined shortcut option for --sockets-per-node, --cores-per-socket, --threads-per-core
 </td></tr>
 <tr><td colspan=2>
 <b><a href="#srun_dist">New Distributions</b>
@@ -356,7 +356,7 @@ via <a href="configurator.html">configurator.html</a>.
 <p>The <tt>--ntasks-per-{node,socket,core}=<i>ntasks</i></tt> flags
 allow the user to request that no more than <tt><i>ntasks</i></tt>
 be invoked on each node, socket, or core.
-This is similiar to using <tt>--cpus-per-task=<i>ncpus</i></tt>
+This is similar to using <tt>--cpus-per-task=<i>ncpus</i></tt>
 but does not require knowledge of the actual number of cpus on
 each node.  In some cases, it is more convenient to be able to
 request that no more than a specific number of ntasks be invoked
@@ -896,7 +896,7 @@ JOBID ST TIME NODES MIN_PROCS MIN_SOCKETS MIN_CORES MIN_THREADS
 
 <p>The 'scontrol show job' command can be used to display
 the number of allocated CPUs per node as well as the socket, cores,
-and threads specified in the request and contraints.
+and threads specified in the request and constraints.
 
 <PRE>
 % srun -N 2 -B 2:1-1 sleep 100 &
diff --git a/doc/html/moab.shtml b/doc/html/moab.shtml
index 3ee5c7b151bfd329ababf5671dbac4d05f95c6d9..30bdfdae0be76a3082d74ba4361103aaef80c9a2 100644
--- a/doc/html/moab.shtml
+++ b/doc/html/moab.shtml
@@ -7,7 +7,17 @@ beyond the scope of any documents we could supply with SLURM.
 The best resource for Moab configuration information is the 
 online documents at Cluster Resources Inc.: 
 <a href="http://www.clusterresources.com/products/mwm/docs/slurmintegration.shtml">
-http://www.clusterresources.com/products/mwm/docs/slurmintegration.shtml</a>.
+http://www.clusterresources.com/products/mwm/docs/slurmintegration.shtml</a>.</p>
+
+<p>Moab uses SLURM commands and a wiki interface to communicate. See the 
+<a href="http://www.clusterresources.com/products/mwm/docs/wiki/wikiinterface.shtml">
+Wiki Interface Specification</a> and
+<a href="http://www.clusterresources.com/products/mwm/docs/wiki/socket.shtml">
+Wiki Socket Protocol Description</a> for more information.</p>
+
+<p>Somewhat more current information about SLURM's implementation of the 
+wiki interface was developed by Michal Novotny (Masaryk University, Czech Republic)
+and can be found <a href="http://www.fi.muni.cz/~xnovot19/wiki2.html">here</a>.</p>
 
 <h2>Configuration</h2>
 <p>First, download the Moab scheduler kit from their web site
@@ -36,6 +46,17 @@ partition configuration specifications.</p>
 
 <p>The default value of <i>SchedulerPort</i> is 7321.</p>
 
+<p>SLURM versions 2.0 and higher have internal scheduling capabilities
+that are not compatible with Moab.
+<ol>
+<li>Do not configure SLURM to use the "priority/multifactor" plugin 
+as it would set job priorities which conflict with those set by Moab.</li>
+<li>Do not use SLURM's <a href="reservations.html">reservation</a> 
+mechanism, but use that offered by Moab.</li>
+<li>Do not use SLURM's <a href="resource_limits.html">resource limits</a>
+as those may conflict with those managed by Moab.</li>
+</ol></p>
+
 <h4>SLURM commands</h4>
 <p> Note that the <i>srun --immediate</i> option is not compatible 
 with Moab. 
@@ -259,6 +280,6 @@ Write the output to a file with the same name as the user in the
 
 <p class="footer"><a href="#top">top</a></p>
 
-<p style="text-align:center;">Last modified 10 July 2008</p>
+<p style="text-align:center;">Last modified 14 May 2009</p>
 
 <!--#include virtual="footer.txt"-->
diff --git a/doc/html/mpi_guide.shtml b/doc/html/mpi_guide.shtml
new file mode 100644
index 0000000000000000000000000000000000000000..9da0d09fc058e7cfa8b81a455c88958a7e43a90a
--- /dev/null
+++ b/doc/html/mpi_guide.shtml
@@ -0,0 +1,307 @@
+<!--#include virtual="header.txt"-->
+
+<h1>MPI Use Guide</h1>
+
+<p>MPI use depends upon the type of MPI being used. 
+There are three fundamentally different modes of operation used 
+by these various MPI implementations.
+<ol>
+<li>SLURM directly launches the tasks and performs initialization 
+of communications (Quadrics MPI, MPICH2, MPICH-GM, MPICH-MX, 
+MVAPICH, MVAPICH2, some MPICH1 modes, and future versions of OpenMPI).</li>
+<li>SLURM creates a resource allocation for the job and then
+mpirun launches tasks using SLURM's infrastructure (OpenMPI,
+LAM/MPI and HP-MPI).</li>
+<li>SLURM creates a resource allocation for the job and then 
+mpirun launches tasks using some mechanism other than SLURM, 
+such as SSH or RSH (BlueGene MPI and some MPICH1 modes). 
+These tasks are initiated outside of SLURM's monitoring 
+or control. SLURM's epilog should be configured to purge 
+these tasks when the job's allocation is relinquished 
+(see the sketch following this list). </li>
+</ol>
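+<p>For example, a minimal epilog sketch for such configurations, assuming 
+the epilog environment provides the job owner's UID in <i>SLURM_JOB_UID</i> 
+(check your version's documentation) and that compute nodes are not shared 
+between users:
+<pre>
+#!/bin/bash
+# Purge any processes still owned by the job's user.
+# SLURM_JOB_UID and single-user node access are assumptions here.
+if [ -n "$SLURM_JOB_UID" ]; then
+   pkill -9 -U "$SLURM_JOB_UID"
+fi
+exit 0
+</pre>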
+<p>Links to instructions for using several varieties of MPI 
+with SLURM are provided below.
+<ul>
+<li><a href="#bluegene_mpi">BlueGene MPI</a></li>
+<li><a href="#hp_mpi">HP-MPI</a></li>
+<li><a href="#lam_mpi">LAM/MPI</a></li>
+<li><a href="#mpich1">MPICH1</a></li>
+<li><a href="#mpich2">MPICH2</a></li>
+<li><a href="#mpich_gm">MPICH-GM</a></li>
+<li><a href="#mpich_mx">MPICH-MX</a></li>
+<li><a href="#mvapich">MVAPICH</a></li>
+<li><a href="#mvapich2">MVAPICH2</a></li>
+<li><a href="#open_mpi">Open MPI</a></li>
+<li><a href="#quadrics_mpi">Quadrics MPI</a></li>
+</ul></p>
+<hr size=4 width="100%">
+
+
+<h2><a name="open_mpi" href="http://www.open-mpi.org/"><b>Open MPI</b></a></h2>
+
+<p>Open MPI relies upon
+SLURM to allocate resources for the job and then mpirun to initiate the 
+tasks. When using the <span class="commandline">salloc</span> command, 
+<span class="commandline">mpirun</span>'s -nolocal option is recommended. 
+For example:
+<pre>
+$ salloc -n4 sh    # allocates 4 processors 
+                   # and spawns shell for job
+&gt; mpirun -np 4 -nolocal a.out
+&gt; exit             # exits shell spawned by 
+                   # initial salloc command
+</pre>
+<p>Note that any direct use of <span class="commandline">srun</span>
+will only launch one task per node when the LAM/MPI plugin is used.
+To launch more than one task per node using the
+<span class="commandline">srun</span> command, the <i>--mpi=none</i>
+option will be required to explicitly disable the LAM/MPI plugin.</p>
+
+<h2>Future Use</h2>
+<p>There is work underway in both SLURM and Open MPI to support task launch
+using the <span class="commandline">srun</span> command. 
+We expect this mode of operation to be supported late in 2009.
+It may differ slightly from the description below.
+It relies upon SLURM version 2.0 (or higher) managing 
+reservations of communication ports for the Open MPI's use.
+The system administrator must specify the range of ports to be reserved 
+in the <i>slurm.conf</i> file using the <i>MpiParams</i> parameter.
+For example: <br>
+<i>MpiParams=ports=12000-12999</i></p>
+
+<p>Launch tasks using the <span class="commandline">srun</span> command 
+plus the option <i>--resv-ports</i>.
+The ports reserved on every allocated node will be identified in an 
+environment variable available to the tasks as shown here: <br>
+<i>SLURM_STEP_RESV_PORTS=12000-12015</i></p>
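+
+<p>For example, a sketch of the anticipated usage:</p>
+<pre>
+$ srun -n4 --resv-ports a.out
+</pre>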
+
+<p>If the ports reserved for a job step are found by the Open MPI library
+to be in use, a message of this form will be printed and the job step
+will be re-launched:<br>
+<i>srun: error: sun000: task 0 unable to claim reserved port, retrying</i><br>
+After three failed attempts, the job step will be aborted.
+Repeated failures should be reported to your system administrator in 
+order to rectify the problem by cancelling the processes holding those
+ports.</p>
+<hr size=4 width="100%">
+
+
+<h2><a name="quadrics_mpi" href="http://www.quadrics.com/"><b>Quadrics MPI</b></a></h2>
+
+<p>Quadrics MPI relies upon SLURM to 
+allocate resources for the job and <span class="commandline">srun</span> 
+to initiate the tasks. One would build the MPI program in the normal manner 
+then initiate it using a command line of this sort:</p>
+<pre>
+$ srun [options] &lt;program&gt; [program args]
+</pre>
+<hr size=4 width="100%">
+
+
+<h2><a name="lam_mpi" href="http://www.lam-mpi.org/"><b>LAM/MPI</b></a></h2>
+
+<p>LAM/MPI relies upon the SLURM 
+<span class="commandline">salloc</span> or <span class="commandline">sbatch</span>
+command to allocate resources. In either case, specify 
+the maximum number of tasks required for the job. Then execute the 
+<span class="commandline">lamboot</span> command to start lamd daemons. 
+<span class="commandline">lamboot</span> utilizes SLURM's 
+<span class="commandline">srun</span> command to launch these daemons. 
+Do not directly execute the <span class="commandline">srun</span> command 
+to launch LAM/MPI tasks. For example: 
+<pre>
+$ salloc -n16 sh  # allocates 16 processors 
+                  # and spawns shell for job
+&gt; lamboot
+&gt; mpirun -np 16 foo args
+1234 foo running on adev0 (o)
+2345 foo running on adev1
+etc.
+&gt; lamclean
+&gt; lamhalt
+&gt; exit            # exits shell spawned by 
+                  # initial salloc command
+</pre>
+<p>Note that any direct use of <span class="commandline">srun</span> 
+will only launch one task per node when the LAM/MPI plugin is configured
+as the default plugin.  To launch more than one task per node using the 
+<span class="commandline">srun</span> command, the <i>--mpi=none</i>
+option would be required to explicitly disable the LAM/MPI plugin
+if that is the system default.</p>
+<hr size=4 width="100%">
+
+
+<h2><a name="hp_mpi" href="http://www.hp.com/go/mpi"><b>HP-MPI</b></a></h2>
+
+<p>HP-MPI uses the 
+<span class="commandline">mpirun</span> command with the <b>-srun</b> 
+option to launch jobs. For example:
+<pre>
+$MPI_ROOT/bin/mpirun -TCP -srun -N8 ./a.out
+</pre></p>
+<hr size=4 width="100%">
+
+
+<h2><a name="mpich2" href="http://www.mcs.anl.gov/research/projects/mpich2/"><b>MPICH2</b></a></h2>
+
+<p>MPICH2 jobs are launched using the <b>srun</b> command. Just link your program with 
+SLURM's implementation of the PMI library so that tasks can communicate
+host and port information at startup. (The system administrator can add
+these options to the mpicc and mpif77 commands directly, so the user will not 
+need to bother). For example:
+<pre>
+$ mpicc -L&lt;path_to_slurm_lib&gt; -lpmi ...
+$ srun -n20 a.out
+</pre>
+<b>NOTES:</b>
+<ul>
+<li>Some MPICH2 functions are not currently supported by the PMI 
+library integrated with SLURM</li>
+<li>Set the environment variable <b>PMI_DEBUG</b> to a numeric value 
+of 1 or higher for the PMI library to print debugging information</li>
+</ul></p>
+<hr size=4 width="100%">
+
+
+<h2><a name="mpich_gm" href="http://www.myri.com/scs/download-mpichgm.html"><b>MPICH-GM</b></a></h2>
+
+<p>MPICH-GM jobs can be launched directly by the <b>srun</b> command.
+SLURM's <i>mpichgm</i> MPI plugin must be used to establish communications 
+between the launched tasks. This can be accomplished either using the SLURM 
+configuration parameter <i>MpiDefault=mpichgm</i> in <b>slurm.conf</b>
+or srun's <i>--mpi=mpichgm</i> option.
+<pre>
+$ mpicc ...
+$ srun -n16 --mpi=mpichgm a.out
+</pre>
+<hr size=4 width="100%">
+
+
+<h2><a name="mpich_mx" href="http://www.myri.com/scs/download-mpichmx.html"><b>MPICH-MX</b></a></h2>
+
+<p>MPICH-MX jobs can be launched directly by the <b>srun</b> command.
+SLURM's <i>mpichmx</i> MPI plugin must be used to establish communications
+between the launched tasks. This can be accomplished either using the SLURM
+configuration parameter <i>MpiDefault=mpichmx</i> in <b>slurm.conf</b>
+or srun's <i>--mpi=mpichmx</i> option.
+<pre>
+$ mpicc ...
+$ srun -n16 --mpi=mpichmx a.out
+</pre>
+<hr size=4 width="100%">
+
+
+<h2><a name="mvapich" href="http://mvapich.cse.ohio-state.edu/"><b>MVAPICH</b></a></h2>
+
+<p>MVAPICH jobs can be launched directly by the <b>srun</b> command.
+SLURM's <i>mvapich</i> MPI plugin must be used to establish communications 
+between the launched tasks. This can be accomplished either using the SLURM 
+configuration parameter <i>MpiDefault=mvapich</i> in <b>slurm.conf</b>
+or srun's <i>--mpi=mvapich</i> option.
+<pre>
+$ mpicc ...
+$ srun -n16 --mpi=mvapich a.out
+</pre>
+<b>NOTE:</b> If MVAPICH is used in the shared memory model, with all tasks
+running on a single node, then use the <i>mpich1_shmem</i> MPI plugin instead.<br>
+<b>NOTE (for system administrators):</b> Configure
+<i>PropagateResourceLimitsExcept=MEMLOCK</i> in <b>slurm.conf</b> and 
+start the <i>slurmd</i> daemons with an unlimited locked memory limit.
+For more details, see 
+<a href="http://mvapich.cse.ohio-state.edu/support/mvapich_user_guide.html#x1-420007.2.3">MVAPICH</a> 
+documentation for "CQ or QP Creation failure".</p>
+<hr size=4 width="100%">
+
+
+<h2><a name="mvapich2" href="http://nowlab.cse.ohio-state.edu/projects/mpi-iba"><b>MVAPICH2</b></a></h2>
+
+<p>MVAPICH2 jobs can be launched directly by the <b>srun</b> command.
+SLURM's <i>none</i> MPI plugin must be used to establish communications 
+between the launched tasks. This can be accomplished either using the SLURM 
+configuration parameter <i>MpiDefault=none</i> in <b>slurm.conf</b> 
+or srun's <i>--mpi=none</i> option. The program must also be linked with
+SLURM's implementation of the PMI library so that tasks can communicate
+host and port information at startup. (The system administrator can add
+these options to the mpicc and mpif77 commands directly, so the user will not
+need to bother).  <b>Do not use SLURM's MVAPICH plugin for MVAPICH2.</b>
+<pre>
+$ mpicc -L&lt;path_to_slurm_lib&gt; -lpmi ...
+$ srun -n16 --mpi=none a.out
+</pre>
+<hr size=4 width="100%">
+
+
+<h2><a name="bluegene_mpi" href="http://www.research.ibm.com/bluegene/"><b>BlueGene MPI</b></a></h2>
+
+<p>BlueGene MPI relies upon SLURM to create the resource allocation and then 
+uses the native <span class="commandline">mpirun</span> command to launch tasks. 
+Build a job script containing one or more invocations of the 
+<span class="commandline">mpirun</span> command. Then submit 
+the script to SLURM using <span class="commandline">sbatch</span>.
+For example:</p>
+<pre>
+$ sbatch -N512 my.script
+</pre>
+<p>Note that the node count specified with the <i>-N</i> option indicates
+the base partition count.
+See <a href="bluegene.html">BlueGene User and Administrator Guide</a> 
+for more information.</p>
+<hr size=4 width="100%">
+
+
+<h2><a name="mpich1" href="http://www-unix.mcs.anl.gov/mpi/mpich1/"><b>MPICH1</b></a></h2>
+
+<p>MPICH1 development ceased in 2005. It is recommended that you convert to 
+MPICH2 or some other MPI implementation. 
+If you still want to use MPICH1, note that it has several different 
+programming models. If you are using the shared memory model 
+(<i>DEFAULT_DEVICE=ch_shmem</i> in the mpirun script), then initiate 
+the tasks using the <span class="commandline">srun</span> command 
+with the <i>--mpi=mpich1_shmem</i> option.</p>
+<pre>
+$ srun -n16 --mpi=mpich1_shmem a.out
+</pre>
+
+<p>If you are using MPICH P4 (<i>DEFAULT_DEVICE=ch_p4</i> in 
+the mpirun script) and SLURM version 1.2.11 or newer, 
+then it is recommended that you apply the patch in the SLURM 
+distribution's file <i>contribs/mpich1.slurm.patch</i>. 
+Follow directions within the file to rebuild MPICH. 
+Applications must be relinked with the new library.
+Initiate tasks using the 
+<span class="commandline">srun</span> command with the 
+<i>--mpi=mpich1_p4</i> option.</p>
+<pre>
+$ srun -n16 --mpi=mpich1_p4 a.out
+</pre>
+<p>Note that SLURM launches one task per node and the MPICH 
+library linked within your applications launches the other 
+tasks with shared memory used for communications between them.
+The only real anomaly is that all output from all spawned tasks
+on a node appears to SLURM as coming from the one task that it
+launched. If the srun --label option is used, the task ID labels
+will be misleading.</p>
+ 
+<p>Other MPICH1 programming models currently rely upon the SLURM 
+<span class="commandline">salloc</span> or 
+<span class="commandline">sbatch</span> command to allocate resources.
+In either case, specify the maximum number of tasks required for the job.
+You may then need to build a list of hosts to be used and use that 
+as an argument to the mpirun command. 
+For example:
+<pre>
+$ cat mpich.sh
+#!/bin/bash
+srun hostname -s | sort -u >slurm.hosts
+mpirun [options] -machinefile slurm.hosts a.out
+rm -f slurm.hosts
+$ sbatch -n16 mpich.sh
+sbatch: Submitted batch job 1234
+</pre>
+<p>Note that in this example, mpirun uses the rsh command to launch 
+tasks. These tasks are not managed by SLURM since they are launched 
+outside of its control.</p>
+ 
+<p style="text-align:center;">Last modified 2 March 2009</p>
+
+<!--#include virtual="footer.txt"-->
diff --git a/doc/html/mpiplugins.shtml b/doc/html/mpiplugins.shtml
index bffb028a7c1e8917a3217e70bc5a329d15095366..96c73e2d8bdfcc236af2452b307d2c336757b1a1 100644
--- a/doc/html/mpiplugins.shtml
+++ b/doc/html/mpiplugins.shtml
@@ -34,7 +34,7 @@ srun calls
 <br>
 <i>mpi_p_thr_create((srun_job_t *)job);</i>
 <br>
-which will set up the correct enviornment for the specified mpi.
+which will set up the correct environment for the specified mpi.
 <br>
 slurmd daemon runs
 <br>
@@ -48,7 +48,7 @@ which will set configure the slurmd to use the correct mpi as well to interact w
 <h2>Data Objects</h2>
 <p> These functions are expected to read and/or modify data structures directly in 
 the slurmd daemon's and srun memory. Slurmd is a multi-threaded program with independent 
-read and write locks on each data structure type. Thererfore the type of operations 
+read and write locks on each data structure type. Therefore the type of operations 
 permitted on various data structures is identified for each function.</p>
 
 <p class="footer"><a href="#top">top</a></p>
@@ -63,14 +63,14 @@ to that of the correct mpi.</p>
 <p style="margin-left:.2in"><b>Arguments</b>:<br><span class="commandline"> job</span>&nbsp; 
 &nbsp;&nbsp;(input) Pointer to the slurmd_job that is running.  Cannot be NULL.<br>
 <span class="commandline"> rank</span>&nbsp;
-&nbsp;&nbsp;(input) Primarially there for MVAPICH.  Used to send the rank fo the mpirun job. 
+&nbsp;&nbsp;(input) Primarily there for MVAPICH.  Used to send the rank of the mpirun job. 
 This can be 0 if no rank information is needed for the mpi type.</p>
 <p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure, 
 the plugin should return SLURM_ERROR.</p>
 
 <p class="commandline">int mpi_p_thr_create (srun_job_t *job);</p>
 <p style="margin-left:.2in"><b>Description</b>: Used by srun to spawn the thread for the mpi processes. 
-Most all the real proccessing happens here.</p>
+Almost all of the real processing happens here.</p>
 <p style="margin-left:.2in"><b>Arguments</b>:<span class="commandline"> job</span>&nbsp; 
 &nbsp;&nbsp;(input) Pointer to the srun_job that is running.  Cannot be NULL.</p>
 <p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure, 
diff --git a/doc/html/news.shtml b/doc/html/news.shtml
index 432b09786f12fc67b5194ef2f0498399ae1e096c..92b765f7f55bf5d7d59b483d1875505a78f3084d 100644
--- a/doc/html/news.shtml
+++ b/doc/html/news.shtml
@@ -4,68 +4,13 @@
 
 <h2>Index</h2>
 <ul>
-<li><a href="#11">SLURM Version 1.1, May 2006</a></li>
-<li><a href="#12">SLURM Version 1.2, February 2007</a></li>
 <li><a href="#13">SLURM Version 1.3, March 2008</a></li>
-<li><a href="#14">SLURM Version 1.4 and beyond</a></li>
-</ul>
-
-<h2><a name="11">Major Updates in SLURM Version 1.1</a></h2>
-<p>SLURM Version 1.1 became available in May 2006.
-Major enhancements include:
-<ul>
-<li>Communications enhancements, validated up to 16,384 node clusters.</li>
-<li>File broadcast support (new <i>sbcast</i> command).</li>
-<li>Support for distinct executables and arguments by task ID
-(see <i>srun --multi-prog</i> option).</li>
-<li>Support for binding tasks to the memory on a processor.</li>
-<li>The configuration parameter <i>HeartbeatInterval</i> is defunct.
-Half the values of configuration parameters <i>SlurmdTimeout</i> and 
-<i>SlurmctldTimeout</i> are used as the commununication frequency for 
-the slurmctld and slurmd daemons respecitively.</li>
-<li>Support for PAM to control resource limits by user on each 
-compute node used. See <i>UsePAM</i> configuration parameter.</li>
-<li>Support added for <i>xcpu</i> job launch.</li>
-<li>Add support for 1/16 midplane BlueGene blocks.</li>
-<li>Add support for overlapping BlueGene blocks.</li>
-<li>Add support for dynamic BlueGene block creation on demand.</li>
-<li>BlueGene node count specifications are now c-node counts
-rather than base partition counts.</li>
-</ul>
-
-<h2><a name="12">Major Updates in SLURM Version 1.2</a></h2>
-<p>SLURM Version 1.2 became available in February 2007.
-Major enhancements include:
-<ul>
-<li>More complete support for resource management down to the core level
-on a node.</li>
-<li>Treat memory as a consumable resource on a compute node.</li>
-<li>New graphical user interface provided, <i>sview</i>.</li>
-<li>Added support for OS X.</li>
-<li>Permit batch jobs to be requeued.</li>
-<li>Expanded support of Moab and Maui schedulers.</li>
-<li><i>Srun</i> command augmented by new commands for each operation:
-<i>salloc</i>, <i>sbatch</i>, and <i>sattach</i>.</li>
-<li>Sched/wiki plugin (for Moab and Maui Schedulers) rewritten to 
-provide vastly improved integration.</li>
-<li>BlueGene plugin permits use of different boot images per job 
-specification.</li>
-<li>Event trigger mechanism added with new tool <i>strigger</i>.</li>
-<li>Added support for task binding to CPUs or memory via <i>cpuset</i>
-mechanism.</li>
-<li>Added support for configurable 
-<a href="power_save.html">power savings</a> on idle nodes.</li>
-<li>Support for MPICH-MX, MPICH1/shmem and MPICH1/p4 added with 
-task launch directly from the <i>srun</i> command.</li>
-<li>Wrappers available for common Torque/PBS commands 
-(<i>psub</i>, <i>pstat</i>, and <i>pbsnodes</i>).</li>
-<li>Support for <a href="http://www-unix.globus.org/">Globus</a> 
-(using Torque/PBS command wrappers).</li>
-<li>Wrapper available for <i>mpiexec</i> command.</li>
+<li><a href="#20">SLURM Version 2.0, May 2009</a></li>
+<li><a href="#21">SLURM Version 2.1 and beyond</a></li>
 </ul>
 
 <h2><a name="13">Major Updates in SLURM Version 1.3</a></h2>
-<p>SLURM Version 1.3 was relased in March 2008.
+<p>SLURM Version 1.3 was released in March 2008.
 Major enhancements include:
 <ul>
 <li>Job accounting and completion data can be stored in a database 
@@ -76,27 +21,64 @@ database support across multiple clusters.</li>
 without an external scheduler).</li>
 <li>Cryptography logic moved to a separate plugin with the 
 option of using OpenSSL (default) or Munge (GPL).</li>
-<li>Improved scheduling of multple job steps within a job's allocation.</li>
+<li>Improved scheduling of multiple job steps within a job's allocation.</li>
 <li>Support for job specification of node features with node counts.</li> 
 <li><i>srun</i>'s --alloc, --attach, and --batch options removed (use 
 <i>salloc</i>, <i>sattach</i> or <i>sbatch</i> commands instead).</li>
-<li><i>srun --pty</i> option added to support remote pseudo terminial for 
+<li><i>srun --pty</i> option added to support remote pseudo terminal for 
 spawned tasks.</li>
 <li>Support added for a much richer job dependency specification
 including testing of exit codes and multiple dependencies.</li>
+<li>Support added for BlueGene/P systems and HTC (High Throughput
+Computing) mode.</li>
+</ul>
+
+<h2><a name="20">Major Updates in SLURM Version 2.0</a></h2>
+<p>SLURM Version 2.0 is scheduled for release in May 2009.
+Major enhancements include:
+<ul>
+<li>A sophisticated <a href="priority_multifactor.html">job prioritization 
+plugin</a> is now available. 
+Jobs can be prioritized based upon their age, size and/or fair-share resource 
+allocation using hierarchical bank accounts.</li>
+<li>An assortment of <a href="resource_limits.html">resource limits</a> 
+can be imposed upon individual users and/or hierarchical bank accounts 
+such as maximum job time limit, maximum job size, and maximum number of 
+running jobs.</li>
+<li><a href="reservations.html">Advanced reservations</a> can be made to 
+insure resources will be available when needed.</li>
+<li>Idle nodes can now be completely <a href="power_save.html">powered 
+down</a> and automatically restarted when there is work 
+available.</li>
+<li>Jobs in higher priority partitions (queues) can automatically 
+<a href="preempt.html">preempt</a> jobs in lower priority queues. 
+The preempted jobs will automatically resume execution upon completion 
+of the higher priority job.</li>
+<li>Specific cores are allocated to jobs and job steps in order to effectively 
+preempt or gang schedule jobs.</li>
+<li>A new configuration parameter, <i>PrologSlurmctld</i>, can be used to 
+support the booting of different operating systems for each job.</li>
+<li>Added switch topology configuration options to optimize job resource 
+allocation with respect to communication performance.</li>
+<li>Automatic <a href="checkpoint_blcr.html">Checkpoint/Restart using BRCR</a> 
+is now available.</li>
 </ul>
 
-<h2><a name="14">Major Updates in SLURM Version 1.4 and beyond</a></h2>
+<h2><a name="21">Major Updates in SLURM Version 2.1 and beyond</a></h2>
 <p> Detailed plans for release dates and contents of future SLURM releases have 
 not been finalized. Anyone desiring to perform SLURM development should notify
 <a href="mailto:slurm-dev@lists.llnl.gov">slurm-dev@lists.llnl.gov</a>
-to coordinate activies. Future development plans includes:
+to coordinate activities. Future development plans include:
 <ul>
+<li>Optimized resource allocation based upon network topology (e.g.
+hierarchical switches).</li>
+<li>Modify more SLURM commands to operate between clusters.</li>
+<li>Support for BlueGene/Q systems.</li>
 <li>Permit resource allocations (jobs) to change size.</li>
 <li>Add Kerberos credential support including credential forwarding 
 and refresh.</li>
 </ul>
 
-<p style="text-align:center;">Last modified 11 March 2008</p>
+<p style="text-align:center;">Last modified 5 March 2009</p>
 
 <!--#include virtual="footer.txt"-->
diff --git a/doc/html/overview.shtml b/doc/html/overview.shtml
index 06c2270e558602904e5b4ac045b7a929b36bb8a6..614ee3af769e4e4bea5765ebc36dc4855e3a4d86 100644
--- a/doc/html/overview.shtml
+++ b/doc/html/overview.shtml
@@ -1,6 +1,7 @@
 <!--#include virtual="header.txt"-->
 
 <h1><a name="top">Overview</a></h1>
+
 <p>The Simple Linux Utility for Resource Management (SLURM) is an open source,
 fault-tolerant, and highly scalable cluster management and job scheduling system 
 for large and small Linux clusters. SLURM requires no kernel modifications for
@@ -8,15 +9,24 @@ its operation and is relatively self-contained. As a cluster resource manager,
 SLURM has three key functions. First, it allocates exclusive and/or non-exclusive 
 access to resources (compute nodes) to users for some duration of time so they
 can perform work. Second, it provides a framework for starting, executing, and
-monitoring work (normally a parallel job) on the set of allocated nodes. Finally, 
-it arbitrates contention for resources by managing a queue of pending work.</p>
+monitoring work (normally a parallel job) on the set of allocated nodes. 
+Finally, it arbitrates contention for resources by managing a queue of 
+pending work.
+Optional plugins can be used for 
+<a href="accounting.html">accounting</a>, 
+<a href="reservations.html">advanced reservation</a>, 
+<a href="gang_scheduling.html">gang scheduling</a> (time sharing for 
+parallel jobs), backfill scheduling, 
+<a href="resource_limits.html">resource limits</a> by user or bank account, 
+and sophisticated <a href="priority_multifactor.html">multifactor job
+prioritization</a> algorithms.</p>
 
 <p>SLURM has been developed through the collaborative efforts of 
 <a href="https://www.llnl.gov/">Lawrence Livermore National Laboratory (LLNL)</a>,
 <a href="http://www.hp.com/">Hewlett-Packard</a>, 
 <a href="http://www.bull.com/">Bull</a>,
-<a href="http://www.lnxi.com/">Linux NetworX</a> and many other contributors.
-HP distributes and supports SLURM as a component in their XC System Software.</p>
+Linux NetworX and many other contributors.</p>
 
 <h2>Architecture</h2>
 <p>SLURM has a centralized manager, <b>slurmctld</b>, to monitor resources and 
@@ -24,18 +34,22 @@ work. There may also be a backup manager to assume those responsibilities in the
 event of failure. Each compute server (node) has a <b>slurmd</b> daemon, which 
 can be compared to a remote shell: it waits for work, executes that work, returns 
 status, and waits for more work. 
-The <b>slurmd</b> daemons provide fault-tolerant hierarchical communciations.
+The <b>slurmd</b> daemons provide fault-tolerant hierarchical communications.
 There is an optional <b>slurmdbd</b> (Slurm DataBase Daemon) which can be used
 to record accounting information for multiple Slurm-managed clusters in a 
 single database.
 User tools include <b>srun</b> to initiate jobs, 
-<b>scancel</b> to terminate queued or running jobs, <b>sinfo</b> to report system 
-status, <b>squeue</b> to report the status of jobs, <b>sacct</b> to get information 
-about jobs and job steps that are running or have completed. 
+<b>scancel</b> to terminate queued or running jobs, 
+<b>sinfo</b> to report system status, 
+<b>squeue</b> to report the status of jobs, and 
+<b>sacct</b> to get information about jobs and job steps that are running or have completed.
 The <b>smap</b> and <b>sview</b> commands graphically reports system and 
-job status including network topology. There is also an administrative 
-tool <b>scontrol</b> available to monitor and/or modify configuration and state 
-information. APIs are available for all functions.</p>
+job status including network topology. 
+There is an administrative tool <b>scontrol</b> available to monitor 
+and/or modify configuration and state information on the cluster. 
+The administrative tool used to manage the database is <b>sacctmgr</b>.
+It can be used to identify the clusters, valid users, valid bank accounts, etc.
+APIs are available for all functions.</p>
 
 <div class="figure">
   <img src="arch.gif" width="550"><br>
@@ -46,41 +60,61 @@ information. APIs are available for all functions.</p>
 infrastructures. This permits a wide variety of SLURM configurations using a 
 building block approach. These plugins presently include: 
 <ul>
+<li><a href="accounting_storageplugins.html">Accounting Storage</a>: 
+text file (default if jobacct_gather != none), 
+MySQL, PGSQL, SlurmDBD (Slurm Database Daemon) or none</li>
+
 <li><a href="authplugins.html">Authentication of communications</a>: 
 <a href="http://www.theether.org/authd/">authd</a>, 
 <a href="http://home.gna.org/munge/">munge</a>, or none (default).</li>
 
 <li><a href="checkpoint_plugins.html">Checkpoint</a>: AIX, OpenMPI, XLCH, or none.</li>
-<li><a href="crypto_plugins.html">Cryptography</a>: Munge or OpenSSL</li>
-<li><a href="jobacct_gatherplugins.html">Job Accounting Gather</a>: AIX, Linux, or none(default)</li>
 
-<li><a href="accounting_storageplugins.html">Accounting Storage</a>: 
-text file (default if jobacct_gather != none), 
-MySQL, PGSQL, SlurmDBD (Slurm Database Daemon) or none</li>
+<li><a href="crypto_plugins.html">Cryptography (Digital Signature Generation)</a>: 
+<a href="http://home.gna.org/munge/">munge</a> (default) or
+<a href="http://www.openssl.org/">OpenSSL</a>.</li>
+
+<li><a href="jobacct_gatherplugins.html">Job Accounting Gather</a>: AIX, Linux, or none(default)</li>
 
-<li><a href="jobcompplugins.html">Job completion logging</a>: 
+<li><a href="jobcompplugins.html">Job Completion Logging</a>: 
 text file, arbitrary script, MySQL, PGSQL, SlurmDBD, or none (default).</li>
 
 <li><a href="mpiplugins.html">MPI</a>: LAM, MPICH1-P4, MPICH1-shmem,
 MPICH-GM, MPICH-MX, MVAPICH, OpenMPI and none (default, for most 
 other versions of MPI including MPICH2 and MVAPICH2).</li>
 
-<li><a href="selectplugins.html">Node selection</a>: 
-Bluegene (a 3-D torus interconnect BGL or BGP), 
-<a href="cons_res.html">consumable resources</a> (to allocate 
-individual processors and memory) or linear (to dedicate entire nodes).</li>
+<li><a href="priority_plugins.html">Priority</a>:
+Assigns priorities to jobs as they arrive.
+Options include 
+<a href="priority_multifactor.html">multifactor job prioritization</a>
+(assigns job priority based upon fair-share allocation, size, age, QoS, and/or partition) and
+basic (assigns job priority based upon age for First In First Out ordering, default).</li>
 
 <li><a href="proctrack_plugins.html">Process tracking (for signaling)</a>: 
 AIX (using a kernel extension), Linux process tree hierarchy, process group ID, 
 RMS (Quadrics Linux kernel patch),
 and <a href="http://oss.sgi.com/projects/pagg/">SGI's Process Aggregates (PAGG)</a>.</li>
 
+<li><a href="selectplugins.html">Node selection</a>: 
+Bluegene (a 3-D torus interconnect BGL or BGP), 
+<a href="cons_res.html">consumable resources</a> (to allocate 
+individual processors and memory) or linear (to dedicate entire nodes).</li>
+
 <li><a href="schedplugins.html">Scheduler</a>: 
-FIFO (First In First Out, default), backfill, gang (time-slicing for parallel jobs),
+builtin (First In First Out, default), 
+backfill (starts jobs early if doing so does not delay the expected initiation
+time of any higher priority job), 
+gang (time-slicing for parallel jobs),
 <a href="http://www.clusterresources.com/pages/products/maui-cluster-scheduler.php">
 The Maui Scheduler</a>, and  
 <a href="http://www.clusterresources.com/pages/products/moab-cluster-suite.php">
 Moab Cluster Suite</a>.
+There is also a <a href="priority_multifactor.html">multifactor job
+prioritization</a> plugin 
+available for use with the basic, backfill and gang schedulers only. 
+Jobs can be prioritized by age, size, fair-share allocation, etc.
+Many <a href="resource_limits.html">resource limits</a> are also 
+configurable by user or bank account.</li>
 
 <li><a href="switchplugins.html">Switch or interconnect</a>: 
 <a href="http://www.quadrics.com/">Quadrics</a> 
@@ -89,6 +123,15 @@ Federation
 <a href="http://publib-b.boulder.ibm.com/Redbooks.nsf/f338d71ccde39f08852568dd006f956d/55258945787efc2e85256db00051980a?OpenDocument">Federation</a> (IBM High Performance Switch), 
 or none (actually means nothing requiring special handling, such as Ethernet or 
 <a href="http://www.myricom.com/">Myrinet</a>, default).</li>
+
+<li><a href="taskplugins.html">Task Affinity</a>:
+Affinity (bind tasks to processors or CPU sets) or none (no binding, the default).</li>
+
+<li><a href="topology_plugin.html">Network Topology</a>:
+3d_torus (optimize resource selection based upon a 3d_torus interconnect, default for Cray XT, Sun Constellation and IBM BlueGene), 
+tree (optimize resource selection based upon switch connections) or
+none (the default on other systems).</li>
+
 </ul>
 
 <p>The entities managed by these SLURM daemons, shown in Figure 2, include <b>nodes</b>,
@@ -146,7 +189,6 @@ SlurmdPort=7003
 SlurmdSpoolDir=/var/tmp/slurmd.spool
 SlurmdTimeout=120
 StateSaveLocation=/usr/local/slurm/slurm.state
-SwitchType=switch/elan
 TmpFS=/tmp
 #
 # Node Configurations
@@ -166,6 +208,6 @@ PartitionName=DEFAULT MaxTime=UNLIMITED MaxNodes=4096
 PartitionName=batch Nodes=lx[0041-9999]
 </pre>
 
-<p style="text-align:center;">Last modified 11 March 2008</p>
+<p style="text-align:center;">Last modified 31 March 2009</p>
 
 <!--#include virtual="footer.txt"-->
diff --git a/doc/html/plane_ex5.gif b/doc/html/plane_ex5.gif
index 5c466a8e85fa260b5f27eb5fed6ecb40eb0a99dd..ce8f9e7e0438d681b87ce4ad484d6cb8286486b0 100755
Binary files a/doc/html/plane_ex5.gif and b/doc/html/plane_ex5.gif differ
diff --git a/doc/html/platforms.shtml b/doc/html/platforms.shtml
index c2a83ce31f3adf1201a4ebf21ae37c7052d2b273..ee1a60a543f5201571eb53b7d22b9c48abcfa0ba 100644
--- a/doc/html/platforms.shtml
+++ b/doc/html/platforms.shtml
@@ -6,13 +6,16 @@
 <li><b>AIX</b>&#151;SLURM support for AIX has been thoroughly tested.</li>
 <li><b>Linux</b>&#151;SLURM has been thoroughly tested on most popular Linux 
 distributions using i386, ia64, and x86_64 architectures.</li>
-<li><b>OS X</b>&#151;SLURM support for OS X is available in version 1.2.</li>
+<li><b>OS X</b>&#151;SLURM support for OS X is available.</li>
 <li><b>Other</b>&#151;SLURM ports to other systems will be gratefully accepted.</li>
 </ul>
 <h2>Interconnects</h2>
 <ul>
-<li><b>Blue Gene</b>&#151;SLURM support for IBM's Blue Gene system has been
-thoroughly tested.</li>
+<li><b>BlueGene</b>&#151;SLURM support for IBM's BlueGene/L and BlueGene/P 
+systems has been thoroughly tested.</li>
+<li><b>Cray XT</b>&#151;Much of the infrastructure to support a Cray XT
+system is already present in SLURM. The interface to ALPS/BASIL remains to be done.
+Please contact us if you would be interested in this work.</li>
 <li><b>Ethernet</b>&#151;Ethernet requires no special support from SLURM and has 
 been thoroughly tested.</li>
 <li><b>IBM Federation</b>&#151;SLURM support for IBM's Federation Switch 
@@ -21,9 +24,11 @@ has been thoroughly tested.</li>
 <li><b>Myrinet</b>&#151;Myrinet, MPICH-GM and MPICH-MX are supported.</li>
 <li><b>Quadrics Elan</b>&#151;SLURM support for Quadrics Elan 3 and Elan 4 switches 
 are available in all versions of SLURM and have been thoroughly tested.</li>
+<li><b>Sun Constellation</b>&#151;Resource allocation has been optimized
+for the three-dimensional torus interconnect.</li>
 <li><b>Other</b>&#151;SLURM ports to other systems will be gratefully accepted.</li>
 </ul>
 
-<p style="text-align:center;">Last modified 15 June 2007</p>
+<p style="text-align:center;">Last modified 9 February 2009</p>
 
 <!--#include virtual="footer.txt"-->
diff --git a/doc/html/plugins.shtml b/doc/html/plugins.shtml
index 7e0c45c5f3bbcd31851073f44ab3c8de58688d96..f1c9dcb53ae2459cb407acd200e52faa4433fdb9 100644
--- a/doc/html/plugins.shtml
+++ b/doc/html/plugins.shtml
@@ -101,7 +101,7 @@ are not available, so it is the installer's job to make sure the specified libra
 are available.</p>
 <h2>Performance</h2>
 <p>All plugin functions are expected to execute very quickly. If any function 
-entails delays (e.g. transations with other systems), it should be written to 
+entails delays (e.g. transactions with other systems), it should be written to 
 utilize a thread for that functionality. This thread may be created by the 
 <span class="commandline">init()</span> function and deleted by the 
 <span class="commandline">fini()</span> functions. See <b>plugins/sched/backfill</b>
diff --git a/doc/html/power_save.shtml b/doc/html/power_save.shtml
index 0f4a8de4867bd5b79588ee298179ca4b7c5147c0..763584b42d056b4273bd74422f879b3f4c76aa93 100644
--- a/doc/html/power_save.shtml
+++ b/doc/html/power_save.shtml
@@ -1,20 +1,18 @@
 <!--#include virtual="header.txt"-->
 
 <h1>Power Saving Guide</h1>
-<p>SLURM provides an integrated power saving mechanism beginning with 
-version 1.2.7.
+
+<p>SLURM provides an integrated power saving mechanism for idle nodes.
 Nodes that remain idle for an configurable period of time can be placed 
 in a power saving mode. 
 The nodes will be restored to normal operation once work is assigned to them.
-Power saving is accomplished using a <i>cpufreq</i> governor that can change
-CPU frequency and voltage.
-Note that the <i>cpufreq</i> driver must be enabled in the Linux kernel 
-configuration. 
-While the "ondemand" governor can be configured to operate at all
-times to automatically alter the CPU performance based upon workload, 
-SLURM provides somewhat greater flexibility for power management on a
-cluster. 
-Of particular note, SLURM can alter the governors across the cluster
+Beginning with version 2.0.0, nodes can be fully powered down.
+Earlier releases of SLURM do not support powering nodes down; they only 
+support reducing node performance and thus power consumption.
+For example, power saving can be accomplished using a <i>cpufreq</i> governor 
+that can change CPU frequency and voltage (note that the <i>cpufreq</i> driver
+must be enabled in the Linux kernel configuration).
+Of particular note, SLURM can power nodes up or down 
 at a configurable rate to prevent rapid changes in power demands. 
 For example, starting a 1000 node job on an idle cluster could result 
 in an instantaneous surge in power demand of multiple megawatts without 
@@ -22,14 +20,21 @@ SLURM's support to increase power demands in a gradual fashion.</p>
 
 
 <h2>Configuration</h2>
+
 <p>A great deal of flexibility is offered in terms of when and 
-how idle nodes are put into or removed from power save mode.
-The following configuration paramters are available:
+how idle nodes are put into or removed from power save mode. 
+Note that the SLURM control daemon, <i>slurmctld</i>, must be 
+restarted to initially enable power saving mode. 
+Changes in the configuration parameters (e.g. <i>SuspendTime</i>)
+will take effect after modifying the <i>slurm.conf</i> configuration
+file and executing "<i>scontrol reconfig</i>".
+The following configuration parameters are available (a sample 
+configuration combining them is shown after this list):
 <ul>
 
 <li><b>SuspendTime</b>:
-Nodes becomes elligible for power saving mode after being idle 
+Nodes become eligible for power saving mode after being idle 
 for this number of seconds. 
+The configured value should exceed the time to suspend and resume a node.
 A negative number disables power saving mode.
 The default value is -1 (disabled).</li>
 
@@ -41,11 +46,11 @@ The default value is 60.
 Use this to prevent rapid drops in power requirements.</li>
 
 <li><b>ResumeRate</b>:
-Maximum number of nodes to be placed into power saving mode 
+Maximum number of nodes to be removed from power saving mode 
 per minute. 
 A value of zero results in no limits being imposed.
-The default value is 60.
-Use this to prevent rapid increasses in power requirements.</li>
+The default value is 300.
+Use this to prevent rapid increases in power requirements.</li>
 
 <li><b>SuspendProgram</b>:
 Program to be executed to place nodes into power saving mode.
@@ -61,7 +66,31 @@ The program executes as <i>SlurmUser</i> (as configured in
 <i>slurm.conf</i>). 
 The argument to the program will be the names of nodes to 
 be removed from power savings mode (using SLURM's hostlist 
-expression format).</li>
+expression format).
+This program may use the <i>scontrol show node</i> command
+to ensure that a node has booted and the <i>slurmd</i> 
+daemon started. 
+If the <i>slurmd</i> daemon fails to respond within the
+configured <b>SlurmdTimeout</b> value, the node will be 
+placed in a DOWN state and the job requesting the node
+will be requeued.
+For reasons of reliability, <b>ResumeProgram</b> may execute 
+more than once for a node when the <b>slurmctld</b> daemon 
+crashes and is restarted.</li>
+
+<li><b>SuspendTimeout</b>:
+Maximum time permitted (in seconds) between when a node suspend request 
+is issued and when the node shutdown is complete. 
+At that time the node must be ready for a resume request to be issued 
+as needed for new workload.  
+The default value is 30 seconds.</li>
+
+<li><b>ResumeTimeout</b>:
+Maximum time permitted (in seconds) between when a node resume request 
+is issued and when the node is actually available for use. 
+Nodes which fail to respond in this time frame may be marked DOWN and
+the jobs scheduled on the node requeued.
+The default value is 60 seconds.</li>
 
 <li><b>SuspendExcNodes</b>:
 List of nodes to never place in power saving mode. 
@@ -74,35 +103,57 @@ Multiple partitions may be specified using a comma separator.
 By default, no nodes are excluded.</li>
 </ul></p>
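+
+<p>As an illustration only (the values below are hypothetical and 
+site-specific), a power saving configuration in <i>slurm.conf</i> 
+might combine these parameters as follows:</p>
+<pre>
+# Sample power saving configuration (illustrative values only)
+SuspendTime=1800         # place nodes idle 30 minutes into power save
+SuspendRate=60           # suspend at most 60 nodes per minute
+ResumeRate=300           # resume at most 300 nodes per minute
+SuspendProgram=/usr/sbin/slurm_suspend   # hypothetical script path
+ResumeProgram=/usr/sbin/slurm_resume     # hypothetical script path
+SuspendTimeout=30
+ResumeTimeout=60
+SuspendExcNodes=n[0-1]   # e.g. never suspend login/service nodes
+SuspendExcParts=debug
+</pre>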
 
-<p>While <i>SuspendProgram</i> and <i>ResumeProgram</i> execute as 
-<i>SlurmUser</i>. The program can take advantage of this to execute 
-programs directly on the nodes as user <i>root</i> through the 
-SLURM infrastructure.
-Example scripts are shown below:
+<p>Note that <i>SuspendProgram</i> and <i>ResumeProgram</i> execute as 
+<i>SlurmUser</i> on the node where the <i>slurmctld</i> daemon runs
+(primary and backup server nodes). 
+Use of <i>sudo</i> may be required for <i>SlurmUser</i> to power down 
+and restart nodes.
+If you need to convert SLURM's hostlist expression into individual node
+names, the <i>scontrol show hostnames</i> command may prove useful.
+The commands used to boot or shut down nodes will depend upon your
+cluster management tools.</p>
+
+<p>Note that <i>SuspendProgram</i> and <i>ResumeProgram</i> are not 
+subject to any time limits.
+They should perform the required action, ideally verify the action
+(e.g. that the node has booted and the <i>slurmd</i> daemon has started,
+so the node is no longer non-responsive to <i>slurmctld</i>), and terminate. 
+Long running programs will be logged by <i>slurmctld</i>, but not
+aborted.</p>
+
 <pre>
 #!/bin/bash
-# Example SuspendProgram for cluster where every node has two CPUs
-srun --uid=0 --no-allocate --nodelist=$1 echo powersave >/sys/devices/system/cpu0/cpufreq
-srun --uid=0 --no-allocate --nodelist=$1 echo powersave >/sys/devices/system/cpu1/cpufreq
+# Example SuspendProgram
+hosts=`scontrol show hostnames $1`
+for host in $hosts
+do
+   sudo node_shutdown $host
+done
 
 #!/bin/bash
-# Example ResumeProgram for cluster where every node has two CPUs
-srun --uid=0 --no-allocate --nodelist=$1 echo performance >/sys/devices/system/cpu0/cpufreq
-srun --uid=0 --no-allocate --nodelist=$1 echo performance >/sys/devices/system/cpu1/cpufreq
+# Example ResumeProgram
+hosts=`scontrol show hostnames $1`
+for host in $hosts
+do
+   sudo node_startup $host
+done
 </pre>
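+
+<p>Since <b>ResumeProgram</b> should ideally verify its action before 
+terminating (see above), a more careful variant might poll each node 
+after starting it. The sketch below uses hypothetical site commands 
+<i>node_startup</i> and <i>node_is_ready</i>; how to determine that a 
+node has booted and its <i>slurmd</i> daemon has started (e.g. by 
+inspecting "<i>scontrol show node</i>" output) is site-specific:</p>
+<pre>
+#!/bin/bash
+# Sketch of a ResumeProgram that verifies each node before exiting.
+# node_startup and node_is_ready are placeholders for site commands.
+hosts=`scontrol show hostnames $1`
+for host in $hosts
+do
+   sudo node_startup $host
+done
+for host in $hosts
+do
+   until node_is_ready $host  # e.g. inspect "scontrol show node $host"
+   do
+      sleep 5
+   done
+done
+</pre>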
 
-<p>The srun --no-allocate option permits SlurmUser and user root only to spawn 
-tasks directly on the compute nodes without actually creating a SLURM job. 
-No other users have this permission (their requests will generate an invalid 
-credential error message and the event will be logged).
-The srun --uid option permits SlurmUser and user root only to execute a job 
-as some other user. 
-Then SlurmUser uses the srun --uid option, the srun command will try to set 
-its user ID to that value in order to fully operate as the specified user.
-This will fail and srun will report an error to that effect. 
-This does not prevent the spawned programs from running as user root.
-No other users have this permission (their requests will generate an invalid 
-user id error message and the event will be logged).</p>
+<p>Subject to the various rates, limits and exclusions, the power save 
+code follows this logic:
+<ol>
+<li>Identify nodes which have been idle for at least <b>SuspendTime</b>.</li>
+<li>Execute <b>SuspendProgram</b> with an argument of the idle node names.</li>
+<li>Identify the nodes which are in power save mode (a flag in the node's 
+state field), but have been allocated to jobs.</li>
+<li>Execute <b>ResumeProgram</b> with an argument of the allocated node names.</li>
+<li>Once the <i>slurmd</i> responds, initiate the job and/or job steps 
+allocated to it.</li>
+<li>If the <i>slurmd</i> fails to respond within the value configured for 
+<b>SlurmdTimeout</b>, the node will be marked DOWN and the job requeued
+if possible.</li>
+<li>Repeat indefinitely.</li>
+</ol></p>
 
 <p>The slurmctld daemon will periodically (every 10 minutes) log how many 
 nodes are in power save mode using messages of this sort:
@@ -113,10 +164,61 @@ nodes are in power save mode using messages of this sort:
 ...
 [May 02 15:51:28] Power save mode 22 nodes
 </pre>
-<p>Using these logs you can easily see the effect of SLURM's power saving support.
-You can also configure SLURM without SuspendProgram or ResumeProgram values
-to assess the potential impact of power saving mode before enabling it.</p>
 
-<p style="text-align:center;">Last modified 14 May 2007</p>
+<p>Using these logs you can easily see the effect of SLURM's power saving 
+support.
+You can also configure SLURM with programs that perform no action as 
+<b>SuspendProgram</b> and <b>ResumeProgram</b> to assess the potential 
+impact of power saving mode before enabling it.</p>
+
+<h2>Use of Allocations</h2>
+
+<p>A resource allocation request will be granted as soon as resources
+are selected for use, possibly before the nodes are all available 
+for use.
+The launching of job steps will be delayed until the required nodes 
+have been restored to service (<i>srun</i> prints a warning about waiting 
+for nodes to become available and periodically retries until they are 
+available).</p>
+
+<p>In the case of an <i>sbatch</i> command, the batch program will start
+when node zero of the allocation is ready for use and pre-processing can
+be performed as needed before using <i>srun</i> to launch job steps.
+The operation of <i>salloc</i> and <i>srun</i> follows a similar pattern 
+of getting a job allocation at one time, but possibly being unable to 
+launch job steps until later. 
+If <i>ssh</i> or some other tool is used by <i>salloc</i>, it may be
+desirable to execute "<i>srun /bin/true</i>" or some other command
+first to ensure that all nodes are booted and ready for use. 
+We plan to add a job and node state of <i>CONFIGURING</i> in SLURM
+version 2.1, which could be used to prevent salloc from executing
+any processes (including <i>ssh</i>) until all of the nodes are 
+ready for use.</p>
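+
+<p>For example, an interactive user might guard against this as follows 
+(a minimal sketch; the allocation size and target node are arbitrary):</p>
+<pre>
+$ salloc -N2 /bin/bash     # allocation may be granted before nodes boot
+$ srun /bin/true           # returns only once all nodes can run a step
+$ ssh some-allocated-node  # now reasonably safe to reach nodes directly
+</pre>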
+
+<h2>Fault Tolerance</h2>
+
+<p>If the <i>slurmctld</i> daemon is terminated gracefully, it will
+wait up to <b>SuspendTimeout</b> or <b>ResumeTimeout</b> (whichever
+is larger) for any spawned <b>SuspendProgram</b> or 
+<b>ResumeProgram</b> to terminate before the daemon terminates. 
+If the spawned program does not terminate within that time period, 
+the event will be logged and <i>slurmctld</i> will exit in order to 
+permit another <i>slurmctld</i> daemon to be initiated.
+Synchronization problems could also occur when the <i>slurmctld</i> 
+daemon crashes (a rare event) and is restarted. </p>
+
+<p>In either event, the newly initiated <i>slurmctld</i> daemon (or 
+the backup server) will recover saved node state information that 
+may not accurately describe the actual node state.
+In the case of a failed <b>SuspendProgram</b>, the negative impact is 
+limited to increased power consumption, so no special action is 
+currently taken to execute <b>SuspendProgram</b> multiple times in 
+order to ensure the node is in a reduced power mode.
+The case of a failed <b>ResumeProgram</b> is more serious in that the 
+node could be placed into a DOWN state and/or jobs could fail.
+In order to minimize this risk, when the <i>slurmctld</i> daemon is 
+started and a node which should be allocated to a job fails to respond, 
+the <b>ResumeProgram</b> will be executed (possibly for a second time).</p>
+
+<p style="text-align:center;">Last modified 2 June 2009</p>
 
 <!--#include virtual="footer.txt"-->
diff --git a/doc/html/preempt.shtml b/doc/html/preempt.shtml
index 2f9bd34df495a63c656d520d171b288a56bc5857..ed7dc289d36bc58d74e7aa84827c4181b415fc98 100644
--- a/doc/html/preempt.shtml
+++ b/doc/html/preempt.shtml
@@ -1,248 +1,297 @@
-<!--#include virtual="header.txt"-->
-
-<H1>Preemption</H1>
-
-<P>
-SLURM version 1.2 and earlier supported dedication of resources
-to jobs based on a simple "first come, first served" policy with backfill.
-Beginning in SLURM version 1.3, priority-based <I>preemption</I> is supported. 
-Preemption is the act of suspending one or more "low-priority" jobs to let a
-"high-priority" job run uninterrupted until it completes. Preemption provides
-the ability to prioritize the workload on a cluster.
-</P>
-<P>
-The SLURM version 1.3.1 <I>sched/gang</I> plugin supports preemption. 
-When configured, 
-the plugin monitors each of the partitions in SLURM. If a new job in a
-high-priority partition has been allocated to resources that have already been
-allocated to one or more existing jobs from lower priority partitions, the
-plugin respects the partition priority and suspends the low-priority job(s). The
-low-priority job(s) remain suspended until the job from the high-priority
-partition completes. Once the high-priority job completes then the low-priority
-job(s) are resumed.
-</P>
-
-<H2>Configuration</H2>
-<P>
-There are several important configuration parameters relating to preemption:
-</P>
-<UL>
-<LI>
-<B>SelectType</B>: The SLURM <I>sched/gang</I> plugin supports nodes 
-allocated by the <I>select/linear</I> plugin and socket/core/CPU resources 
-allocated by the <I>select/cons_res</I> plugin. 
-See <A HREF="#future_work">Future Work</A> below for more
-information on "preemption with consumable resources".
-</LI>
-<LI>
-<B>SelectTypeParameter</B>: Since resources will be getting overallocated 
-with jobs (the preempted job will remain in memory), the resource selection
-plugin should be configured to track the amount of memory used by each job to
-ensure that memory page swapping does not occur. When <I>select/linear</I> is
-chosen, we recommend setting <I>SelectTypeParameter=CR_Memory</I>. When
-<I>select/cons_res</I> is chosen, we recommend including Memory as a resource
-(ex. <I>SelectTypeParameter=CR_Core_Memory</I>).
-</LI>
-<LI>
-<B>DefMemPerCPU</B>: Since job requests may not explicitly specify 
-a memory requirement, we also recommend configuring 
-<I>DefMemPerCPU</I> (default memory per allocated CPU) or 
-<I>DefMemPerNode</I> (default memory per allocated node). 
-It may also be desirable to configure 
-<I>MaxMemPerCPU</I> (maximum memory per allocated CPU) or 
+<!--#include virtual="header.txt"-->
+
+<H1>Preemption</H1>
+
+<P>
+SLURM version 1.2 and earlier supported dedication of resources
+to jobs based on a simple "first come, first served" policy with backfill.
+Beginning in SLURM version 1.3, priority partitions and priority-based
+<I>preemption</I> are supported. Preemption is the act of suspending one or more
+"low-priority" jobs to let a "high-priority" job run uninterrupted until it
+completes. Preemption provides the ability to prioritize the workload on a
+cluster.
+</P>
+<P>
+The SLURM version 1.3.1 <I>sched/gang</I> plugin supports preemption. 
+When configured, 
+the plugin monitors each of the partitions in SLURM. If a new job in a
+high-priority partition has been allocated to resources that have already been
+allocated to one or more existing jobs from lower priority partitions, the
+plugin respects the partition priority and suspends the low-priority job(s). The
+low-priority job(s) remain suspended until the job from the high-priority
+partition completes. Once the high-priority job completes then the low-priority
+job(s) are resumed.
+</P>
+
+<H2>Configuration</H2>
+<P>
+There are several important configuration parameters relating to preemption:
+</P>
+<UL>
+<LI>
+<B>SelectType</B>: The SLURM <I>sched/gang</I> plugin supports nodes 
+allocated by the <I>select/linear</I> plugin and socket/core/CPU resources 
+allocated by the <I>select/cons_res</I> plugin.
+</LI>
+<LI>
+<B>SelectTypeParameter</B>: Since resources will be getting overallocated 
+with jobs (suspended jobs remain in memory), the resource selection
+plugin should be configured to track the amount of memory used by each job to
+ensure that memory page swapping does not occur. When <I>select/linear</I> is
+chosen, we recommend setting <I>SelectTypeParameter=CR_Memory</I>. When
+<I>select/cons_res</I> is chosen, we recommend including Memory as a resource
+(ex. <I>SelectTypeParameter=CR_Core_Memory</I>).
+</LI>
+<LI>
+<B>DefMemPerCPU</B>: Since job requests may not explicitly specify 
+a memory requirement, we also recommend configuring 
+<I>DefMemPerCPU</I> (default memory per allocated CPU) or 
+<I>DefMemPerNode</I> (default memory per allocated node). 
+It may also be desirable to configure 
+<I>MaxMemPerCPU</I> (maximum memory per allocated CPU) or 
 <I>MaxMemPerNode</I> (maximum memory per allocated node) in <I>slurm.conf</I>.
 Users can use the <I>--mem</I> or <I>--mem-per-cpu</I> option
-at job submission time to specify their memory requirements.
-</LI>
-<LI>
-<B>JobAcctGatherType and JobAcctGatherFrequency</B>:
-If you wish to enforce memory limits, accounting must be enabled
-using the <I>JobAcctGatherType</I> and <I>JobAcctGatherFrequency</I>
-parameters. If accounting is enabled and a job exceeds its configured
-memory limits, it will be canceled in order to prevent it from 
-adversely effecting other jobs sharing the same resources.
-</LI>
-<LI>
-<B>SchedulerType</B>: Configure the <I>sched/gang</I> plugin by setting
-<I>SchedulerType=sched/gang</I> in <I>slurm.conf</I>.
-</LI>
-<LI>
-<B>Priority</B>: Configure the partition's <I>Priority</I> setting relative to
-other partitions to control the preemptive behavior. If two jobs from two
-different partitions are allocated to the same resources, the job in the
-partition with the greater <I>Priority</I> value will preempt the job in the
-partition with the lesser <I>Priority</I> value. If the <I>Priority</I> values
-of the two partitions are equal then no preemption will occur, and the two jobs
-will run simultaneously on the same resources. The default <I>Priority</I> value
-is 1.
-</LI>
-<LI>
-<B>Shared</B>: Configure the partitions <I>Shared</I> setting to 
-<I>FORCE</I> for all partitions that will preempt or that will be preempted. The
-<I>FORCE</I> setting is required to enable the select plugins to overallocate
-resources. Jobs submitted to a partition that does not share it's resources will
-not preempt other jobs, nor will those jobs be preempted. Instead those jobs
-will wait until the resources are free for non-shared use by each job.
-<BR>
-The <I>FORCE</I> option now supports an additional parameter that controls 
-how many jobs can share a resource within the partition (FORCE[:max_share]). By
-default the max_share value is 4. To disable timeslicing within a partition but
-enable preemption with other partitions, set <I>Shared=FORCE:1</I>.
-</LI>
-<LI>
-<B>SchedulerTimeSlice</B>: The default timeslice interval is 30 seconds. 
-To change this duration, set <I>SchedulerTimeSlice</I> to the desired interval 
-(in seconds) in <I>slurm.conf</I>. For example, to set the timeslice interval 
-to one minute, set <I>SchedulerTimeSlice=60</I>. Short values can increase 
-the overhead of gang scheduling. This parameter is only relevant if timeslicing
-within a partition will be configured. Preemption and timeslicing can occur at
-the same time.
-</LI>
-</UL>
-<P>
-To enable preemption after making the configuration changes described above,
-restart SLURM if it is already running. Any change to the plugin settings in
-SLURM requires a full restart of the daemons. If you just change the partition
-<I>Priority</I> or <I>Shared</I> setting, this can be updated with

-<I>scontrol reconfig</I>.
-</P>
-
-<H2>Preemption Design and Operation</H2>

-
-<P>
-When enabled, the <I>sched/gang</I> plugin keeps track of the resources
-allocated to all jobs. For each partition an "active bitmap" is maintained that
-tracks all concurrently running jobs in the SLURM cluster. Each partition also
-maintains a job list for that partition, and a list of "shadow" jobs. These
-"shadow" jobs are running jobs from higher priority partitions that "cast
-shadows" on the active bitmaps of the lower priority partitions. 
-</P>
-<P>
-Each time a new job is allocated to resources in a partition and begins running,
-the <I>sched/gang</I> plugin adds a "shadow" of this job to all lower priority
-partitions. The active bitmap of these lower priority partitions are then
-rebuilt, with the shadow jobs added first. Any existing jobs that were replaced
-by one or more "shadow" jobs are suspended (preempted). Conversely, when a 
-high-priority running job completes, it's "shadow" goes away and the active 
-bitmaps of the lower priority partitions are rebuilt to see if any suspended 
-jobs can be resumed.
-</P>
-<P>
-The gang scheduler plugin is primarily designed to be <I>reactive</I> to the
-resource allocation decisions made by the Selector plugins. This is why
-<I>Shared=FORCE</I> is required in each partition. The <I>Shared=FORCE</I>
-setting enables the <I>select/linear</I> and <I>select/cons_res</I> plugins to
-overallocate the resources between partitions. This keeps all of the node
-placement logic in the <I>select</I> plugins, and leaves the gang scheduler in
-charge of controlling which jobs should run on the overallocated resources. 
-</P>
-<P>
-The <I>sched/gang</I> plugin suspends jobs via the same internal functions that
-support <I>scontrol suspend</I> and <I>scontrol resume</I>. A good way to
-observe the act of preemption is by running <I>watch squeue</I> in a terminal
-window.
-</P>
-
-<H2>A Simple Example</H2>
-
-<P>
-The following example is configured with <I>select/linear</I>,
-<I>sched/gang</I>, and <I>Shared=FORCE:1</I>. This example takes place on a
-cluster of 5 nodes:
-</P>
-<PRE>
-[user@n16 ~]$ <B>sinfo</B>
-PARTITION AVAIL  TIMELIMIT NODES  STATE NODELIST
-active*      up   infinite     5   idle n[12-16]
-hipri        up   infinite     5   idle n[12-16]
-</PRE>
-<P>
-Here are the Partition settings:
-</P>
-<PRE>
-[user@n16 ~]$ <B>grep PartitionName /shared/slurm/slurm.conf</B>
-PartitionName=active Priority=1 Default=YES Shared=FORCE:1 Nodes=n[12-16]
-PartitionName=hipri  Priority=2             Shared=FORCE:1 Nodes=n[12-16]
-</PRE>
-<P>
-The <I>runit.pl</I> script launches a simple load-generating app that runs
-for the given number of seconds. Submit 5 single-node <I>runit.pl</I> jobs to
-run on all nodes:
-</P>
-<PRE>
-[user@n16 ~]$ <B>sbatch -N1 ./runit.pl 300</B>
-sbatch: Submitted batch job 485
-[user@n16 ~]$ <B>sbatch -N1 ./runit.pl 300</B>
-sbatch: Submitted batch job 486
-[user@n16 ~]$ <B>sbatch -N1 ./runit.pl 300</B>
-sbatch: Submitted batch job 487
-[user@n16 ~]$ <B>sbatch -N1 ./runit.pl 300</B>
-sbatch: Submitted batch job 488
-[user@n16 ~]$ <B>sbatch -N1 ./runit.pl 300</B>
-sbatch: Submitted batch job 489
-[user@n16 ~]$ <B>squeue</B>
-JOBID PARTITION     NAME   USER  ST   TIME  NODES NODELIST
-  485    active runit.pl   user   R   0:06      1 n12
-  486    active runit.pl   user   R   0:06      1 n13
-  487    active runit.pl   user   R   0:05      1 n14
-  488    active runit.pl   user   R   0:05      1 n15
-  489    active runit.pl   user   R   0:04      1 n16
-</PRE>
-<P>
-Now submit a short-running 3-node job to the <I>hipri</I> partition:
-</P>
-<PRE>
-[user@n16 ~]$ <B>sbatch -N3 -p hipri ./runit.pl 30</B>
-sbatch: Submitted batch job 490
-[user@n16 ~]$ <B>squeue</B>
-JOBID PARTITION     NAME   USER  ST   TIME  NODES NODELIST
-  488    active runit.pl   user   R   0:29      1 n15
-  489    active runit.pl   user   R   0:28      1 n16
-  485    active runit.pl   user   S   0:27      1 n12
-  486    active runit.pl   user   S   0:27      1 n13
-  487    active runit.pl   user   S   0:26      1 n14
-  490     hipri runit.pl   user   R   0:03      3 n[12-14]
-</PRE>
-<P>
-Job 490 in the <I>hipri</I> partition preempted jobs 485, 486, and 487 from
-the <I>active</I> partition. Jobs 488 and 489 in the <I>active</I> partition
-remained running.
-</P>
-<P>
-This state persisted until job 490 completed, at which point the preempted jobs
-were resumed:
-</P>
-<PRE>
-[user@n16 ~]$ <B>squeue</B>
-JOBID PARTITION     NAME   USER  ST   TIME  NODES NODELIST
-  485    active runit.pl   user   R   0:30      1 n12
-  486    active runit.pl   user   R   0:30      1 n13
-  487    active runit.pl   user   R   0:29      1 n14
-  488    active runit.pl   user   R   0:59      1 n15
-  489    active runit.pl   user   R   0:58      1 n16
-</PRE>
-
-
-<H2><A NAME="future_work">Future Work</A></H2>
-
-<P>
-<B>Preemption with consumable resources</B>: This implementation of preemption
-relies on intelligent job placement by the <I>select</I> plugins. As of SLURM
-1.3.1 the consumable resource <I>select/cons_res</I> plugin still needs
-additional enhancements to the job placement algorithm before it's preemption
-support can be considered "competent". The mechanics of preemption work, but the
-placement of preemptive jobs relative to any low-priority jobs may not be
-optimal. The work to improve the placement of preemptive jobs relative to
-existing jobs is currently in-progress. 
-</P>
-<P>
-<B>Requeue a preempted job</B>: In some situations is may be desirable to
-requeue a low-priority job rather than suspend it. Suspending a job leaves the
-job in memory. Requeuing a job involves terminating the job and resubmitting it
-again. This will be investigated at some point in the future. Requeuing a
-preempted job may make the most sense with <I>Shared=NO</I> partitions.
-</P>
-
-<p style="text-align:center;">Last modified 7 July 2008</p>
-
-<!--#include virtual="footer.txt"-->
+at job submission time to specify their memory requirements.
+</LI>
+<LI>
+<B>JobAcctGatherType and JobAcctGatherFrequency</B>: The "maximum data segment
+size" and "maximum virtual memory size" system limits will be configured for
+each job to ensure that the job does not exceed its requested amount of memory.
+If you wish to enable additional enforcement of memory limits, configure job
+accounting with the <I>JobAcctGatherType</I> and <I>JobAcctGatherFrequency</I>
+parameters. When accounting is enabled and a job exceeds its configured memory
+limits, it will be canceled in order to prevent it from adversely affecting
+other jobs sharing the same resources.
+</LI>
+<LI>
+<B>SchedulerType</B>: Configure the <I>sched/gang</I> plugin by setting
+<I>SchedulerType=sched/gang</I> in <I>slurm.conf</I>.
+</LI>
+<LI>
+<B>Priority</B>: Configure the partition's <I>Priority</I> setting relative to
+other partitions to control the preemptive behavior. If two jobs from two
+different partitions are allocated to the same resources, the job in the
+partition with the greater <I>Priority</I> value will preempt the job in the
+partition with the lesser <I>Priority</I> value. If the <I>Priority</I> values
+of the two partitions are equal then no preemption will occur. The default
+<I>Priority</I> value is 1.
+</LI>
+<LI>
+<B>SchedulerTimeSlice</B>: The default timeslice interval is 30 seconds. 
+To change this duration, set <I>SchedulerTimeSlice</I> to the desired interval 
+(in seconds) in <I>slurm.conf</I>. For example, to set the timeslice interval 
+to one minute, set <I>SchedulerTimeSlice=60</I>. Short values can increase 
+the overhead of gang scheduling. This parameter is only relevant if timeslicing
+within a partition will be configured. Preemption and timeslicing can occur at
+the same time.
+</LI>
+</UL>
+<P>
+To enable preemption after making the configuration changes described above,
+restart SLURM if it is already running. Any change to the plugin settings in
+SLURM requires a full restart of the daemons. If you just change the partition
+<I>Priority</I> or <I>Shared</I> setting, this can be updated with
+<I>scontrol reconfig</I>.
+</P>
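+
+<P>
+As an illustration only (the memory size is hypothetical and the node
+names mirror the example below), the items described above might appear
+in <I>slurm.conf</I> as follows:
+</P>
+<PRE>
+# Sample gang scheduling/preemption configuration (illustrative values)
+SchedulerType=sched/gang
+SchedulerTimeSlice=60
+SelectType=select/linear
+SelectTypeParameter=CR_Memory
+DefMemPerCPU=1024
+PartitionName=active Priority=1 Default=YES Shared=NO Nodes=n[12-16]
+PartitionName=hipri  Priority=2             Shared=NO Nodes=n[12-16]
+</PRE>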
+
+<H2>Preemption Design and Operation</H2>
+
+<P>
+When enabled, the <I>sched/gang</I> plugin keeps track of the resources
+allocated to all jobs. For each partition an "active bitmap" is maintained that
+tracks all concurrently running jobs in the SLURM cluster. Each partition also
+maintains a job list for that partition, and a list of "shadow" jobs. The
+"shadow" jobs are job allocations from higher priority partitions that "cast
+shadows" on the active bitmaps of the lower priority partitions. Jobs in lower
+priority partitions that are caught in these "shadows" will be suspended.
+</P>
+<P>
+Each time a new job is allocated to resources in a partition and begins running,
+the <I>sched/gang</I> plugin adds a "shadow" of this job to all lower priority
+partitions. The active bitmap of these lower priority partitions are then
+rebuilt, with the shadow jobs added first. Any existing jobs that were replaced
+by one or more "shadow" jobs are suspended (preempted). Conversely, when a 
+high-priority running job completes, its "shadow" goes away and the active 
+bitmaps of the lower priority partitions are rebuilt to see if any suspended 
+jobs can be resumed.
+</P>
+<P>
+The gang scheduler plugin is designed to be <I>reactive</I> to the resource
+allocation decisions made by the "select" plugins. The "select" plugins have
+been enhanced to recognize when "sched/gang" has been configured, and to factor
+in the priority of each partition when selecting resources for a job. When
+choosing resources for each job, the selector avoids resources that are in use
+by other jobs (unless sharing has been configured, in which case it does some
+load-balancing). However, when "sched/gang" is enabled, the select plugins may
+choose resources that are already in use by jobs from partitions with a lower
+priority setting, even when sharing is disabled in those partitions.
+</P>
+<P>
+This leaves the gang scheduler in charge of controlling which jobs should run on
+the overallocated resources. The <I>sched/gang</I> plugin suspends jobs via the
+same internal functions that support <I>scontrol suspend</I> and <I>scontrol
+resume</I>. A good way to observe the act of preemption is by running <I>watch
+squeue</I> in a terminal window.
+</P>
+
+<H2>A Simple Example</H2>
+
+<P>
+The following example is configured with <I>select/linear</I> and
+<I>sched/gang</I>. This example takes place on a cluster of 5 nodes:
+</P>
+<PRE>
+[user@n16 ~]$ <B>sinfo</B>
+PARTITION AVAIL  TIMELIMIT NODES  STATE NODELIST
+active*      up   infinite     5   idle n[12-16]
+hipri        up   infinite     5   idle n[12-16]
+</PRE>
+<P>
+Here are the Partition settings:
+</P>
+<PRE>
+[user@n16 ~]$ <B>grep PartitionName /shared/slurm/slurm.conf</B>
+PartitionName=active Priority=1 Default=YES Shared=NO Nodes=n[12-16]
+PartitionName=hipri  Priority=2             Shared=NO Nodes=n[12-16]
+</PRE>
+<P>
+The <I>runit.pl</I> script launches a simple load-generating app that runs
+for the given number of seconds. Submit 5 single-node <I>runit.pl</I> jobs to
+run on all nodes:
+</P>
+<PRE>
+[user@n16 ~]$ <B>sbatch -N1 ./runit.pl 300</B>
+sbatch: Submitted batch job 485
+[user@n16 ~]$ <B>sbatch -N1 ./runit.pl 300</B>
+sbatch: Submitted batch job 486
+[user@n16 ~]$ <B>sbatch -N1 ./runit.pl 300</B>
+sbatch: Submitted batch job 487
+[user@n16 ~]$ <B>sbatch -N1 ./runit.pl 300</B>
+sbatch: Submitted batch job 488
+[user@n16 ~]$ <B>sbatch -N1 ./runit.pl 300</B>
+sbatch: Submitted batch job 489
+[user@n16 ~]$ <B>squeue -Si</B>
+JOBID PARTITION     NAME   USER  ST   TIME  NODES NODELIST
+  485    active runit.pl   user   R   0:06      1 n12
+  486    active runit.pl   user   R   0:06      1 n13
+  487    active runit.pl   user   R   0:05      1 n14
+  488    active runit.pl   user   R   0:05      1 n15
+  489    active runit.pl   user   R   0:04      1 n16
+</PRE>
+<P>
+Now submit a short-running 3-node job to the <I>hipri</I> partition:
+</P>
+<PRE>
+[user@n16 ~]$ <B>sbatch -N3 -p hipri ./runit.pl 30</B>
+sbatch: Submitted batch job 490
+[user@n16 ~]$ <B>squeue -Si</B>
+JOBID PARTITION     NAME   USER  ST   TIME  NODES NODELIST
+  485    active runit.pl   user   S   0:27      1 n12
+  486    active runit.pl   user   S   0:27      1 n13
+  487    active runit.pl   user   S   0:26      1 n14
+  488    active runit.pl   user   R   0:29      1 n15
+  489    active runit.pl   user   R   0:28      1 n16
+  490     hipri runit.pl   user   R   0:03      3 n[12-14]
+</PRE>
+<P>
+Job 490 in the <I>hipri</I> partition preempted jobs 485, 486, and 487 from
+the <I>active</I> partition. Jobs 488 and 489 in the <I>active</I> partition
+remained running.
+</P>
+<P>
+This state persisted until job 490 completed, at which point the preempted jobs
+were resumed:
+</P>
+<PRE>
+[user@n16 ~]$ <B>squeue</B>
+JOBID PARTITION     NAME   USER  ST   TIME  NODES NODELIST
+  485    active runit.pl   user   R   0:30      1 n12
+  486    active runit.pl   user   R   0:30      1 n13
+  487    active runit.pl   user   R   0:29      1 n14
+  488    active runit.pl   user   R   0:59      1 n15
+  489    active runit.pl   user   R   0:58      1 n16
+</PRE>
+
+
+<H2><A NAME="future_work">Future Ideas</A></H2>
+
+<P>
+<B>More intelligence in the select plugins</B>: This implementation of
+preemption relies on intelligent job placement by the <I>select</I> plugins. In
+SLURM 1.3.1 the <I>select/linear</I> plugin had a decent preemptive placement
+algorithm, but the consumable resource <I>select/cons_res</I> plugin had no
+preemptive placement support. In SLURM 1.4 preemptive placement support was
+added to the <I>select/cons_res</I> plugin, but there is still room for
+improvement.
+</P><P>
+Take the following example:
+</P>
+<PRE>
+[user@n8 ~]$ <B>sinfo</B>
+PARTITION AVAIL  TIMELIMIT NODES  STATE NODELIST
+active*      up   infinite     5   idle n[1-5]
+hipri        up   infinite     5   idle n[1-5]
+[user@n8 ~]$ <B>sbatch -N1 -n2 ./sleepme 60</B>
+sbatch: Submitted batch job 17
+[user@n8 ~]$ <B>sbatch -N1 -n2 ./sleepme 60</B>
+sbatch: Submitted batch job 18
+[user@n8 ~]$ <B>sbatch -N1 -n2 ./sleepme 60</B>
+sbatch: Submitted batch job 19
+[user@n8 ~]$ <B>squeue</B>
+  JOBID PARTITION     NAME     USER  ST       TIME  NODES NODELIST(REASON)
+     17    active  sleepme  cholmes   R       0:03      1 n1
+     18    active  sleepme  cholmes   R       0:03      1 n2
+     19    active  sleepme  cholmes   R       0:02      1 n3
+[user@n8 ~]$ <B>sbatch -N3 -n6 -p hipri ./sleepme 20</B>
+sbatch: Submitted batch job 20
+[user@n8 ~]$ <B>squeue -Si</B>
+  JOBID PARTITION     NAME     USER  ST       TIME  NODES NODELIST(REASON)
+     17    active  sleepme  cholmes   S       0:16      1 n1
+     18    active  sleepme  cholmes   S       0:16      1 n2
+     19    active  sleepme  cholmes   S       0:15      1 n3
+     20     hipri  sleepme  cholmes   R       0:03      3 n[1-3]
+[user@n8 ~]$ <B>sinfo</B>
+PARTITION AVAIL  TIMELIMIT NODES  STATE NODELIST
+active*      up   infinite     3  alloc n[1-3]
+active*      up   infinite     2   idle n[4-5]
+hipri        up   infinite     3  alloc n[1-3]
+hipri        up   infinite     2   idle n[4-5]
+</PRE>
+<P>
+It would be better if the "hipri" job were placed on nodes n[3-5], which
+would allow jobs 17 and 18 to continue running. However, a new "intelligent"
+algorithm would have to include factors such as job size and required nodes in
+order to support ideal placements such as this, which can quickly complicate
+the design. Any and all help is welcome here!
+</P>
+<P>
+<B>Preemptive backfill</B>: the current backfill scheduler plugin
+("sched/backfill") is a nice way to make efficient use of otherwise idle
+resources. But SLURM only supports one scheduler plugin at a time. Fortunately,
+given the design of the new "sched/gang" plugin, there is no direct overlap
+between the backfill functionality and the gang-scheduling functionality. Thus,
+it's possible that these two plugins could technically be merged into a new
+scheduler plugin that supported preemption <U>and</U> backfill. <B>NOTE:</B>
+this is only an idea based on a code review so there would likely need to be
+some additional development, and plenty of testing!
+</P>
+<P>
+<B>Requeue a preempted job</B>: In some situations it may be desirable to
+requeue a low-priority job rather than suspend it. Suspending a job leaves the
+job in memory. Requeuing a job involves terminating the job and resubmitting it
+again. The "sched/gang" plugin would need to be modified to recognize when a job
+is able to be requeued and when it can requeue a job (for preemption only, not
+for timeslicing!), and perform the requeue request.
+</P>
+
+<p style="text-align:center;">Last modified 5 December 2008</p>
+
+<!--#include virtual="footer.txt"-->
diff --git a/doc/html/priority_multifactor.shtml b/doc/html/priority_multifactor.shtml
new file mode 100644
index 0000000000000000000000000000000000000000..c27aed7f84dad29596e475928208273829b8eaa8
--- /dev/null
+++ b/doc/html/priority_multifactor.shtml
@@ -0,0 +1,542 @@
+<!--#include virtual="header.txt"-->
+
+<h1>Multifactor Priority Plugin</h1>
+
+<b>Note:</b> This document describes features added to SLURM version 2.0.
+
+<h2>Contents</h2>
+<UL>
+<LI> <a href=#intro>Introduction</a>
+<LI> <a href=#mfjppintro>Multi-factor Job Priority Plugin</a>
+<LI> <a href=#general>Job Priority Factors In General</a>
+<LI> <a href=#age>Age Factor</a>
+<LI> <a href=#jobsize>Job Size Factor</a>
+<LI> <a href=#partition>Partition Factor</a>
+<LI> <a href=#qos>Quality of Service (QOS) Factor</a>
+<LI> <a href=#fairshare>Fair-share Factor</a>
+<LI> <a href=#sprio>The <i>sprio</i> utility</a>
+<LI> <a href=#config>Configuration</a>
+<LI> <a href=#configexample>Configuration Example</a>
+</UL>
+
+<!-------------------------------------------------------------------------->
+<a name=intro>
+<h2>Introduction</h2></a>
+
+<P> By default, SLURM assigns job priority on a First In, First Out (FIFO) basis.  FIFO scheduling should be configured when SLURM is controlled by an external scheduler.</P>
+
+<P> The <i>PriorityType</i> parameter in the slurm.conf file selects the priority plugin.  The default value for this variable is "priority/basic" which enables simple FIFO scheduling. (See <a href="#config">Configuration</a> below)</P>
+
+<P> SLURM version 2.0 includes the Multi-factor Job Priority plugin.  This plugin provides a very versatile facility for ordering the queue of jobs waiting to be scheduled.</P>
+
+<!-------------------------------------------------------------------------->
+<a name=mfjppintro>
+<h2>Multi-factor Job Priority Plugin</h2></a>
+
+<P> There are five factors in the Multi-factor Job Priority plugin that influence job priority:</P>
+
+<DL>
+<DT> Age
+<DD> the length of time a job has been waiting in the queue, eligible to be scheduled
+<DT> Fair-share
+<DD> the difference between the portion of the computing resource that has been promised and the amount of resources that have been consumed
+<DT> Job size
+<DD> the number of nodes a job is allocated
+<DT> Partition
+<DD> a factor associated with each node partition
+<DT> QOS
+<DD> a factor associated with each Quality Of Service (Still under Development)
+</DL>
+
+<P> Additionally, a weight can be assigned to each of the above
+  factors.  This provides the ability to enact a policy that blends a
+  combination of any of the above factors in any portion desired.  For
+  example, a site could configure fair-share to be the dominant factor
+  (say 70%), set the job size and the age factors to each contribute
+  15%, and set the partition and QOS influences to zero.</P>
+
+<!-------------------------------------------------------------------------->
+<a name=general>
+<h2>Job Priority Factors In General</h2></a>
+
+<P> The job's priority at any given time will be a weighted sum of all the factors that have been enabled in the slurm.conf file.  Job priority can be expressed as:</P>
+<PRE>
+Job_priority =
+	(age_weight) * (age_factor) +
+	(fair-share_weight) * (fair-share_factor) +
+	(job_size_weight) * (job_size_factor) +
+	(partition_weight) * (partition_factor) +
+	(QOS_weight) * (QOS_factor)
+</PRE>
+
+<P> All of the factors in this formula are floating point numbers that
+  range from 0.0 to 1.0.  The weights are unsigned, 32 bit integers.
+  The job's priority is an integer that ranges between 0 and
+  4294967295.  The higher the number,  the higher the job will be
+  positioned in the queue, and the sooner the job will be scheduled.
+  A job's priority, and hence its order in the queue, will vary over
+  time.  For example, the longer a job sits in the queue, the higher
+  its priority will grow when the age_weight is non-zero.</P>
+
+<P> <b>IMPORTANT:</b> The weight values should be high enough to get a
+  good set of significant digits since all the factors are floating
+  point numbers from 0.0 to 1.0. For example, one job could have a
+  fair-share factor of .59534 and another job could have a fair-share
+  factor of .50002.  If the fair-share weight is only set to 10, both
+  jobs would have the same fair-share priority. Therefore, set the
+  weights high enough to avoid this scenario, starting around 1000 or
+  so for those factors you want to make predominant.</P>
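+
+<P> As a quick worked example (all weights and factor values below are
+  hypothetical), consider a job with an age factor of 0.5, a fair-share
+  factor of 0.6 and a job size factor of 0.25 at a site that sets the
+  age and job size weights to 1000, the fair-share weight to 10000, and
+  the partition and QOS weights to zero:</P>
+
+<PRE>
+Job_priority = (1000 * 0.5) + (10000 * 0.6) + (1000 * 0.25) + 0 + 0
+             = 500 + 6000 + 250
+             = 6750
+</PRE>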
+
+<!-------------------------------------------------------------------------->
+<a name=age>
+<h2>Age Factor</h2></a>
+
+<P> The age factor represents the length of time a job has been sitting in the queue and eligible to run.  In general, the longer a job waits in the queue, the larger its age factor grows.  However, the age factor for a dependent job will not change while it waits for the job it depends on to complete.  Also, the age factor will not change when scheduling is withheld for a job whose node or time limits exceed the cluster's current limits.</P>
+
+<P> At some configurable length of time (<i>PriorityMaxAge</i>), the age factor will max out to 1.0.</P>
+
+<!-------------------------------------------------------------------------->
+<a name=jobsize>
+<h2>Job Size Factor</h2></a>
+
+<P> The job size factor correlates to the number of nodes the job has requested.  This factor can be configured to favor larger jobs or smaller jobs based on the state of the <i>PriorityFavorSmall</i> boolean in the slurm.conf file.  When <i>PriorityFavorSmall</i> is NO, the larger the job, the greater its job size factor will be.  A job that requests all the nodes on the machine will get a job size factor of 1.0.  When <i>PriorityFavorSmall</i> is YES, a single-node job will receive the 1.0 job size factor.</P>
+
+<!-------------------------------------------------------------------------->
+<a name=partition>
+<h2>Partition Factor</h2></a>
+
+<P> Each node partition can be assigned a factor from 0.0 to 1.0.  The higher the number, the greater the job priority will be for jobs that are slated to run in this partition.</P>
+
+<!-------------------------------------------------------------------------->
+<a name=qos>
+<h2>Quality of Service (QOS) Factor</h2></a>
+
+<P> Each QOS can be assigned a factor from 0.0 to 1.0.  The higher the
+  number, the greater the job priority will be for jobs that request
+  this QOS. (Still under Development)</P>
+
+<!-------------------------------------------------------------------------->
+<a name=fairshare>
+<h2>Fair-share Factor</h2></a>
+
+<b>Note:</b> Computing the fair-share factor requires the installation
+and operation of the <a href="accounting.html">SLURM Accounting
+Database</a> to provide the assigned shares and the consumed,
+computing resources described below.
+
+<P> The fair-share component to a job's priority influences the order in which a user's queued jobs are scheduled to run based on the portion of the computing resources they have been allocated and the resources their jobs have already consumed.  The fair-share factor does not involve a fixed allotment, whereby a user's access to a machine is cut off once that allotment is reached.</P>
+
+<P> Instead, the fair-share factor serves to prioritize queued jobs such that those jobs charging accounts that are under-serviced are scheduled first, while jobs charging accounts that are over-serviced are scheduled when the machine would otherwise go idle.</P>
+
+<P> SLURM's fair-share factor is a floating point number between 0.0 and 1.0 that reflects the shares of a computing resource that a user has been allocated and the amount of computing resources the user's jobs have consumed.  The higher the value, the higher is the placement in the queue of jobs waiting to be scheduled.</P>
+
+<P> The computing resource is currently defined to be computing cycles delivered by a machine in the units of processor*seconds.  Future versions of the fair-share factor may additionally include a memory integral component.</P>
+
+<h3> Normalized Shares</h3>
+
+<P> The fair-share hierarchy represents the portions of the computing resource that have been allocated to multiple projects.  These allocations are assigned to an account.  There can be multiple levels of allocations made as allocations of a given account are further divided to sub-accounts:</P>
+
+<div class="figure">
+  <img src=AllocationPies.gif width=400 ><BR>
+  Figure 1. Machine Allocation
+</div>
+
+<P> The chart above shows the resources of the machine allocated to four accounts, A, B, C and D.  Furthermore, account A's shares are further allocated to sub accounts, A1 through A4.  Users are granted permission (through sacctmgr) to submit jobs against specific accounts.  If there are 10 users given equal shares in Account A3, they will each be allocated 1% of the machine.</P>
+
+<P> A user's normalized shares is simply</P>
+
+<PRE>
+S =	(S<sub>user</sub> / S<sub>siblings</sub>) *
+	(S<sub>account</sub> / S<sub>sibling-accounts</sub>) *
+	(S<sub>parent</sub> / S<sub>parent-siblings</sub>) * ...
+</PRE>
+
+Where:
+
+<DL>
+<DT> S
+<DD> is the user's normalized share, between zero and one
+<DT> S<sub>user</sub>
+<DD> is the number of shares of the account allocated to the user
+<DT> S<sub>siblings</sub>
+<DD> is the total number of shares allocated to all users permitted to charge the account (including S<sub>user</sub>)
+<DT> S<sub>account</sub>
+<DD> is the number of shares of the parent account allocated to the account
+<DT> S<sub>sibling-accounts</sub>
+<DD> is the total number of shares allocated to all sub-accounts of the parent account
+<DT> S<sub>parent</sub>
+<DD> is the number of shares of the grandparent account allocated to the parent
+<DT> S<sub>parent-siblings</sub>
+<DD> is the total number of shares allocated to all sub-accounts of the grandparent account
+</DL>
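+
+<P> For instance, using the allocations from the example in Figure 3
+  below (user 2 holds 1 of the 2 shares in account C, account C holds
+  10 of the 40 shares under account A, and account A holds 40 of the
+  100 shares at the root level):</P>
+
+<PRE>
+	S = (1 / 2) * (10 / 40) * (40 / 100) = 0.05
+</PRE>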
+
+<h3> Normalized Usage</h3>
+
+<P> The total number of processor*seconds that a machine is able to deliver over a fixed time period (for example, a day) is a fixed quantity.  The processor*seconds allocated to every job are tracked and saved to the SLURM database in real-time.  If one only considered usage over a fixed time period, then calculating a user's normalized usage would be a simple quotient:</P>
+
+<PRE>
+	U<sub>N</sub> = U<sub>user</sub> / R<sub>available</sub>
+</PRE>
+
+Where:
+
+<DL>
+<DT> U<sub>N</sub>
+<DD> is normalized usage, between zero and one
+<DT> U<sub>user</sub>
+<DD> is the processor*seconds consumed by all of a user's jobs in a given account over a fixed time period
+<DT> R<sub>available</sub>
+<DD> is the total number of processor*seconds a machine can deliver during that same time period
+</DL>
+
+<P> However, significant real-world usage quantities span multiple time periods.  Rather than treating usage over a number of weeks or months with equal importance, SLURM's fair-share priority calculation places more importance on the most recent resource usage and less importance on usage from the distant past.</P>
+
+<P> The SLURM usage metric is based off a half-life formula that favors the most recent usage statistics.  Usage statistics from the past decrease in importance based on a single decay factor, D:</P>
+
+<PRE>
+	U<sub>H</sub> = U<sub>current_period</sub> +
+	     ( D * U<sub>last_period</sub>) + (D * D * U<sub>period-2</sub>) + ...
+</PRE>
+
+Where:
+
+<DL>
+<DT> U<sub>H</sub>
+<DD> is the historical usage subject to the half-life decay
+<DT> U<sub>current_period</sub>
+<DD> is the usage charged over the current measurement period
+<DT> U<sub>last_period</sub>
+<DD> is the usage charged over the last measurement period
+<DT> U<sub>period-2</sub>
+<DD> is the usage charged over the second last measurement period
+<DT> D
+<DD> is a decay factor between zero and one that delivers the
+  half-life decay based off the <i>PriorityDecayHalfLife</i> setting
+  in the slurm.conf file.  Without accruing additional usage, a user's
+  U<sub>H</sub> usage will decay to 1/2 value after a time period
+  of <i>PriorityDecayHalfLife</i> seconds.
+</DL>
+
+<P> In practice, the <i>PriorityDecayHalfLife</i> could be a matter of
+  seconds or days as appropriate for each site.  The measurement
+  period is nominally 5 minutes.  The decay factor, D, is assigned the
+  value that will achieve the half-life decay rate specified by
+  the <i>PriorityDecayHalfLife</i> parameter.</P> 
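+
+<P> One way to see the half-life relationship (a sketch of the math,
+  not necessarily the exact expression used in the code) is to note
+  that usage must halve after <i>PriorityDecayHalfLife</i> seconds,
+  i.e. after (half_life / period) measurement periods, which gives:</P>
+
+<PRE>
+	D = 2<sup>-(period / half_life)</sup>
+</PRE>
+
+<P> For example, with a 5 minute (300 second) measurement period and a
+  7 day (604800 second) half-life, D would be approximately 0.99966.</P>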
+
+<P> The historical resources a machine has available could be similarly aggregated with the same decay factor:</P>
+
+<PRE>
+	R<sub>H</sub> = R<sub>current_period</sub> +
+	    ( D * R<sub>last_period</sub>) + (D * D * R<sub>period-2</sub>) + ...
+</PRE>
+
+<P> However, a simpler formula is:</P>
+
+<PRE>
+	R<sub>H</sub> = num_procs * half_life * 2
+</PRE>
+
+Where:
+
+<DL>
+<DT> R<sub>H</sub>
+<DD> is the historical resources available subject to the same half-life decay as the usage formula.
+<DT> num_procs
+<DD> is the total number of processors in the cluster
+<DT> half_life
+<DD> is the configured half-life (<i>PriorityDecayHalfLife</i>)
+</DL>
+
+<P> A user's normalized usage that spans multiple time periods then becomes:</P>
+
+<PRE>
+	U = U<sub>H</sub> / R<sub>H</sub>
+</PRE>
+
+
+<h3>Simplified Fair-Share Formula</h3>
+
+<P> The simplified formula for calculating the fair-share factor for usage that spans multiple time periods and subject to a half-life decay is:</P>
+
+<PRE>
+	F = (S - U + 1) / 2
+</PRE>
+
+Where:
+
+<DL compact>
+<DT> F
+<DD> is the fair-share factor
+<DT> S
+<DD> is the normalized shares
+<DT> U
+<DD> is the normalized usage factoring in half-life decay
+</DL>
+
+<P> The fair-share factor will therefore range from zero to one, where one represents the highest priority for a job.  A fair-share factor of 0.5 indicates that the user's jobs have used exactly the portion of the machine that they have been allocated.  A fair-share factor above 0.5 indicates that the user's jobs have consumed less than their allocated share, while a fair-share factor below 0.5 indicates that the user's jobs have consumed more than their allocated share of the computing resources.</P>
+
+<h3>The Fair-share Factor Under An Account Hierarchy</h3>
+
+<P> The method described above presents a system whereby the priority of a user's job is calculated based on the portion of the machine allocated to the user and the historical usage of all the jobs run by that user under a specific account.</P>
+
+<P> Another layer of "fairness" is necessary however, one that factors in the usage of other users drawing from the same account.  This allows a job's fair-share factor to be influenced by the computing resources delivered to jobs of other users drawing from the same account.</P>
+
+<P> If there are two members of a given account, and if one of those users has run many jobs under that account, the job priority of a job submitted by the user who has not run any jobs will be negatively affected.  This ensures that the combined usage charged to an account matches the portion of the machine that is allocated to that account.</P>
+
+<P> In the example below, when user 3 submits their first job using account C, they will want their job's priority to reflect all the resources delivered to account B.  They do not care that user 1 has been using up a significant portion of the cycles allocated to account B and user 2 has yet to run a job out of account B.  If user 2 submits a job using account B and user 3 submits a job using account C, user 3 expects their job to be scheduled before the job from user 2.</P>
+
+<div class="figure">
+  <img src=UsagePies.gif width=400 ><BR>
+  Figure 2. Usage Example
+</div>
+
+<h3>The SLURM Fair-Share Formula</h3>
+
+<P> The SLURM fair-share formula has been designed to provide fair scheduling to users based on the allocation and usage of every account.</P>
+
+<P> The actual formula used is a refinement of the formula presented above:</P>
+
+<PRE>
+	F = (S - U<sub>E</sub> + 1) / 2
+</PRE>
+
+<P> The difference is that the usage term is effective usage, which is defined as:</P>
+
+<PRE>
+	U<sub>E</sub> = U<sub>Achild</sub> +
+		  ((U<sub>Eparent</sub> - U<sub>Achild</sub>) * S<sub>child</sub>/S<sub>all_siblings</sub>)
+</PRE>
+
+Where:
+
+<DL>
+<DT> U<sub>E</sub>
+<DD> is the effective usage of the child user or child account
+<DT> U<sub>Achild</sub>
+<DD> is the actual usage of the child user or child account
+<DT> U<sub>Eparent</sub>
+<DD> is the effective usage of the parent account
+<DT> S<sub>child</sub>
+<DD> is the shares allocated to the child user or child account
+<DT> S<sub>all_siblings</sub>
+<DD> is the shares allocated to all the children of the parent account
+</DL>
+
+<P> This formula applies only to accounts at the second tier below root and beneath.  For the tier of accounts just under root, the effective usage equals the actual usage.</P>
+
+<P> Because the formula for effective usage includes a term of the effective usage of the parent, the calculation for each account in the tree must start at the second tier of accounts and proceed downward:  to the children accounts, then grandchildren, etc.  The effective usage of the users will be the last to be calculated.</P>
+
+<P> Plugging in the effective usage into the fair-share formula above yields a fair-share factor that reflects the aggregated usage charged to each of the accounts in the fair-share hierarchy.</P>
+
+<h3>Example</h3>
+
+<P> The following example demonstrates the effective usage calculations and resultant fair-share factors. (See Figure 3 below.)</P>
+
+<P> The machine's computing resources are allocated to accounts A and D with 40 and 60 shares respectively.  Account A is further divided into two children accounts, B with 30 shares and C with 10 shares.  Account D is further divided into two children accounts, E with 25 shares and F with 35 shares.</P>
+
+<P> Note:  the shares at any given tier in the Account hierarchy do not need to total up to 100 shares.  This example shows them totaling up to 100 to make the arithmetic easier to follow in your head.</P>
+
+<P> User 1 is granted permission to submit jobs against the B account.  Users 2 and 3 are granted one share each in the C account.  User 4 is the sole member of the E account and User 5 is the sole member of the F account.</P>
+
+<P> Note:  accounts A and D do not have any user members in this example, though users could have been assigned.</P>
+
+<P> The shares assigned to each account make it easy to determine normalized shares of the machine's complete resources.  Account A has .4 normalized shares, B has .3 normalized shares, etc.  Users who are sole members of an account have the same number of normalized shares as the account.  (E.g., User 1 has .3 normalized shares).  Users who share accounts have a portion of the normalized shares based on their shares.  For example, if user 2 had been allocated 4 shares instead of 1, user 2 would have had .08 normalized shares.  With users 2 and 3 each holding 1 share, they each have a normalized share of  0.05.</P>
+
+<P> Users 1, 2, and 4 have run jobs that have consumed the machine's computing resources.  User 1's actual usage is 0.2 of the machine; user 2 is 0.25,  and user 4 is 0.25.</P>
+
+<P> The actual usage charged to each account is represented by the solid arrows.  The actual usage charged to each account is summed as one goes up the tree.  Account C's usage is the sum of the usage of Users 2 and 3; account A's actual usage is the sum of its children, accounts B and C.</P>
+
+<div class="figure">
+  <img src=ExampleUsage.gif width=400 ><BR>
+  Figure 3. Fair-share Example
+</div>
+
+<UL>
+<LI> User 1 normalized share: 0.3
+<LI> User 2 normalized share: 0.05
+<LI> User 3 normalized share: 0.05
+<LI> User 4 normalized share: 0.25
+<LI> User 5 normalized share: 0.35
+</UL>
+
+<P> As stated above, the effective usage is computed from the formula:</P>
+
+<PRE>
+	U<sub>E</sub> = U<sub>Achild</sub> +
+		  ((U<sub>Eparent</sub> - U<sub>Achild</sub>) * S<sub>child</sub>/S<sub>all_siblings</sub>)
+</PRE>
+
+<P> The effective usage for all accounts at the first tier under the root allocation is always equal to the actual usage:</P>
+
+Account A's effective usage is therefore equal to .45.  Account D's effective usage is equal to .25.
+
+<UL>
+<LI> Account B effective usage: 0.2 + ((0.45 - 0.2) * 30 / 40) = 0.3875
+<LI> Account C effective usage: 0.25 + ((0.45 - 0.25) * 10 / 40) = 0.3
+<LI> Account E effective usage: 0.25 + ((0.25 - 0.25) * 25 / 60) = 0.25
+<LI> Account F effective usage: 0.0 + ((0.25 - 0.0) * 35 / 60) = 0.1458
+</UL>
+
+<P> The effective usage of each user is calculated using the same formula:</P>
+
+<UL>
+<LI> User 1 effective usage: 0.2 + ((0.3875 - 0.2) * 1 / 1) = 0.3875
+<LI> User 2 effective usage: 0.25 + ((0.3 - 0.25) * 1 / 2) =  0.275
+<LI> User 3 effective usage: 0.0 + ((0.3 - 0.0) * 1 / 2) =  0.15
+<LI> User 4 effective usage: 0.25 + ((0.25 - 0.25) * 1 / 1) = 0.25
+<LI> User 5 effective usage: 0.0 + ((.1458 - 0.0) * 1 / 1) =  0.1458
+</UL>
+
+<P> Using the SLURM fair-share formula,</P>
+
+<PRE>
+	F = (S - U<sub>E</sub> + 1) / 2
+</PRE>
+
+<P> the fair-share factor for each user is:</P>
+
+<UL>
+<LI> User 1 fair-share factor: (.3 - .3875 + 1) / 2 =  0.45625
+<LI> User 2 fair-share factor: (.05 - .275 + 1) / 2 =  0.3875
+<LI> User 3 fair-share factor: (.05 - .15 + 1) / 2 =  0.45
+<LI> User 4 fair-share factor: (.25 - .25 + 1) / 2 =  0.5
+<LI> User 5 fair-share factor: (.35 - .1458 + 1) / 2 =  0.6021
+</UL>
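+
+<P> The arithmetic above can be checked mechanically.  The following C sketch (an illustrative aside, not SLURM code) hard-codes the example tree and reproduces the effective usage values and fair-share factors just listed:</P>
+
+<PRE>
+#include &lt;stdio.h&gt;
+
+/* U_E = U_Achild + (U_Eparent - U_Achild) * S_child / S_all_siblings */
+static double effective_usage(double u_child, double u_eparent,
+			      double s_child, double s_siblings)
+{
+	return u_child + (u_eparent - u_child) * s_child / s_siblings;
+}
+
+/* F = (S - U_E + 1) / 2 */
+static double fair_share(double norm_shares, double u_eff)
+{
+	return (norm_shares - u_eff + 1.0) / 2.0;
+}
+
+int main(void)
+{
+	/* First tier: effective usage equals actual usage. */
+	double a = 0.45, d = 0.25;
+
+	/* Second tier: accounts B, C, E and F. */
+	double b = effective_usage(0.20, a, 30, 40);	/* 0.3875 */
+	double c = effective_usage(0.25, a, 10, 40);	/* 0.3    */
+	double e = effective_usage(0.25, d, 25, 60);	/* 0.25   */
+	double f = effective_usage(0.00, d, 35, 60);	/* 0.1458 */
+
+	/* Third tier: the users' effective usage, then their factors. */
+	printf("user 1: %f\n", fair_share(0.30, effective_usage(0.20, b, 1, 1)));
+	printf("user 2: %f\n", fair_share(0.05, effective_usage(0.25, c, 1, 2)));
+	printf("user 3: %f\n", fair_share(0.05, effective_usage(0.00, c, 1, 2)));
+	printf("user 4: %f\n", fair_share(0.25, effective_usage(0.25, e, 1, 1)));
+	printf("user 5: %f\n", fair_share(0.35, effective_usage(0.00, f, 1, 1)));
+	return 0;
+}
+</PRE>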
+
+<P> From this example, one can see that users 1, 2, and 3 are over-serviced while user 5 is under-serviced.  Even though user 3 has yet to submit a job, his/her fair-share factor is negatively influenced by the jobs users 1 and 2 have run.</P>
+
+<P> Based on the fair-share factor alone, if all 5 users were to submit a job charging their respective accounts, user 5's job would be granted the highest scheduling priority.</P>
+
+<!-------------------------------------------------------------------------->
+<a name=sprio>
+<h2>The <i>sprio</i> utility</h2></a>
+
+<P> The <i>sprio</i> command provides a summary of the five factors
+that comprise each job's scheduling priority.  While <i>squeue</i> has
+format options (%p and %Q) that display a job's composite priority,
+<i>sprio</i> can be used to display a breakdown of the priority components
+for each job.  In addition, the <i>sprio -w</i> option displays the
+weights (PriorityWeightAge, PriorityWeightFairshare, etc.) for each
+factor as it is currently configured.</P>
+
+<!-------------------------------------------------------------------------->
+<a name=config>
+<h2>Configuration</h2></a>
+
+<P> The following slurm.conf (SLURM_CONFIG_FILE) parameters are used to configure the Multi-factor Job Priority Plugin.  See the slurm.conf(5) man page for more details.</P>
+
+<DL>
+<DT> PriorityType
+<DD> Set this value to "priority/multifactor" to enable the Multi-factor Job Priority Plugin.  The default value for this variable is "priority/basic" which enables simple FIFO scheduling.
+<DT> PriorityDecayHalfLife
+<DD> This determines the contribution of historical usage to the
+  composite usage value.  The larger the number, the longer past usage
+  affects fair-share.  If set to 0, no decay will be applied; this is
+  helpful if you want to enforce hard time limits per association, but
+  then PriorityUsageResetPeriod must be set to some interval.
+  The unit is a time string (i.e. min, hr:min:00, days-hr:min:00, or
+  days-hr).  The default value is 7-0 (7 days).  (A sketch of what a
+  half-life means appears after this list.)
+<DT> PriorityUsageResetPeriod
+<DD> At this interval the usage of associations will be reset to 0.
+  This is used if you want to enforce hard limits of time usage per
+  association.  If PriorityDecayHalfLife is set to 0, no decay will
+  occur, and this is the only way to reset the usage accumulated by
+  running jobs.  By default this is turned off; using the
+  PriorityDecayHalfLife option instead is advised, to avoid reaching a
+  state in which nothing can run on your cluster.  But if your scheme
+  allots only fixed amounts of time on your system, this is the way to
+  enforce it.
+  The unit is a time string (i.e. min, hr:min:00, days-hr:min:00, or
+  days-hr). The default value is not set (turned off).
+
+<DT> PriorityFavorSmall
+<DD> A boolean that sets the polarity of the job size factor.  The
+  default setting is NO, which results in larger jobs (by node count)
+  receiving a larger job size factor.  Setting this parameter to YES
+  means that the smaller the job, the greater the job size factor will be.
+<DT> PriorityMaxAge
+<DD> Specifies the queue wait time at which the age factor maxes out.
+  The unit is a time string (i.e. min, hr:min:00, days-hr:min:00, or
+  days-hr).  The default value is 7-0 (7 days). 
+<DT> PriorityWeightAge
+<DD> An unsigned integer that scales the contribution of the age factor.
+<DT> PriorityWeightFairshare
+<DD> An unsigned integer that scales the contribution of the fair-share factor.
+<DT> PriorityWeightJobSize
+<DD> An unsigned integer that scales the contribution of the job size factor.
+<DT> PriorityWeightPartition
+<DD> An unsigned integer that scales the contribution of the partition factor.
+<DT> PriorityWeightQOS
+<DD> An unsigned integer that scales the contribution of the quality of service factor.
+</DL>
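+
+<P> To make the half-life parameters above concrete:  after each PriorityDecayHalfLife period, the remembered contribution of old usage is cut in half.  The C sketch below is illustrative only; the plugin's internal bookkeeping is not described in this document.</P>
+
+<PRE>
+#include &lt;math.h&gt;
+#include &lt;stdio.h&gt;
+
+int main(void)
+{
+	double half_life = 7.0;		/* days; PriorityDecayHalfLife=7-0 */
+	double raw_usage = 1000.0;	/* some accumulated usage */
+	double t;
+
+	/* prints 1000.0, 500.0, 250.0, 125.0, 62.5 */
+	for (t = 0.0; t &lt;= 28.0; t += 7.0)
+		printf("day %2.0f: %6.1f\n", t,
+		       raw_usage * pow(0.5, t / half_life));
+	return 0;
+}
+</PRE>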
+
+<P> Note:  As stated above, the five priority factors each range from 0.0 to 1.0.  As such, the PriorityWeight terms may need to be set to a high enough value (say, 1000) to resolve very tiny differences in priority factors.  This is especially true of the fair-share factor, where two jobs may differ in priority by as little as .001, or even less.</P>
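+
+<P> As a hypothetical illustration, assume (as the unsigned integer weights suggest) that each weighted factor is ultimately truncated to an integer.  Two jobs whose fair-share factors differ by .001 then become indistinguishable under a weight of 1, while a weight of 1000 preserves the difference:</P>
+
+<PRE>
+#include &lt;stdint.h&gt;
+#include &lt;stdio.h&gt;
+
+int main(void)
+{
+	double job1_fs = 0.500, job2_fs = 0.501;	/* differ by .001 */
+	uint32_t w;
+
+	w = 1;		/* both jobs truncate to 0 */
+	printf("weight %u: %u vs. %u\n", w,
+	       (uint32_t)(w * job1_fs), (uint32_t)(w * job2_fs));
+
+	w = 1000;	/* 500 vs. 501: the difference survives */
+	printf("weight %u: %u vs. %u\n", w,
+	       (uint32_t)(w * job1_fs), (uint32_t)(w * job2_fs));
+	return 0;
+}
+</PRE>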
+
+<!-------------------------------------------------------------------------->
+<a name=configexample>
+<h2>Configuration Example</h2></a>
+
+<P> The following are sample slurm.conf file settings for the
+  Multi-factor Job Priority Plugin.</P>
+
+<P> The first example is for running the plugin applying decay over
+  time to reduce usage.  Hard limits can be used in this
+  configuration, but will have less effect since usage will decay
+  over time instead of having no decay over time.</P> 
+<PRE>
+# Activate the Multi-factor Job Priority Plugin with decay
+PriorityType=priority/multifactor
+
+# 2 week half-life
+PriorityDecayHalfLife=14-0
+
+# The larger the job, the greater its job size priority.
+PriorityFavorSmall=NO
+
+# The job's age factor reaches 1.0 after waiting in the
+# queue for 2 weeks.
+PriorityMaxAge=14-0
+
+# This next group determines the weighting of each of the
+# components of the Multi-factor Job Priority Plugin.
+# The default value for each of the following is 1.
+PriorityWeightAge=1000
+PriorityWeightFairshare=10000
+PriorityWeightJobSize=1000
+PriorityWeightPartition=1000
+PriorityWeightQOS=0 # don't use the qos factor
+</PRE>
+
+<P> This example is for running the plugin with no decay on usage,
+  thus making a reset of usage necessary.</P>
+<PRE>
+# Activate the Multi-factor Job Priority Plugin without decay
+PriorityType=priority/multifactor
+
+# apply no decay
+PriorityDecayHalfLife=0
+
+# reset usage after 28 days
+PriorityUsageResetPeriod=28-0
+
+# The larger the job, the greater its job size priority.
+PriorityFavorSmall=NO
+
+# The job's age factor reaches 1.0 after waiting in the
+# queue for 2 weeks.
+PriorityMaxAge=14-0
+
+# This next group determines the weighting of each of the
+# components of the Multi-factor Job Priority Plugin.
+# The default value for each of the following is 1.
+PriorityWeightAge=1000
+PriorityWeightFairshare=10000
+PriorityWeightJobSize=1000
+PriorityWeightPartition=1000
+PriorityWeightQOS=0 # don't use the qos factor
+</PRE>
+
+<!-------------------------------------------------------------------------->
+<p style="text-align:center;">Last modified 12 June 2009</p>
+
+<!--#include virtual="footer.txt"-->
+
diff --git a/doc/html/priority_plugins.shtml b/doc/html/priority_plugins.shtml
new file mode 100644
index 0000000000000000000000000000000000000000..222e494ece62c749dcf12ca8ed7c4e09b824b743
--- /dev/null
+++ b/doc/html/priority_plugins.shtml
@@ -0,0 +1,129 @@
+<!--#include virtual="header.txt"-->
+
+<h1><a name="top">SLURM Priority Plugin API</a></h1>
+
+<h2> Overview</h2>
+<p> This document describes SLURM priority plugins and the API that defines
+them. It is intended as a resource to programmers wishing to write their own
+SLURM priority plugins. This is version 100 of the API.</p>
+
+<p>SLURM priority plugins are SLURM plugins that implement the SLURM priority
+API described herein. They must conform to the SLURM Plugin API with the
+following specifications:</p>
+
+<p><span class="commandline">const char
+plugin_type[]="<i>major/minor</i>"</span><br>
+The major type must be &quot;priority.&quot; The minor type can be any
+recognizable abbreviation for the type of priority.
+We recommend, for example:</p>
+
+<ul>
+<li><b>basic</b>&#151;A plugin that implements the API and provides basic FIFO
+job priority.</li>
+<li><b>multifactor</b>&#151;The multi-factor job priority plugin.</li>
+</ul>
+
+<p>The <span class="commandline">plugin_name</span> and
+<span class="commandline">plugin_version</span> symbols required by the SLURM
+Plugin API require no specialization for job priority support.
+Note carefully, however, the versioning discussion below.</p>
+
+<p>The programmer is urged to study
+<span class="commandline">src/plugins/priority/basic/priority_basic.c</span> 
+for an example implementation of a SLURM priority plugin.</p>
+
+<p class="footer"><a href="#top">top</a></p>
+
+<h2>Data Objects</h2>
+<p>The implementation must maintain (though not necessarily directly export) an
+enumerated <b>errno</b> to allow SLURM to discover as practically as possible
+the reason for any failed API call.  Plugin-specific enumerated integer values
+may be used when appropriate.</p>
+
+<p>These values must not be used as return values in integer-valued functions
+in the API. The proper error return value from integer-valued functions is
+SLURM_ERROR. The implementation should endeavor to provide useful and pertinent
+information by whatever means is practical. Successful API calls are not
+required to reset any errno to a known value. However, the initial value of any
+errno, prior to any error condition arising, should be SLURM_SUCCESS. </p>
+
+<p class="commandline"> job_record</p>
+<p style="margin-left:.2in"><b>Description</b>: A slurmctld structure that
+contains details about a job.</p>
+
+<p class="commandline"> acct_association_rec_t</p>
+<p style="margin-left:.2in"><b>Description</b>: A slurm_accounting_storage
+structure that contains details about an association.</p>
+
+<p class="commandline"> priority_factors_object_t</p>
+<p style="margin-left:.2in"><b>Description</b>: A structure that contains a
+job's priority factors.</p>
+
+<p class="commandline"> priority_factors_request_msg_t</p>
+<p style="margin-left:.2in"><b>Description</b>: Used to request job priority
+factors.  Contains a list of specific job and user ids of the jobs the user
+wants to see.</p>
+
+<p class="commandline"> priority_factors_response_msg_t</p>
+<p style="margin-left:.2in"><b>Description</b>: Used to return the list of
+priority_factors_object_t's containing the job priority factors the user has
+asked to see.</p>
+
+<p class="footer"><a href="#top">top</a></p>
+
+<h2>API Functions</h2>
+<p>The following functions must appear. Functions which are not implemented should
+be stubbed.</p>
+
+<p class="commandline">uint32_t priority_p_set(uint32_t last_prio, struct job_record *job_ptr)</p>
+<p style="margin-left:.2in"><b>Description</b>: Sets the priority of the job.</p>
+<p style="margin-left:.2in"><b>Arguments</b>:<br>
+<span class="commandline">last_prio</span> (input) the priority assigned to the
+last job<br>
+<span class="commandline">job_ptr</span> (input) pointer to the job record.</p>
+<p style="margin-left:.2in"><b>Returns</b>: the priority assigned to the job</p>
+
+<p class="commandline">void priority_p_reconfig(void)</p>
+<p style="margin-left:.2in"><b>Description</b>: Refresh the plugin's
+configuration. Called whenever slurmctld is reconfigured.</p>
+<p style="margin-left:.2in"><b>Arguments</b>:
+<span class="commandline">none</span></p>
+<p style="margin-left:.2in"><b>Returns</b>: void</p>
+
+<p class="commandline">int priority_p_set_max_cluster_usage(uint32_t procs, uint32_t half_life)</p>
+<p style="margin-left:.2in"><b>Description</b>: Conveys the maximum
+raw usage to the priority plugin.  This establishes the reference by
+which each job's usage can be normalized.</p>
+<p style="margin-left:.2in"><b>Arguments</b>:<br>
+<span class="commandline">procs</span> (input) the machine's processor count<br>
+<span class="commandline">half_life</span> (input) the configured half-life</p>
+<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful,
+otherwise SLURM_ERROR</p>
+
+<p class="commandline">void priority_p_set_assoc_usage(acct_association_rec_t *assoc)</p>
+<p style="margin-left:.2in"><b>Description</b>: Set the normalized and
+effective usage for an association.</p>
+<p style="margin-left:.2in"><b>Arguments</b>:
+<span class="commandline">assoc</span> (input/output) pointer to the association.</p>
+<p style="margin-left:.2in"><b>Returns</b>: void</p>
+
+<p class="commandline">List priority_p_get_priority_factors_list(priority_factors_request_msg_t *req_msg)</p>
+<p style="margin-left:.2in"><b>Description</b>: Retrieves the priority factors
+for all or specified jobs.</p>
+<p style="margin-left:.2in"><b>Arguments</b>:
+<span class="commandline">req_msg</span> (input) pointer to the message request
+that contains the specific jobs or users of interest (if any).</p>
+<p style="margin-left:.2in"><b>Returns</b>: a list of priority_factors_object_t's
+containing the requested job priority factors</p>
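+
+<P>The sketch below simply gathers the symbols and function signatures
+described on this page into a single stub.  It is not a drop-in plugin:
+the header paths shown are assumptions, and working function bodies
+should be modeled on
+<span class="commandline">src/plugins/priority/basic/priority_basic.c</span>
+as recommended above.</P>
+
+<PRE>
+/* Sketch of a stub priority plugin; see priority_basic.c for the
+ * real header list and working implementations. */
+#include "slurm/slurm_errno.h"
+#include "src/common/list.h"
+#include "src/slurmctld/slurmctld.h"
+
+const char plugin_name[]      = "Priority EXAMPLE plugin";
+const char plugin_type[]      = "priority/example";
+const uint32_t plugin_version = 100;
+
+/* Assign the job's priority; a basic FIFO plugin can simply count
+ * down from the priority assigned to the previous job. */
+uint32_t priority_p_set(uint32_t last_prio, struct job_record *job_ptr)
+{
+	return (last_prio &gt; 1) ? (last_prio - 1) : 1;
+}
+
+/* Refresh configuration; called whenever slurmctld is reconfigured. */
+void priority_p_reconfig(void)
+{
+}
+
+/* Record the maximum raw usage used to normalize each job's usage. */
+int priority_p_set_max_cluster_usage(uint32_t procs, uint32_t half_life)
+{
+	return SLURM_SUCCESS;
+}
+
+/* Set the normalized and effective usage for an association. */
+void priority_p_set_assoc_usage(acct_association_rec_t *assoc)
+{
+}
+
+/* Return a List of priority_factors_object_t for the requested jobs. */
+List priority_p_get_priority_factors_list(priority_factors_request_msg_t *req_msg)
+{
+	return NULL;	/* stubbed; a real plugin builds and returns the list */
+}
+</PRE>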
+
+<h2>Versioning</h2>
+<p> This document describes version 100 of the SLURM Priority API. Future
+releases of SLURM may revise this API. A priority plugin conveys its ability
+to implement a particular API version using the mechanism outlined for SLURM
+plugins.</p>
+
+<p class="footer"><a href="#top">top</a></p>
+
+<p style="text-align:center;">Last modified 20 February 2009</p>
+
+<!--#include virtual="footer.txt"-->
diff --git a/doc/html/proctrack_plugins.shtml b/doc/html/proctrack_plugins.shtml
index 260a3380424ff0c9373c73a01bd1e5a2b1e029eb..9c529a8a55b5e6f3d784a36b251b86476723c368 100644
--- a/doc/html/proctrack_plugins.shtml
+++ b/doc/html/proctrack_plugins.shtml
@@ -74,7 +74,7 @@ be stubbed.</p>
 <p style="margin-left:.2in"><b>Description</b>: Create a container.
 The container should be valid until
 <span class="commandline">slurm_container_destroy()</span> is called.
-This function must put the container ID directoy in the job structure's 
+This function must put the container ID directory in the job structure's 
 variable <i>cont_id</i>.</p>
 <p style="margin-left:.2in"><b>Argument</b>: 
 <span class="commandline"> job</span>&nbsp; &nbsp;&nbsp;(input/output) 
diff --git a/doc/html/programmer_guide.shtml b/doc/html/programmer_guide.shtml
index 50a402d14ba689d65a1067d745ac9635746925ff..6e9c59e8f633560f84ee48e9cd72dc526247fb7d 100644
--- a/doc/html/programmer_guide.shtml
+++ b/doc/html/programmer_guide.shtml
@@ -36,7 +36,7 @@ by all of the different infrastructures of a particular variety. When a SLURM
 daemon is initiated, it reads the configuration file to determine which of the 
 available plugins should be used. A <a href="plugins.html">plugin developer's 
 guide</a> is available with general information about plugins. Most plugin 
-types also have their own documenation available, such as 
+types also have their own documentation available, such as 
 <a href="authplugins.html">SLURM Authentication Plugin API</a> and
 <a href="jobcompplugins.html">SLURM Job Completion Logging API</a>.</p>
 
@@ -74,12 +74,14 @@ are here.</p>
 <p class="footer"><a href="#top">top</a></p>
 
 <h2>Documentation</h2>
-<p>All of the documentation is in the subdirectory <b>doc</b>. Man pages for the 
-APIs, configuration file, commands, and daemons are in <b>doc/man</b>. Various 
-documents suitable for public consumption are in <b>doc/html</b>. Overall SLURM 
-design documents including various figures are in <b>doc/pubdesign</b>. Various 
-design documents (many of which are dated) can be found in <b>doc/slides</b> and 
-<b>doc/txt</b>. A survey of available resource managers as of 2001 is in <b>doc/survey</b>. 
+<p>All of the documentation is in the subdirectory <b>doc</b>.
+Two directories are of particular interest:</p>
+
+<p style="margin-left:.2in">
+<b>doc/man</b>&#151; contains the man pages for the APIs, 
+configuration file, commands, and daemons.<br>
+<b>doc/html</b>&#151; contains the web pages.</p>
+ 
 <h2>Source Code</h2>
 
 <p>Functions are divided into several categories, each in its own subdirectory. 
@@ -92,21 +94,27 @@ the SLURM code. Used to send and get SLURM information from the central manager.
 These are the functions user applications might utilize.<br>
 <b>common</b>&#151;General purpose functions for widespread use throughout 
 SLURM.<br>
-<b>plugins</b>&#151;Plugin functions for various infrastructure. A separate 
-subdirectory is used for each plugin class:<br> 
+<b>database</b>&#151;Various database files that support the accounting
+ storage plugin.<br>
+<b>plugins</b>&#151;Plugin functions for various infrastructures or optional
+behavior. A separate subdirectory is used for each plugin class:<br>
 <ul>
 <li><b>accounting_storage</b> for specifying the type of storage for accounting,<br>
-<li><b>auth</b> for user authentication,<br> 
+<li><b>auth</b> for user authentication,<br>
 <li><b>checkpoint</b> for system-initiated checkpoint and restart of user jobs,<br>
 <li><b>crypto</b> for cryptographic functions,<br>
 <li><b>jobacct_gather</b> for job accounting,<br>
 <li><b>jobcomp</b> for job completion logging,<br>
 <li><b>mpi</b> for MPI support,<br>
+<li><b>priority</b> calculates job priority based on a number of factors
+including fair-share,<br>
 <li><b>proctrack</b> for process tracking,<br>
-<li><b>sched</b> for job scheduler,<br> 
+<li><b>sched</b> for job scheduler,<br>
 <li><b>select</b> for a job's node selection,<br>
 <li><b>switch</b> for switch (interconnect) specific functions,<br>
-<li><b>task</b> for task affinity to processors.<br>
+<li><b>task</b> for task affinity to processors,<br>
+<li><b>topology</b> for assigning nodes to jobs based on node
+topology.<br>
 </ul>
 <p style="margin-left:.2in">
 <b>sacct</b>&#151;User command to view accounting information about jobs.<br>
@@ -123,15 +131,23 @@ with an existing SLURM job.<br>
 <b>slurmctld</b>&#151;SLURM central manager daemon code.<br>
 <b>slurmd</b>&#151;SLURM daemon code to manage the compute server nodes including 
 the execution of user applications.<br>
+<b>slurmdbd</b>&#151;SLURM database daemon managing access to the accounting
+storage database.<br>
 <b>smap</b>&#151;User command to view layout of nodes, partitions, and jobs.
 This is particularly valuable on systems like Bluegene, which has a three
 dimensional torus topology.<br>
+<b>sprio</b>&#151;User command to see the breakdown of a job's priority
+calculation when the Multifactor Job Priority plugin is installed.<br>
 <b>squeue</b>&#151;User command to get information on SLURM jobs and job steps.<br>
 <b>sreport</b>&#151;User command to view various reports about past
 usage across the enterprise.<br>
 <b>srun</b>&#151;User command to submit a job, get an allocation, and/or 
 initiate a parallel job step.<br>
-<b>sstat</b>&#151;User tool to status running jobs.<br>
+<b>srun_cr</b>&#151;Checkpoint/Restart wrapper for srun.<br>
+<b>sshare</b>&#151;User command to view shares and usage when the Multifactor
+Job Priority plugin is installed.<br>
+<b>sstat</b>&#151;User command to view detailed statistics about running
+jobs when a Job Accounting Gather plugin is installed.<br>
 <b>strigger</b>&#151;User and administrator tool to manage event triggers.<br>
 <b>sview</b>&#151;User command to view and update node, partition, and 
 job state information.<br>
@@ -158,7 +174,7 @@ and options including stress tests.  The file <b>testsuite/expect/globals</b>
 contains default paths and procedures for all of the individual tests.  At
 the very least, you will need to set the <i>slurm_dir</i> variable to the correct
 value.  To avoid conflicts with other developers, you can override variable settings
-in a seperate file named <b>testsuite/expect/globals.local</b>.</p>
+in a separate file named <b>testsuite/expect/globals.local</b>.</p>
 
 <p>Set your working directory to <b>testsuite/expect</b> before 
 starting these tests.  Tests may be executed individually by name
@@ -187,7 +203,7 @@ simultaneous job steps to avoid overloading the
 <span class="commandline">slurmd</span> daemon executing them all.</p>
 
 <h3><a name="multiple_slurmd_support">Multiple slurmd support</a></h3>
-<p>It is possible to run mutiple slurmd daemons on a single node, each using
+<p>It is possible to run multiple slurmd daemons on a single node, each using
 a different port number and NodeName alias.  This is very useful for testing
 networking and protocol changes, or anytime you want to simulate a larger
 cluster than you really have.  The author uses this on his desktop to simulate
@@ -235,6 +251,6 @@ host1> slurmd -N foo21
 
 <p class="footer"><a href="#top">top</a></p>
 
-<p style="text-align:center;">Last modified 7 October 2008</p>
+<p style="text-align:center;">Last modified 27 March 2009</p>
 
 <!--#include virtual="footer.txt"-->
diff --git a/doc/html/publications.shtml b/doc/html/publications.shtml
index 2dde255da3a6b334e1f00449e1bc333771271a1f..49c34f8f7585e736b1b5fd75b2b992f39464fe22 100644
--- a/doc/html/publications.shtml
+++ b/doc/html/publications.shtml
@@ -7,8 +7,13 @@
 <h2>Presentations</h2>
 
 <ul>
+<li><a href="slurm.sc08.bof.pdf">High Scalability Resource Management with 
+SLURM</a> (Supercomputing 2008, November 2008)</li>
+<li><a href="slurm.sc08.status.pdf">SLURM Status Report</a>
+(Supercomputing 2008, November 2008)</li>
 <li><a href="slurm_v1.3.pdf">SLURM Version 1.3</a> (May 2008)</li>
-<li><a href="slurm_moab.pdf">Managing Clusters with Moab and SLURM</a> (May 2008)</li>
+<li><a href="slurm_moab.pdf">Managing Clusters with Moab and SLURM</a> 
+(May 2008)</li>
 <li><a href="slurm_v1.2.pdf">Resource Management at LLNL, SLURM Version 1.2</a>
 (April 2007)</li>
 <li><a href="lci.7.tutorial.pdf">Resource Management Using SLURM</a>,
@@ -16,7 +21,15 @@
 </ul>
 
 <h2>Publications</h2>
-<b>Enhancing an Open Source Resource Manager with Multi-Core/Multi-threaded Support</b>,
+
+<p><a href="http://www.linux-mag.com/id/7239/1/">
+<b>Caos NSA and Perceus: All-in-one Cluster Software Stack</b></a>,
+Jeffrey B. Layton,
+<i>Linux Magazine</i>,
+5 February 2009.
+</p>
+
+<p><b>Enhancing an Open Source Resource Manager with Multi-Core/Multi-threaded Support</b>,
 S. M. Balle and D. Palermo,
 <i>Job Scheduling Strategies for Parallel Processing</i>,
 2007.</p>
@@ -27,13 +40,20 @@ M. Jette and M. Grondona,
 <i>Proceedings of ClusterWorld Conference and Expo</i>,
 San Jose, California, June 2003.</p>
 
-<b>SLURM: Simple Linux Utility for Resource Management</b>,
+<p><b>SLURM: Simple Linux Utility for Resource Management</b>,
 A. Yoo, M. Jette, and M. Grondona,
 <i>Job Scheduling Strategies for Parallel Processing</i>,
 volume 2862 of <i>Lecture Notes in Computer Science</i>, 
 pages 44-60,
 Springer-Verlag, 2003.</p>
 
-<p style="text-align:center;">Last modified 3 June 2008</p>
+<h2>Interview</h2>
+
+<p><a href="http://www.rce-cast.com/index.php/Podcast/rce-10-slurm.html">
+RCE 10: SLURM (podcast)</a>:
+Brock Palen and Jeff Squyres speak with Morris Jette and 
+Danny Auble of LLNL about SLURM.</p>
+
+<p style="text-align:center;">Last modified 27 May 2009</p>
 
 <!--#include virtual="footer.txt"-->
diff --git a/doc/html/quickstart.shtml b/doc/html/quickstart.shtml
index af47c69404cd1dcde3efc18806716c4f63d2bb82..d93ff92077107ce4cc23d9cf6891018b8e885187 100644
--- a/doc/html/quickstart.shtml
+++ b/doc/html/quickstart.shtml
@@ -25,7 +25,7 @@ and <b>sview</b>.
 All of the commands can run anywhere in the cluster.</p>
 
 <div class="figure">
-  <img src="arch.gif" width="600"><br />
+  <img src="arch.gif" width=550></br>
   Figure 1. SLURM components
 </div>
 
@@ -45,7 +45,7 @@ a single job step may be started that utilizes all nodes allocated to the job,
 or several job steps may independently use a portion of the allocation.</p>
 
 <div class="figure">
-  <img src="entities.gif" width="291" height="218"><br />
+  <img src="entities.gif" width=500></br>
   Figure 2. SLURM entities
 </div>
 
@@ -346,11 +346,11 @@ adev0: scancel 473
 adev0: squeue
 JOBID PARTITION NAME USER ST TIME  NODES NODELIST(REASON)
 </pre>
-<p class="footer"><a href="#top">top</a></p>
-
 
+<p class="footer"><a href="#top">top</a></p>
 
 <h2><a name="mpi">MPI</a></h2>
+
 <p>MPI use depends upon the type of MPI being used. 
 There are three fundamentally different modes of operation used 
 by these various MPI implementations.
@@ -368,215 +368,22 @@ These tasks initiated outside of SLURM's monitoring
 or control. SLURM's epilog should be configured to purge 
 these tasks when the job's allocation is relinquished. </li>
 </ol>
-<p>Instructions for using several varieties of MPI with SLURM are
-provided below.</p> 
-
-<p> <a href="http://www.open-mpi.org/"><b>Open MPI</b></a> relies upon
-SLURM to allocate resources for the job and then mpirun to initiate the 
-tasks. When using <span class="commandline">salloc</span> command, 
-<span class="commandline">mpirun</span>'s -nolocal option is recommended. 
-For example:
-<pre>
-$ salloc -n4 sh    # allocates 4 processors 
-                   # and spawns shell for job
-&gt; mpirun -np 4 -nolocal a.out
-&gt; exit             # exits shell spawned by 
-                   # initial srun command
-</pre>
-<p>Note that any direct use of <span class="commandline">srun</span>
-will only launch one task per node when the LAM/MPI plugin is used.
-To launch more than one task per node using the
-<span class="commandline">srun</span> command, the <i>--mpi=none</i>
-option will be required to explicitly disable the LAM/MPI plugin.</p>
-
-<p> <a href="http://www.quadrics.com/"><b>Quadrics MPI</b></a> relies upon SLURM to 
-allocate resources for the job and <span class="commandline">srun</span> 
-to initiate the tasks. One would build the MPI program in the normal manner 
-then initiate it using a command line of this sort:</p>
-<pre>
-$ srun [options] &lt;program&gt; [program args]
-</pre>
-
-<p> <a href="http://www.lam-mpi.org/"><b>LAM/MPI</b></a> relies upon the SLURM 
-<span class="commandline">salloc</span> or <span class="commandline">sbatch</span>
-command to allocate. In either case, specify 
-the maximum number of tasks required for the job. Then execute the 
-<span class="commandline">lamboot</span> command to start lamd daemons. 
-<span class="commandline">lamboot</span> utilizes SLURM's 
-<span class="commandline">srun</span> command to launch these daemons. 
-Do not directly execute the <span class="commandline">srun</span> command 
-to launch LAM/MPI tasks. For example: 
-<pre>
-$ salloc -n16 sh  # allocates 16 processors 
-                  # and spawns shell for job
-&gt; lamboot
-&gt; mpirun -np 16 foo args
-1234 foo running on adev0 (o)
-2345 foo running on adev1
-etc.
-&gt; lamclean
-&gt; lamhalt
-&gt; exit            # exits shell spawned by 
-                  # initial srun command
-</pre>
-<p>Note that any direct use of <span class="commandline">srun</span> 
-will only launch one task per node when the LAM/MPI plugin is configured
-as the default plugin.  To launch more than one task per node using the 
-<span class="commandline">srun</span> command, the <i>--mpi=none</i>
-option would be required to explicitly disable the LAM/MPI plugin
-if that is the system default.</p>
-
-<p class="footer"><a href="#top">top</a></p>
-
-<p><a href="http://www.hp.com/go/mpi"><b>HP-MPI</b></a> uses the 
-<span class="commandline">mpirun</span> command with the <b>-srun</b> 
-option to launch jobs. For example:
-<pre>
-$MPI_ROOT/bin/mpirun -TCP -srun -N8 ./a.out
-</pre></p>
-
-<p><a href="http://www.mcs.anl.gov/research/projects/mpich2/"><b>
-MPICH2</b></a> jobs 
-are launched using the <b>srun</b> command. Just link your program with 
-SLURM's implementation of the PMI library so that tasks can communicate
-host and port information at startup. (The system administrator can add
-these option to the mpicc and mpif77 commands directly, so the user will not 
-need to bother). For example:
-<pre>
-$ mpicc -L&lt;path_to_slurm_lib&gt; -lpmi ...
-$ srun -n20 a.out
-</pre>
-<b>NOTES:</b>
+<p>Links to instructions for using several varieties of MPI 
+with SLURM are provided below.
 <ul>
-<li>Some MPICH2 functions are not currently supported by the PMI 
-library integrated with SLURM</li>
-<li>Set the environment variable <b>PMI_DEBUG</b> to a numeric value 
-of 1 or higher for the PMI library to print debugging information</li>
+<li><a href="mpi_guide.html#bluegene_mpi">BlueGene MPI</a></li>
+<li><a href="mpi_guide.html#hp_mpi">HP-MPI</a></li>
+<li><a href="mpi_guide.html#lam_mpi">LAM/MPI</a></li>
+<li><a href="mpi_guide.html#mpich1">MPICH1</a></li>
+<li><a href="mpi_guide.html#mpich2">MPICH2</a></li>
+<li><a href="mpi_guide.html#mpich_gm">MPICH-GM</a></li>
+<li><a href="mpi_guide.html#mpich_mx">MPICH-MX</a></li>
+<li><a href="mpi_guide.html#mvapich">MVAPICH</a></li>
+<li><a href="mpi_guide.html#mvapich2">MVAPICH2</a></li>
+<li><a href="mpi_guide.html#open_mpi">Open MPI</a></li>
+<li><a href="mpi_guide.html#quadrics_mpi">Quadrics MPI</a></li>
 </ul></p>
 
-<p><a href="http://www.myri.com/scs/download-mpichgm.html"><b>MPICH-GM</b></a>
-jobs can be launched directly by <b>srun</b> command.
-SLURM's <i>mpichgm</i> MPI plugin must be used to establish communications 
-between the launched tasks. This can be accomplished either using the SLURM 
-configuration parameter <i>MpiDefault=mpichgm</i> in <b>slurm.conf</b>
-or srun's <i>--mpi=mpichgm</i> option.
-<pre>
-$ mpicc ...
-$ srun -n16 --mpi=mpichgm a.out
-</pre>
-
-<p><a href="http://www.myri.com/scs/download-mpichmx.html"><b>MPICH-MX</b></a>
-jobs can be launched directly by <b>srun</b> command.
-SLURM's <i>mpichmx</i> MPI plugin must be used to establish communications
-between the launched tasks. This can be accomplished either using the SLURM
-configuration parameter <i>MpiDefault=mpichmx</i> in <b>slurm.conf</b>
-or srun's <i>--mpi=mpichmx</i> option.
-<pre>
-$ mpicc ...
-$ srun -n16 --mpi=mpichmx a.out
-</pre>
-
-<p><a href="http://mvapich.cse.ohio-state.edu/"><b>MVAPICH</b></a>
-jobs can be launched directly by <b>srun</b> command.
-SLURM's <i>mvapich</i> MPI plugin must be used to establish communications 
-between the launched tasks. This can be accomplished either using the SLURM 
-configuration parameter <i>MpiDefault=mvapich</i> in <b>slurm.conf</b>
-or srun's <i>--mpi=mvapich</i> option.
-<pre>
-$ mpicc ...
-$ srun -n16 --mpi=mvapich a.out
-</pre>
-<b>NOTE:</b> If MVAPICH is used in the shared memory model, with all tasks
-running on a single node, then use the <i>mpich1_shmem</i> MPI plugin instead.<br>
-<b>NOTE (for system administrators):</b> Configure
-<i>PropagateResourceLimitsExcept=MEMLOCK</i> in <b>slurm.conf</b> and 
-start the <i>slurmd</i> daemons with an unlimited locked memory limit.
-For more details, see 
-<a href="http://mvapich.cse.ohio-state.edu/support/mvapich_user_guide.html#x1-420007.2.3">MVAPICH</a> 
-documentation for "CQ or QP Creation failure".</p>
-
-<p><a href="http://nowlab.cse.ohio-state.edu/projects/mpi-iba"><b>MVAPICH2</b></a>
-jobs can be launched directly by <b>srun</b> command.
-SLURM's <i>none</i> MPI plugin must be used to establish communications 
-between the launched tasks. This can be accomplished either using the SLURM 
-configuration parameter <i>MpiDefault=none</i> in <b>slurm.conf</b> 
-or srun's <i>--mpi=none</i> option. The program must also be linked with
-SLURM's implementation of the PMI library so that tasks can communicate
-host and port information at startup. (The system administrator can add
-these option to the mpicc and mpif77 commands directly, so the user will not
-need to bother).  <b>Do not use SLURM's MVAPICH plugin for MVAPICH2.</b>
-<pre>
-$ mpicc -L&lt;path_to_slurm_lib&gt; -lpmi ...
-$ srun -n16 --mpi=none a.out
-</pre>
-
-<p><a href="http://www.research.ibm.com/bluegene/"><b>BlueGene MPI</b></a> relies 
-upon SLURM to create the resource allocation and then uses the native
-<span class="commandline">mpirun</span> command to launch tasks. 
-Build a job script containing one or more invocations of the 
-<span class="commandline">mpirun</span> command. Then submit 
-the script to SLURM using <span class="commandline">sbatch</span>.
-For example:</p>
-<pre>
-$ sbatch -N512 my.script
-</pre>
-<p>Note that the node count specified with the <i>-N</i> option indicates
-the base partition count.
-See <a href="bluegene.html">BlueGene User and Administrator Guide</a> 
-for more information.</p>
-
-<p><a href="http://www-unix.mcs.anl.gov/mpi/mpich1/"><b>MPICH1</b></a>
-development ceased in 2005. It is recommended that you convert to 
-MPICH2 or some other MPI implementation. 
-If you still want to use MPICH1, note that it has several different 
-programming models. If you are using the shared memory model 
-(<i>DEFAULT_DEVICE=ch_shmem</i> in the mpirun script), then initiate 
-the tasks using the <span class="commandline">srun</span> command 
-with the <i>--mpi=mpich1_shmem</i> option.</p>
-<pre>
-$ srun -n16 --mpi=mpich1_shmem a.out
-</pre>
-
-<p>If you are using MPICH P4 (<i>DEFAULT_DEVICE=ch_p4</i> in 
-the mpirun script) and SLURM version 1.2.11 or newer, 
-then it is recommended that you apply the patch in the SLURM 
-distribution's file <i>contribs/mpich1.slurm.patch</i>. 
-Follow directions within the file to rebuild MPICH. 
-Applications must be relinked with the new library.
-Initiate tasks using the 
-<span class="commandline">srun</span> command with the 
-<i>--mpi=mpich1_p4</i> option.</p>
-<pre>
-$ srun -n16 --mpi=mpich1_p4 a.out
-</pre>
-<p>Note that SLURM launches one task per node and the MPICH 
-library linked within your applications launches the other 
-tasks with shared memory used for communications between them.
-The only real anomaly is that all output from all spawned tasks
-on a node appear to SLURM as coming from the one task that it
-launched. If the srun --label option is used, the task ID labels
-will be misleading.</p>
- 
-<p>Other MPICH1 programming models current rely upon the SLURM 
-<span class="commandline">salloc</span> or 
-<span class="commandline">sbatch</span> command to allocate resources.
-In either case, specify the maximum number of tasks required for the job.
-You may then need to build a list of hosts to be used and use that 
-as an argument to the mpirun command. 
-For example:
-<pre>
-$ cat mpich.sh
-#!/bin/bash
-srun hostname -s | sort -u >slurm.hosts
-mpirun [options] -machinefile slurm.hosts a.out
-rm -f slurm.hosts
-$ sbatch -n16 mpich.sh
-sbatch: Submitted batch job 1234
-</pre>
-<p>Note that in this example, mpirun uses the rsh command to launch 
-tasks. These tasks are not managed by SLURM since they are launched 
-outside of its control.</p>
- 
-<p style="text-align:center;">Last modified 16 July 2008</p>
+<p style="text-align:center;">Last modified 26 February 2009</p>
 
 <!--#include virtual="footer.txt"-->
diff --git a/doc/html/quickstart_admin.shtml b/doc/html/quickstart_admin.shtml
index 08ff660f096d304840f06f5530ad4e61f97733de..588ac7a1163bca222806ffc02a7fe415a36c97e5 100644
--- a/doc/html/quickstart_admin.shtml
+++ b/doc/html/quickstart_admin.shtml
@@ -2,20 +2,24 @@
 
 <h1>Quick Start Administrator Guide</h1>
 <h2>Overview</h2>
-Please see the <a href="quickstart.html">Quick Start User Guide</a> for a general 
-overview. 
+Please see the <a href="quickstart.html">Quick Start User Guide</a> for a
+general overview. 
 
 <h2>Super Quick Start</h2>
 <ol>
 <li>Make sure that you have synchronized clocks plus consistent users and groups
-across the cluster.</li>
+(UIDs and GIDs) across the cluster.</li>
+<li>Install <a href="http://home.gna.org/munge">MUNGE</a> for 
+authentication. Make sure that all nodes in your cluster have the
+same <i>munge.key</i>. Make sure the MUNGE daemon, <i>munged</i>,
+is started before you start the SLURM daemons.</li>
 <li>bunzip2 the distributed tar-ball and untar the files:<br>
 <i>tar --bzip -x -f slurm*tar.bz2</i></li>
 <li><i>cd</i> to the directory containing the SLURM source and type
 <i>./configure</i> with appropriate options, typically <i>--prefix=</i> 
 and <i>--sysconfdir=</i></li>
 <li>Type <i>make</i> to compile SLURM.</li>
-<li>Type <i>make install</i> to install the programs, documentation, libaries,
+<li>Type <i>make install</i> to install the programs, documentation, libraries,
 header files, etc.</li>
 <li>Build a configuration file using your favorite web browser and
 <i>doc/html/configurator.html</i>.<br>
@@ -27,22 +31,15 @@ They must be created and made writable by <i>SlurmUser</i> as needed prior to
 starting SLURM daemons.</li>
 <li>Install the configuration file in <i>&lt;sysconfdir&gt;/slurm.conf</i>.<br>
 NOTE: You will need to install this configuration file on all nodes of the cluster.</li>
-<li>Create OpenSSL keys:<br>
-<i>openssl genrsa -out &lt;sysconfdir&gt;/slurm.key 1024</i><br>
-<i>openssl rsa -in  &lt;sysconfdir&gt/slurm.key -pubout -out  &lt;sysconfdir&gt;/slurm.cert</i><br>
-NOTE: You will build the OpenSSL key files on one node and distribute <i>slurm.cert</i>
-to all of the nodes in the cluster. <i>slurm.key</i> must be readable only by
-<i>SlurmUser<i> and is only needed where the <i>slurmctld</i> (SLURM controller
-daemon) executes, typically just a couple of nodes.</li>
 <li>Start the <i>slurmctld</i> and <i>slurmd</i> daemons.</li>
 </ol>
-<p>NOTE: Items 2 through 5 can be replaced with</p>
+<p>NOTE: Items 3 through 6 can be replaced with</p>
 <ol>
 <li><i>rpmbuild -ta slurm*.tar.bz2</i></li>
 <li><i>rpm --install &lt;the rpm files&gt;</i></li>
 </ol>
 
-<h2>Building and Installing</h2>
+<h2>Building and Installing SLURM</h2>
 
 <p>Instructions to build and install SLURM manually are shown below. 
 See the README and INSTALL files in the source distribution for more details.
@@ -51,13 +48,14 @@ See the README and INSTALL files in the source distribution for more details.
 <li>bunzip2 the distributed tar-ball and untar the files:</br> 
 <i>tar --bzip -x -f slurm*tar.bz2</i>
 <li><i>cd</i> to the directory containing the SLURM source and type 
-<i>./configure</i> with appropriate options.</li>
+<i>./configure</i> with appropriate options (see below).</li>
 <li>Type <i>make</i> to compile SLURM.</li>
-<li>Type <i>make install</i> to install the programs, documentation, libaries, 
+<li>Type <i>make install</i> to install the programs, documentation, libraries, 
 header files, etc.</li>
 </ol>
-<p>The most commonly used arguments to the <span class="commandline">configure</span> 
-command include: </p>
+<p>A full list of <i>configure</i> options will be returned by the command
+<i>configure --help</i>. The most commonly used arguments to the 
+<i>configure</i> command include: </p>
 <p style="margin-left:.2in"><span class="commandline">--enable-debug</span><br>
 Enable additional debugging logic within SLURM.</p>
 <p style="margin-left:.2in"><span class="commandline">--prefix=<i>PREFIX</i></span><br>
@@ -69,20 +67,20 @@ Specify location of SLURM configuration file. The default value is PREFIX/etc</p
 
 <p>If required libraries or header files are in non-standard locations, 
 set CFLAGS and LDFLAGS environment variables accordingly.
-Type <i>configure --help</i> for a more complete description of options.
 Optional SLURM plugins will be built automatically when the
 <span class="commandline">configure</span> script detects that the required 
 build requirements are present. Build dependencies for various plugins
 and commands are denoted below.
 </p>
 <ul>
-<li> <b>Munge</b> The auth/munge plugin will be built if the Munge authentication
-                  library is installed. </li>
+<li> <b>MUNGE</b> The auth/munge plugin will be built if the MUNGE authentication
+                  library is installed. MUNGE is used as the default
+                  authentication mechanism.</li>
 <li> <b>Authd</b> The auth/authd plugin will be built and installed if 
                   the libauth library and its dependency libe are installed. 
 		  </li>
 <li> <b>Federation</b> The switch/federation plugin will be built and installed
-		  if the IBM Federation switch libary is installed.
+		  if the IBM Federation switch library is installed.
 <li> <b>QsNet</b> support in the form of the switch/elan plugin requires
                   that the qsnetlibs package (from Quadrics) be installed along
 		  with its development counterpart (i.e. the qsnetheaders
@@ -127,11 +125,11 @@ Some macro definitions that may be used in building SLURM include:
 <dt>slurm_sysconfdir
 <dd>Pathname of directory containing the slurm.conf configuration file
 <dt>with_munge
-<dd>Specifies munge (authentication library) installation location
+<dd>Specifies the MUNGE (authentication library) installation location
 <dt>with_proctrack
 <dd>Specifies AIX process tracking kernel extension header file location
 <dt>with_ssl
-<dd>Specifies SSL libary installation location
+<dd>Specifies SSL library installation location
 </dl>
 <p>To build SLURM on our AIX system, the following .rpmmacros file is used:
 <pre>
@@ -172,20 +170,26 @@ readable or writable by the user <b>SlurmUser</b> (the slurm configuration
 files must be readable; the log file directory and state save directory 
 must be writable).</p>
 
-<p>The <b>slurmd</b> daemon executes on every compute node. It resembles a remote 
-shell daemon to export control to SLURM. Because slurmd initiates and manages 
-user jobs, it must execute as the user root.</p>
+<p>The <b>slurmd</b> daemon executes on every compute node. It resembles a 
+remote shell daemon to export control to SLURM. Because slurmd initiates and 
+manages user jobs, it must execute as the user root.</p>
+
+<p>If you want to archive job accounting records to a database, the 
+<b>slurmdbd</b> (SLURM DataBase Daemon) should be used. We recommend that 
+you defer adding accounting support until after basic SLURM functionality 
+has been established on your system. The <a href="accounting.html">Accounting</a> 
+web page contains more information.</p>
 
-<p><b>slurmctld</b> and/or <b>slurmd</b> should be initiated at node startup time 
-per the SLURM configuration.
+<p><b>slurmctld</b> and/or <b>slurmd</b> should be initiated at node startup 
+time per the SLURM configuration.
 A file <b>etc/init.d/slurm</b> is provided for this purpose. 
 This script accepts commands <b>start</b>, <b>startclean</b> (ignores 
 all saved state), <b>restart</b>, and <b>stop</b>.</p>
 
 <h2>Infrastructure</h2>
 <h3>User and Group Identification</h3>
-<p>There must be a uniform user and group name space across the 
-cluster. 
+<p>There must be a uniform user and group name space (including 
+UIDs and GIDs) across the cluster. 
 It is not necessary to permit user logins to the control hosts
 (<b>ControlMachine</b> or <b>BackupController</b>), but the 
 users and groups must be configured on those hosts.</p>
 plugin chosen at runtime via the <b>AuthType</b> keyword in the SLURM 
 configuration file.  Currently available authentication types include
 <a href="http://www.theether.org/authd/">authd</a>, 
 <a href="http://home.gna.org/munge/">munge</a>, and none.
-The default authentication infrastructure is "none". This permits any user to execute 
-any job as another user. This may be fine for testing purposes, but certainly not for production 
-use. <b>Configure some AuthType value other than "none" if you want any security.</b>
-We recommend the use of Munge unless you are experienced with authd.
-</p>
+The default authentication infrastructure is "munge", but this does 
+require the installation of the MUNGE package.
+An authentication type of "none" requires no infrastructure, but permits
+any user to execute any job as another user with limited programming effort. 
+This may be fine for testing purposes, but certainly not for production use. 
+<b>Configure some AuthType value other than "none" if you want any security.</b>
+We recommend the use of MUNGE unless you are experienced with authd.
+If using MUNGE, all nodes in the cluster must be configured with the
+same <i>munge.key</i> file. 
+The MUNGE daemon, <i>munged</i>, must also be started before SLURM daemons.</p>
+
 <p>While SLURM itself does not rely upon synchronized clocks on all nodes
 of a cluster for proper operation, its underlying authentication mechanism 
-may have this requirement. For instance, if SLURM is making use of the
-auth/munge plugin for communication, the clocks on all nodes will need to 
-be synchronized. </p>
+does have this requirement.</p>
 
 <h3>MPI support</h3>
 <p>SLURM supports many different MPI implementations. 
 For more information, see <a href="quickstart.html#mpi">MPI</a>.
 
 <h3>Scheduler support</h3>
-<p>The scheduler used by SLURM is controlled by the <b>SchedType</b> configuration 
-parameter. This is meant to control the relative importance of pending jobs and 
-several options are available
-SLURM's default scheduler is <u>FIFO (First-In First-Out)</u>. 
-SLURM offers a backfill scheduling plugin.
-<u>Backfill scheduling</u> will initiate a lower-priority jobs
-if doing so does not delay the expected initiation time of higher priority jobs; 
-essentially using smaller jobs to fill holes in the resource allocation plan. 
-Effective backfill scheduling does require users to specify job time limits.
-SLURM offers a <u>gang scheduler</u>, which time-slices jobs in the same partition/queue
-and can be used to preempt jobs from lower-priority queues in order to execute
-jobs in higher priority queues.
-SLURM also supports a plugin for use of 
+<p>SLURM can be configured with rather simple or quite sophisticated
+scheduling algorithms depending upon your needs and willingness to 
+manage the configuration (much of which requires a database).
+The first configuration parameter of interest is <b>PriorityType</b>
+with two options available: <i>basic</i> (first-in-first-out) and
+<i>multifactor</i>. 
+The <i>multifactor</i> plugin will assign a priority to jobs based upon
+a multitude of configuration parameters (age, size, fair-share allocation, 
+etc.) and its details are beyond the scope of this document.
+See the <a href="priority_multifactor.html">Multifactor Job Priority Plugin</a>
+document for details.</p>
+
+<p>The <b>SchedType</b> configuration parameter controls how queued
+jobs are scheduled and several options are available.
+<ul>
+<li><i>builtin</i> will initiate jobs strictly in their priority order,
+typically first-in-first-out</li>
+<li><i>backfill</i> will initiate a lower-priority job if doing so does 
+not delay the expected initiation time of higher priority jobs; essentially 
+using smaller jobs to fill holes in the resource allocation plan. Effective 
+backfill scheduling does require users to specify job time limits.</li>
+<li><i>gang</i> time-slices jobs in the same partition/queue and can be 
+used to preempt jobs from lower-priority queues in order to execute
+jobs in higher priority queues.</li>
+<li><i>wiki</i> is an interface for use with
 <a href="http://www.clusterresources.com/pages/products/maui-cluster-scheduler.php">
-The Maui Scheduler</a> or 
+The Maui Scheduler</a></li>
+<li><i>wiki2</i> is an interface for use with the
 <a href="http://www.clusterresources.com/pages/products/moab-cluster-suite.php">
-Moab Cluster Suite</a> which offer sophisticated scheduling algorithms.
-For more information about these options see
+Moab Cluster Suite</a></li>
+</ul>
+
+<p>For more information about scheduling options see
 <a href="gang_scheduling.html">Gang Scheduling</a>,
-<a href="preempt.html">Preemption</a> and
+<a href="preempt.html">Preemption</a>,
+<a href="reservations.html">Resource Reservation Guide</a>,
+<a href="resource_limits.html">Resource Limits</a> and
 <a href="cons_res_share.html">Sharing Consumable Resources</a>.</p> 
 
-<h3>Node selection</h3>
-<p>The node selection mechanism used by SLURM is controlled by the 
+<h3>Resource selection</h3>
+<p>The resource selection mechanism used by SLURM is controlled by the 
 <b>SelectType</b> configuration parameter. 
 If you want to execute multiple jobs per node, but apportion the processors, 
 memory and other resources, the <i>cons_res</i> (consumable resources) 
@@ -273,7 +298,7 @@ This is meant to be exploited by any parallel debugger (notably, TotalView),
 and support is unconditionally compiled into SLURM code. 
 </p>
 <p>We use a patched version of TotalView that looks for a "totalview_jobid" 
-symbol in <b>srun</b> that it then uses (configurably) to perform a bulk 
+symbol in <b>srun</b> that it then uses (per configuration) to perform a bulk 
 launch of the <b>tvdsvr</b> daemons via a subsequent <b>srun</b>. Otherwise
 it is difficult to get TotalView to use <b>srun</b> for a bulk launch, since 
 <b>srun</b> will be unable to determine for which job it is launching tasks.
@@ -411,22 +436,20 @@ PartitionName=pbatch Nodes=mcr[192-1151]
 </pre>
  
 <h2>Security</h2>
-<p>The use of <a href="http://www.openssl.org/">OpenSSL</a> is 
-recommended to provide a digital signature on job step credentials.
-<a href="http://home.gna.org/munge/">Munge</a> can alternately
-be used with somewhat slower performance.
+<p>Besides authentication of SLURM communications based upon the value
+of the <b>AuthType</b>, digital signatures are used in job step 
+credentials. 
 This signature is used by <i>slurmctld</i> to construct a job step
 credential, which is sent to <i>srun</i> and then forwarded to
 <i>slurmd</i> to initiate job steps.
 This design offers improved performance by removing much of the 
 job step initiation overhead from the <i> slurmctld </i> daemon.
-The mechanism to be used is controlled through the <b>CryptoType</b>
-configuration parameter (newly added in SLURM version 1.3, 
-earlier versions always use OpenSSL).</p>
+The digital signature mechanism is specified by the <b>CryptoType</b> 
+configuration parameter and the default mechanism is MUNGE. </p>
 
 <h3>OpenSSL</h3>
-<p>If using OpenSSL digital signatures, unique job credential keys 
-must be created for your site using the program 
+<p>If using <a href="http://www.openssl.org/">OpenSSL</a> digital signatures, 
+unique job credential keys must be created for your site using the program 
 <a href="http://www.openssl.org/">openssl</a>. 
 <b>You must use openssl and not ssh-genkey to construct these keys.</b>
 An example of how to do this is shown below. Specify file names that 
@@ -447,22 +470,23 @@ credential, which is sent to <i>srun</i> and then forwarded to
 <i>openssl rsa -in &lt;sysconfdir&gt;/slurm.key -pubout -out  &lt;sysconfdir&gt;/slurm.cert</i> 
 </p>
 
-<h3>Munge</h3>
-<p>If using Munge digital signatures, no SLURM keys are required.
-This will be address in the installation and configuration of Munge.</p>
+<h3>MUNGE</h3>
+<p>If using MUNGE digital signatures, no SLURM keys are required.
+This will be addressed in the installation and configuration of MUNGE.</p>
 
 <h3>Authentication</h3>
 <p>Authentication of communications (identifying who generated a particular
 message) between SLURM components can use a different security mechanism 
 that is configurable.  
-You must specify one &quot;auth&quot; plugin for this purpose (<b>AuthType</b>. 
+You must specify one &quot;auth&quot; plugin for this purpose using the
+<b>AuthType</b> configuration parameter. 
 Currently, only three authentication plugins are supported: 
 <b>auth/none</b>, <b>auth/authd</b>, and <b>auth/munge</b>. 
-The auth/none plugin is built and used by default, but either 
+The auth/none plugin is built by default, but either 
 Brent Chun's <a href="http://www.theether.org/authd/">authd</a>, 
-or LLNL's <a href="http://home.gna.org/munge/">munge</a> 
+or LLNL's <a href="http://home.gna.org/munge/">MUNGE</a> 
 should be installed in order to get properly authenticated communications. 
-Unless you are experience with authd, we recommend the use of munge.
+Unless you are experienced with authd, we recommend the use of MUNGE.
 The configure script in the top-level directory of this distribution will 
 determine which authentication plugins may be built. 
 The configuration file specifies which of the available plugins will be utilized. </p>
@@ -613,9 +637,10 @@ SLURM and restart the daemons.
 An exception to this is that jobs may be lost when installing new pre-release 
 versions (e.g. 1.3.0-pre1 to 1.3.0-pre2). We'll try to note these cases 
 in the NEWS file.
+Contents of major releases are also described in the RELEASE_NOTES file.
 
 </pre> <p class="footer"><a href="#top">top</a></p>
 
-<p style="text-align:center;">Last modified 5 June 2008</p>
+<p style="text-align:center;">Last modified 7 January 2009</p>
 
 <!--#include virtual="footer.txt"-->
diff --git a/doc/html/reservations.shtml b/doc/html/reservations.shtml
new file mode 100644
index 0000000000000000000000000000000000000000..14c1cb6a52e2292513e7ce4becfbf5ce49ff66c2
--- /dev/null
+++ b/doc/html/reservations.shtml
@@ -0,0 +1,165 @@
+<!--#include virtual="header.txt"-->
+
+<h1>Resource Reservation Guide</h1>
+
+<p>SLURM version 2.0 has the ability to reserve resources for jobs
+being executed by select users and/or select bank accounts.
+A resource reservation identifies the nodes to be reserved
+and a time period during which the reservation is available.
+Note that resource reservations are not compatible with SLURM's
+gang scheduler plugin, since the termination time of running jobs
+cannot be accurately predicted.</p>
+
+<p>Reservations can be created, updated, or destroyed only by user root
+or the configured <i>SlurmUser</i> using the <i>scontrol</i> command. 
+The <i>scontrol</i>, <i>smap</i> and <i>sview</i> commands can be used
+to view reservations. 
+The man pages for the various commands contain details.</p>
+
+<p>Note for users of Maui or Moab schedulers: <br>
+Maui and Moab are not integrated with SLURM's resource reservation system,
+but should use their own advanced reservation system.</p>
+
+<h2>Reservation Creation</h2>
+
+<p>One common mode of operation for a reservation would be to reserve
+an entire computer at a particular time for a system down time.
+The example below shows the creation of a full-system reservation 
+at 16:00 hours on 6 February and lasting for 120 minutes. 
+The "maint" flag is used to identify the reservation for accounting 
+purposes as system maintenance. As the reservation time approaches, 
+only jobs that can complete by the reservation time will be 
+initiated.</p>
+<pre>
+$ scontrol create reservation starttime=2009-02-06T16:00:00 \
+   duration=120 user=root flags=maint nodes=ALL
+Reservation created: root_4
+
+$ scontrol show reservation
+ReservationName=root_4 StartTime=2009-02-06T16:00:00 
+   EndTime=2009-02-06T18:00:00 Duration=120
+   Nodes=ALL NodeCnt=20 Features=(null) PartitionName=(null)
+   Flags=MAINT,SPEC_NODES Users=root Accounts=(null)
+</pre>
+
+<p>Another mode of operation would be to reserve specific nodes
+for an indefinite period in order to study problems on those 
+nodes. This could also be accomplished using a SLURM partition
+specifically for this purpose, but that would fail to capture
+the maintenance nature of their use.</p>
+<pre>
+$ scontrol create reservation user=root starttime=now \
+   duration=infinite flags=maint nodes=sun000
+Reservation created: root_5
+
+$ scontrol show res
+ReservationName=root_5 StartTime=2009-02-04T16:22:57 
+   EndTime=2009-02-04T16:21:57 Duration=4294967295
+   Nodes=sun000 NodeCnt=1 Features=(null) PartitionName=(null)
+   Flags=MAINT,SPEC_NODES Users=root Accounts=(null)
+</pre>
+
+<p>Our final example is to reserve ten nodes in the default 
+SLURM partition starting at noon and with a duration of 60 
+minutes, occurring daily. The reservation will be available
+only to users alan and brenda.</p>
+<pre>
+$ scontrol create reservation user=alan,brenda \
+   starttime=noon duration=60 flags=daily nodecnt=10
+Reservation created: alan_6
+
+$ scontrol show res
+ReservationName=alan_6 StartTime=2009-02-05T12:00:00 
+   EndTime=2009-02-05T13:00:00 Duration=60
+   Nodes=sun[000-003,007,010-013,017] NodeCnt=10 
+   Features=(null) PartitionName=pdebug Flags=DAILY
+   Users=alan,brenda Accounts=(null)
+</pre>
+
+<p>Note that the specific nodes associated with the reservation are
+identified immediately after the reservation is created. This permits
+users to stage files to the nodes in preparation for use during the
+reservation. Note that the reservation creation request can also 
+identify the partition from which to select the nodes or <i>one</i>
+feature that every selected node must contain.</p>
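+
+<p>As an illustrative sketch (the partition and feature names below are 
+hypothetical; see <i>man scontrol</i> for the exact option names), such 
+a request might look like:</p>
+<pre>
+$ scontrol create reservation user=alan starttime=noon duration=60 \
+   nodecnt=10 partitionname=pdebug features=bigmem
+</pre>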
+
+<p>Reservations must not overlap, with the exception of 
+maintenance mode as described below. They must either include 
+different nodes or operate at different times. If specific nodes
+are not specified when a reservation is created, SLURM will 
+automatically select nodes to avoid overlap and ensure that
+the selected nodes are available when the reservation begins.
+For ease of system maintenance, you can create a reservation
+with the "maint" flag that overlaps existing reservations.
+This permits an administrator to easily create a maintenance 
+reservation for an entire cluster without needing to remove 
+or reschedule pre-existing reservations.</p>
+
+<h2>Reservation Use</h2>
+
+<p>The reservation create response includes the reservation's name.
+This name is automatically generated by SLURM based upon the first
+user or account name and a numeric suffix. In order to use the 
+reservation, the job submit request must explicitly specify that
+reservation name. The job must be contained completely within the
+named reservation. The job will be cancelled after the reservation 
+reaches its EndTime. To let a job continue execution after the
+reservation's EndTime, the configuration option <i>ResvOverRun</i>
+can be set to control how long the job may continue to run.</p>
+<pre>
+$ sbatch --reservation=alan_6 -N4 my.script
+sbatch: Submitted batch job 65540
+</pre>
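+
+<p>A minimal sketch of the relevant <b>slurm.conf</b> line, permitting 
+jobs to run up to 30 minutes past the reservation's end (the value is 
+illustrative; see <i>man slurm.conf</i> for details):</p>
+<pre>
+ResvOverRun=30
+</pre>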
+
+<h2>Reservation Modification</h2>
+
+<p>Reservations can be modified by user root as desired. 
+For example, their duration can be altered or the set of users
+granted access changed, as shown below:</p>
+<pre>
+$ scontrol update ReservationName=root_4 \
+   duration=150 users=admin
+Reservation updated.
+
+$ scontrol show ReservationName=root_4
+ReservationName=root_4 StartTime=2009-02-06T16:00:00 
+   EndTime=2009-02-06T18:30:00 Duration=150
+   Nodes=ALL NodeCnt=20 Features=(null) 
+   PartitionName=(null) Flags=MAINT,SPEC_NODES
+   Users=admin Accounts=(null)
+</pre>
+
+<h2>Reservation Deletion</h2>
+
+<p>Reservations are automatically purged after their end time. 
+They may also be manually deleted as shown below. 
+Note that a reservation cannot be deleted while there are
+jobs running in it.</p>
+<pre>
+$ scontrol delete ReservationName=alan_6
+</pre>
+
+<h2>Reservation Accounting</h2>
+
+<p>Jobs executed within a reservation are accounted for using the appropriate
+user and bank account. If resources within a reservation are not used, those 
+resources will be accounted for as being used by all users or bank accounts
+associated with the reservation on an equal basis (e.g. if two users are
+eligible to use a reservation and neither does, each user will be reported 
+to have used half of the reserved resources).</p>
+
+<h2>Future Work</h2>
+
+<p>Several enhancements are anticipated at some point in the future.</p>
+<ol>
+<li>The automatic selection of nodes for a reservation create request may be
+sub-optimal in terms of locality (for optimized application communication).</li>
+<li> The feature specification in the reservation creation request should be 
+made more flexible to accept multiple features with AND or OR operators.</li> 
+</ol>
+
+
+<p style="text-align: center;">Last modified 2 June 2009</p>
+
+<!--#include virtual="footer.txt"-->
+
diff --git a/doc/html/resource_limits.shtml b/doc/html/resource_limits.shtml
new file mode 100644
index 0000000000000000000000000000000000000000..afcb619afe1d367054ffa93b09945970f0a9f493
--- /dev/null
+++ b/doc/html/resource_limits.shtml
@@ -0,0 +1,223 @@
+<!--#include virtual="header.txt"-->
+
+<h1>Resource Limits</h1>
+
+<p>SLURM scheduling policy support was significantly changed
+in version 2.0 in order to take advantage of the database 
+integration used for storing accounting information.
+This document describes the capabilities available in 
+SLURM version 2.0.
+New features are under active development. 
+Familiarity with SLURM's <a href="accounting.html">Accounting</a> web page
+is strongly recommended before use of this document.</p>
+
+<p>Note for users of Maui or Moab schedulers: <br>
+Maui and Moab are not integrated with SLURM's resource limits
+and should instead use their own resource limit mechanisms.</p>
+
+<h2>Configuration</h2>
+
+<p>Scheduling policy information must be stored in a database 
+as specified by the <b>AccountingStorageType</b> configuration parameter
+in the <b>slurm.conf</b> configuration file.
+Information can be recorded in either <a href="http://www.mysql.com/">MySQL</a> 
+or <a href="http://www.postgresql.org/">PostgreSQL</a>.
+For security and performance reasons, the use of 
+SlurmDBD (SLURM Database Daemon) as a front-end to the 
+database is strongly recommended. 
+SlurmDBD uses a SLURM authentication plugin (e.g. Munge).
+SlurmDBD also uses an existing SLURM accounting storage plugin
+to maximize code reuse.
+SlurmDBD uses data caching and prioritization of pending requests
+in order to optimize performance.
+While SlurmDBD relies upon existing SLURM plugins for authentication 
+and database use, the other SLURM commands and daemons are not required 
+on the host where SlurmDBD is installed. 
+Only the <i>slurmdbd</i> and <i>slurm-plugins</i> RPMs are required
+for SlurmDBD execution.</p>
+
+<p>Both accounting and scheduling policy are configured based upon
+an <i>association</i>. An <i>association</i> is a 4-tuple consisting 
+of the cluster name, bank account, user and (optionally) the SLURM 
+partition.
+In order to enforce scheduling policy, set the value of 
+<b>AccountingStorageEnforce</b>.
+This option contains a comma-separated list of behaviors you may want
+to enforce.  The valid options are:
+<ul>
+<li>associations - This will prevent users from running jobs if
+their <i>association</i> is not in the database. This option will
+prevent users from accessing invalid accounts.  
+</li>
+<li>limits - This will enforce limits set on associations.  By setting
+  this option, the 'associations' option is also set.
+</li>
+<li>wckeys - This will prevent users from running jobs under a wckey
+  that they don't have access to.  By using this option, the
+  'associations' option is also set.  The 'TrackWCKey' option is also
+  set to true.
+</li>
+</ul>
+(NOTE: The association is a combination of cluster, account, 
+user names and optional partition name.)
+<br>
+Without AccountingStorageEnforce being set (the default behavior),
+jobs will be executed based upon policies configured in SLURM on each
+cluster.
+<br> 
+It is advisable to run without the 'limits' option set when running a
+scheduler on top of SLURM, like Moab, that does not update its limits
+per association in real time.
+</p>
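+
+<p>As a minimal sketch (the host name is hypothetical), the relevant 
+<b>slurm.conf</b> lines might read:</p>
+<pre>
+AccountingStorageType=accounting_storage/slurmdbd
+AccountingStorageHost=dbhost
+AccountingStorageEnforce=associations,limits
+</pre>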
+
+<h2>Tools</h2>
+
+<p>The tool used to manage accounting policy is <i>sacctmgr</i>.
+It can be used to create and delete cluster, user, bank account, 
+and partition records plus their combined <i>association</i> record.
+See <i>man sacctmgr</i> for details on this tool and examples of 
+its use.</p>
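+
+<p>For example (the cluster, account and user names below are 
+hypothetical; see <i>man sacctmgr</i> for exact syntax), the records 
+forming an <i>association</i> might be created as follows:</p>
+<pre>
+$ sacctmgr add cluster tux
+$ sacctmgr add account physics cluster=tux \
+   description=physics organization=science
+$ sacctmgr add user brian defaultaccount=physics
+</pre>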
+
+<p>A web interface with graphical output is currently under development.</p>
+
+<p>Changes made to the scheduling policy are uploaded to 
+the SLURM control daemons on the various clusters and take effect 
+immediately. When an association is deleted, all jobs running or 
+pending which belong to that association are immediately canceled.
+When limits are lowered, running jobs will not be canceled to 
+satisfy the new limits, but the new lower limits will be enforced.</p>
+
+<h2>Policies supported</h2>
+
+<p> A limited subset of scheduling policy options are currently 
+supported. 
+The available options are expected to increase as development 
+continues. 
+Most of these scheduling policy options are available not only 
+for a user association, but also for each cluster and account. 
+If a new association is created for some user and a scheduling 
+policy option is not specified, the default will be the value 
+for the cluster plus account pair; if that is not specified, 
+then the value for the cluster; and if that is not specified, 
+then no limit will apply.</p>
+
+<p>Currently available scheduling policy options:</p>
+<ul>
+<li><b>Fairshare=</b> Used for determining priority.  Essentially
+  this is the amount of claim this association and its children have
+  on the system above it.</li>
+
+<!-- For future use
+<li><b>GrpCPUMins=</b> A hard limit of cpu minutes to be used by jobs
+  running from this association and its children.  If this limit is
+  reached all jobs running in this group will be killed, and no new
+  jobs will be allowed to run.
+</li>
+-->
+
+<!-- For future use
+<li><b>GrpCPUs=</b> The total count of cpus able to be used at any given
+  time from jobs running from this association and its children.  If
+  this limit is reached new jobs will be queued but only allowed to
+  run after resources have been relinquished from this group.
+</li>
+-->
+
+<li><b>GrpJobs=</b> The total number of jobs able to run at any given
+  time from this association and its children.  If
+  this limit is reached new jobs will be queued but only allowed to
+  run after previous jobs complete from this group.
+</li>
+
+<li><b>GrpNodes=</b> The total count of nodes able to be used at any given
+  time from jobs running from this association and its children.  If
+  this limit is reached new jobs will be queued but only allowed to
+  run after resources have been relinquished from this group.
+</li>
+
+<li><b>GrpSubmitJobs=</b> The total number of jobs able to be submitted
+  to the system at any given time from this association and its children.  If
+  this limit is reached new submission requests will be denied until
+  previous jobs complete from this group.
+</li>
+
+<li><b>GrpWall=</b> The maximum wall clock time any job submitted to
+  this group can run for.  If this limit is reached submission requests
+  will be denied. 
+</li>
+
+<!-- For future use
+<li><b>MaxCPUMinsPerJob=</b> A limit of cpu minutes to be used by jobs
+  running from this association.  If this limit is
+  reached the job will be killed.
+</li>
+-->
+
+<!-- For future use
+<li><b>MaxCPUsPerJob=</b> The maximum size in cpus any given job can
+  have from this association.  If this limit is reached the job will
+  be denied at submission.
+</li>
+-->
+
+<li><b>MaxJobs=</b> The total number of jobs able to run at any given
+  time from this association.  If this limit is reached new jobs will
+  be queued but only allowed to run after previous jobs complete from
+  this association.
+</li>
+
+<li><b>MaxNodesPerJob=</b> The maximum size in nodes any given job can
+  have from this association.  If this limit is reached the job will
+  be denied at submission.
+</li>
+ 
+<li><b>MaxSubmitJobs=</b> The maximum number of jobs able to be submitted
+  to the system at any given time from this association.  If
+  this limit is reached new submission requests will be denied until
+  previous jobs complete from this association.
+</li>
+
+<li><b>MaxWallDurationPerJob=</b> The maximum wall clock time any job
+  submitted to this association can run for.  If this limit is reached
+  the job will be denied at submission.
+</li>
+
+<li><b>QOS=</b> A comma-separated list of the QOSs this association is
+  able to run with.
+</li>
+</ul>
+
+<p>The <b>MaxNodes</b> and <b>MaxWall</b> options already exist in 
+SLURM's configuration on a per-partition basis, but these options 
+provide the ability to establish limits on a per-user basis.
+The <b>MaxJobs</b> option provides an entirely new mechanism 
+for SLURM to control the workload any individual may place on
+a cluster in order to achieve some balance between users.</p>
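+
+<p>As an illustrative sketch (the user name and values are hypothetical; 
+exact option spellings per <i>man sacctmgr</i>), per-user limits might 
+be set as follows:</p>
+<pre>
+$ sacctmgr modify user where name=brian set MaxJobs=10 MaxNodesPerJob=64
+</pre>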
+
+<p>Fair-share scheduling is based upon the hierarchical bank account
+data already maintained in the SLURM database.  This information
+can now be leveraged by
+the <a href="priority_multifactor.html">priority/multifactor</a> plugin.
+<br>
+The priorities of pending jobs will be adjusted in order to 
+deliver resources in proportion to each association's fair-share.
+<br>
+Consider the trivial example of a single bank account with 
+two users named Alice and Brian. 
+<br>
+We might allocate Alice 60 percent of the resources and Brian the 
+remaining 40 percent.
+<br>
+If Alice has actually used 80 percent of available resources in the 
+recent past, then Brian's pending jobs will automatically be given a
+higher priority than Alice's in order to deliver resources in 
+proportion to the fair-share target. 
+<br>
+The time window considered in fair-share scheduling will be configurable 
+as well as the relative importance of job age (time waiting to run), 
+but this example illustrates the concepts involved.</p>
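+
+<p>A sketch of how such fair-share targets might be assigned with 
+<i>sacctmgr</i> (user names per the example above; exact syntax per 
+<i>man sacctmgr</i>):</p>
+<pre>
+$ sacctmgr modify user where name=alice set fairshare=60
+$ sacctmgr modify user where name=brian set fairshare=40
+</pre>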
+
+<p style="text-align: center;">Last modified 2 March 2009</p>
+
+<!--#include virtual="footer.txt"-->
diff --git a/doc/html/review_release.html b/doc/html/review_release.html
index 607be0949bb4b547b983aef97d2073fd21d5e3d6..3ddf7f4755512ce14f68e424f6e69c65f315a7c0 100644
--- a/doc/html/review_release.html
+++ b/doc/html/review_release.html
@@ -2,6 +2,7 @@
 
 <head>
 <title>SLURM Web pages for Review and Release</title>
+<!-- Updated 6 March 2009 -->
 </head>
 
 <body>
@@ -14,10 +15,13 @@
 <li><a href="https://computing-pre.llnl.gov/linux/slurm/authplugins.html">authplugins.html</a></li>
 <li><a href="https://computing-pre.llnl.gov/linux/slurm/big_sys.html">big_sys.html</a></li>
 <li><a href="https://computing-pre.llnl.gov/linux/slurm/bluegene.html">bluegene.html</a></li>
+<li><a href="https://computing-pre.llnl.gov/linux/slurm/checkpoint_blcr.html">checkpoint_blcr.html</a></li>
 <li><a href="https://computing-pre.llnl.gov/linux/slurm/checkpoint_plugins.html">checkpoint_plugins.html</a></li>
 <li><a href="https://computing-pre.llnl.gov/linux/slurm/configurator.html">configurator.html</a></li>
 <li><a href="https://computing-pre.llnl.gov/linux/slurm/cons_res.html">cons_res.html</a></li>
 <li><a href="https://computing-pre.llnl.gov/linux/slurm/cons_res_share.html">cons_res_share.html</a></li>
+<li><a href="https://computing-pre.llnl.gov/linux/slurm/cray.html">cray.html</a></li>
+<li><a href="https://computing-pre.llnl.gov/linux/slurm/crypto_plugins.html">crypto_plugins.html</a></li>
 <li><a href="https://computing-pre.llnl.gov/linux/slurm/dist_plane.html">dist_plane.html</a></li>
 <li><a href="https://computing-pre.llnl.gov/linux/slurm/documentation.html">documentation.html</a></li>
 <li><a href="https://computing-pre.llnl.gov/linux/slurm/download.html">download.html</a></li>
@@ -31,24 +35,33 @@
 <li><a href="https://computing-pre.llnl.gov/linux/slurm/maui.html">maui.html</a></li>
 <li><a href="https://computing-pre.llnl.gov/linux/slurm/mc_support.html">mc_support.html</a></li>
 <li><a href="https://computing-pre.llnl.gov/linux/slurm/moab.html">moab.html</a></li>
+<li><a href="https://computing-pre.llnl.gov/linux/slurm/mpi_guide.html">mpi_guide.html</a></li>
 <li><a href="https://computing-pre.llnl.gov/linux/slurm/mpiplugins.html">mpiplugins.html</a></li>
 <li><a href="https://computing-pre.llnl.gov/linux/slurm/news.html">news.html</a></li>
 <li><a href="https://computing-pre.llnl.gov/linux/slurm/overview.html">overview.html</a></li>
 <li><a href="https://computing-pre.llnl.gov/linux/slurm/platforms.html">platforms.html</a></li>
 <li><a href="https://computing-pre.llnl.gov/linux/slurm/plugins.html">plugins.html</a></li>
 <li><a href="https://computing-pre.llnl.gov/linux/slurm/power_save.html">power_save.html</a></li>
+<li><a href="https://computing-pre.llnl.gov/linux/slurm/preempt.html">preempt.html</a></li>
+<li><a href="https://computing-pre.llnl.gov/linux/slurm/priority_multifactor.html">priority_multifactor.html</a></li>
+<li><a href="https://computing-pre.llnl.gov/linux/slurm/priority_plugins.html">priority_plugins.html</a></li>
 <li><a href="https://computing-pre.llnl.gov/linux/slurm/proctrack_plugins.html">proctrack_plugins.html</a></li>
 <li><a href="https://computing-pre.llnl.gov/linux/slurm/programmer_guide.html">programmer_guide.html</a></li>
 <li><a href="https://computing-pre.llnl.gov/linux/slurm/publications.html">publications.html</a></li>
 <li><a href="https://computing-pre.llnl.gov/linux/slurm/quickstart_admin.html">quickstart_admin.html</a></li>
 <li><a href="https://computing-pre.llnl.gov/linux/slurm/quickstart.html">quickstart.html</a></li>
+<li><a href="https://computing-pre.llnl.gov/linux/slurm/reservations.html">reservations.html</a></li>
+<li><a href="https://computing-pre.llnl.gov/linux/slurm/resource_limits.html">resource_limits.html</a></li>
 <li><a href="https://computing-pre.llnl.gov/linux/slurm/schedplugins.html">schedplugins.html</a></li>
 <li><a href="https://computing-pre.llnl.gov/linux/slurm/selectplugins.html">selectplugins.html</a></li>
 <li><a href="https://computing-pre.llnl.gov/linux/slurm/slurm.html">slurm.html</a></li>
+<li><a href="https://computing-pre.llnl.gov/linux/slurm/sun_const.html">sun_const.html</a></li>
 <li><a href="https://computing-pre.llnl.gov/linux/slurm/switchplugins.html">switchplugins.html</a></li>
 <li><a href="https://computing-pre.llnl.gov/linux/slurm/taskplugins.html">taskplugins.html</a></li>
 <li><a href="https://computing-pre.llnl.gov/linux/slurm/team.html">team.html</a></li>
 <li><a href="https://computing-pre.llnl.gov/linux/slurm/testimonials.html">testimonials.html</a></li>
+<li><a href="https://computing-pre.llnl.gov/linux/slurm/topology.html">topology.html</a></li>
+<li><a href="https://computing-pre.llnl.gov/linux/slurm/topology_plugin.html">topology_plugin.html</a></li>
 <li><a href="https://computing-pre.llnl.gov/linux/slurm/troubleshoot.html">troubleshoot.html</a></li>
 </ul>
 </body>
diff --git a/doc/html/sched_policy.shtml b/doc/html/sched_policy.shtml
deleted file mode 100644
index 53aed3fd9dd91bfcad87b8ec11bfb1bb76c319a4..0000000000000000000000000000000000000000
--- a/doc/html/sched_policy.shtml
+++ /dev/null
@@ -1,116 +0,0 @@
-<!--#include virtual="header.txt"-->
-
-<h1>Sheduling Policy</h1>
-
-<p>SLURM scheduling policy support was significantly changed
-in version 1.3 in order to take advantage of the database 
-integration used for storing accounting information.
-This document describes the capabilities available in 
-SLURM version 1.3.4.
-New features are under active development. 
-Familiarity with SLURM's <a href="accounting">Accounting</a> web page
-is strongly recommended before use of this document.</p>
-
-<h2>Configuration</h2>
-
-<p>Scheduling policy information must be stored in a database 
-as specified by the <b>AccountingStorageType</b> configuration parameter
-in the <b>slurm.conf</b> configuration file.
-Information can be recorded in either <a href="http://www.mysql.com/">MySQL</a> 
-or <a href="http://www.postgresql.org/">PostgreSQL</a>.
-For security and performance reasons, the use of 
-SlurmDBD (SLURM Database Daemon) as a front-end to the 
-database is strongly recommended. 
-SlurmDBD uses a SLURM authentication plugin (e.g. Munge).
-SlurmDBD also uses an existing SLURM accounting storage plugin
-to maximize code reuse.
-SlurmDBD uses data caching and prioritization of pending requests
-in order to optimize performance.
-While SlurmDBD relies upon existing SLURM plugins for authentication 
-and database use, the other SLURM commands and daemons are not required 
-on the host where SlurmDBD is installed. 
-Only the <i>slurmdbd</i> and <i>slurm-plugins</i> RPMs are required
-for SlurmDBD execution.</p>
-
-<p>Both accounting and scheduling policy are configured based upon
-an <i>association</i>. An <i>association</i> is a 4-tuple consisting 
-of the cluster name, bank account, user and (optionally) the SLURM 
-partition.
-In order to enforce scheduling policy, set the value of 
-<b>AccountingStorageEnforce</b> to "1" in <b>slurm.conf</b>.
-This prevents users from running any jobs without an valid
-<i>association</i> record in the database and enforces scheduling 
-policy limits that have been configured.  
-In order to enforce association limits along with scheduling policy,
-set the value of <b>AccountingStorageEnforce</b> to "2"
-in <b>slurm.conf</b>.  When set to "1" association limits will not be
-enforced.  It is a good idea to run in this mode when running a
-scheduler on top of slurm, like Moab, that does not update in real
-time their limits per association.
-</p>
-
-<h2>Tools</h2>
-
-<p>The tool used to manage accounting policy is <i>sacctmgr</i>.
-It can be used to create and delete cluster, user, bank account, 
-and partition records plus their combined <i>association</i> record.
-See <i>man sacctmgr</i> for details on this tools and examples of 
-its use.</p>
-
-<p>A web interface with graphical output is currently under development.</p>
-
-<p>Changes made to the scheduling policy are uploaded to 
-the SLURM control daemons on the various clusters and take effect 
-immediately. When an association is delete, all jobs running or 
-pending which belong to that association are immediately cancelled.
-When limits are lowered, running jobs will not be cancelled to 
-satisfy the new limits, but the new lower limits will be enforced.</p>
-
-<h2>Policies supported</h2>
-
-<p> A limited subset of scheduling policy options are currently 
-supported. 
-The available options are expected to increase as development 
-continues. 
-Most of these scheduling policy options are available not only 
-for an association, but also for each cluster and account. 
-If a new association is created for some user and some scheduling 
-policy options is not specified, the default will be the option 
-for the cluster plus account pair and if that is not specified 
-then the cluster and if that is not specified then no limit 
-will apply.</p>
-
-<p>Currently available cheduling policy options:</p>
-<ul>
-<li><b>MaxJobs</b> Maxiumum number of running jobs for this association</li>
-<li><b>MaxNodes</b> Maxiumum number of nodes for any single jobs in this association</li>
-<li><b>MaxWall</b> Maxiumum wall clock time limit for any single jobs in this association</li>
-</ul>
-
-<p>The <b>MaxNodes</b> and <b>MaxWall</b> options already exist in 
-SLURM's configuration on a per-partition basis, but these options 
-provide the ability to establish limits on a per-user basis.
-The <b>MaxJobs</b> option provides an entirely new mechanism 
-for SLURM to control the workload any individual may place on
-a cluster in order to achieve some balance between users.</p>
-
-<p>The next scheduling policy expected to be added is the concept 
-of fair-share scheduling based upon the hierarchical bank account
-data is already maintained in the SLURM database. 
-The priorities of pending jobs will be adjusted in order to 
-deliver resources in proportion to each association's fair-share.
-Consider the trivial example of a single bank account with 
-two users named Alice and Brian. 
-We might allocate Alice 60 percent of the resources and Brian the 
-remaining 40 precent.
-If Alice has actually used 80 percent of available resources in the 
-recent past, then Brian's pending jobs will automatically be given a
-higher priority than Alice's in order to deliver resources in 
-proportion to the fair-share target. 
-The time window considered in fair-share scheduling will be configurable 
-as well as the relative importance of job age (time waiting to run), 
-but this this example illustrates the concepts involved.</p>
-
-<p style="text-align: center;">Last modified 24 June 2008</p>
-
-</ul></body></html>
diff --git a/doc/html/selectplugins.shtml b/doc/html/selectplugins.shtml
index 7ef6a4088b83364dc577c7d5046ddd86c6b1956f..6796452ef2e5952b9456e2c7b86792322b1b50a4 100644
--- a/doc/html/selectplugins.shtml
+++ b/doc/html/selectplugins.shtml
@@ -1,11 +1,11 @@
 <!--#include virtual="header.txt"-->
 
-<h1><a name="top">SLURM Node Selection Plugin API</a></h1>
+<h1><a name="top">Resource Selection Plugin Programmer Guide</a></h1>
 
-<h2> Overview</h2>
-<p> This document describes SLURM node selection plugins and the API that defines 
+<h2>Overview</h2>
+<p>This document describes SLURM resource selection plugins and the API that defines 
 them. It is intended as a resource to programmers wishing to write their own SLURM 
-node selection plugins. This is version 0 of the API.</p>
+node selection plugins. This is version 100 of the API.</p>
 
 <p>SLURM node selection plugins are SLURM plugins that implement the SLURM node selection
 API described herein. They are intended to provide a mechanism for both selecting 
@@ -19,7 +19,7 @@ abbreviation for the type of node selection algorithm. We recommend, for example
 <li><b>linear</b>&#151;A plugin that selects nodes assuming a one-dimensional 
 array of nodes. The nodes are selected so as to minimize the number of consecutive 
 sets of nodes utilizing a best-fit algorithm. While supporting shared nodes, 
-this plugin does not allocate individual processors, memory, etc. 
+this plugin does not allocate individual processors, but can allocate memory to jobs. 
 This plugin is recommended for systems without shared nodes.</li>
 <li><b>cons_res</b>&#151;A plugin that can allocate individual processors, 
 memory, etc. within nodes. This plugin is recommended for systems with 
@@ -36,51 +36,48 @@ Note carefully, however, the versioning discussion below.</p>
 
 <p>A simplified flow of logic follows:
 <pre>
-slurmctld daemon starts
-if (<i>select_p_state_restore)</i>() != SLURM_SUCCESS)
+/* slurmctld daemon starts, recover state */
+if ((<i>select_p_node_init</i>()     != SLURM_SUCCESS) ||
+    (<i>select_p_block_init</i>()    != SLURM_SUCCESS) ||
+    (<i>select_p_state_restore</i>() != SLURM_SUCCESS) ||
+    (<i>select_p_job_init</i>()      != SLURM_SUCCESS))
    abort
 
-slurmctld reads the rest of its configuration and state information
-if (<i>select_p_node_init</i>() != SLURM_SUCCESS)
-   abort
-if (<i>select_p_block_init</i>() != SLURM_SUCCESS)
-   abort
-
-wait for job 
+/* wait for job arrival */
 if (<i>select_p_job_test</i>(all available nodes) != SLURM_SUCCESS) {
    if (<i>select_p_job_test</i>(all configured nodes) != SLURM_SUCCESS)
-      reject the job and tell the user it can never run
+      /* reject the job and tell the user it can never run */
    else
-      leave the job queued for later execution
+      /* leave the job queued for later execution */
 } else {
-   update job's node list and node bitmap
+   /* update job's node list and node bitmap */
    if (<i>select_p_job_begin</i>() != SLURM_SUCCESS)
-      leave the job queued for later execution
+      /* leave the job queued for later execution */
    else {
       while (!<i>select_p_job_ready</i>())
         wait
-      execute the job
-      wait for job to end or be terminated
+      /* execute the job */
+      /* wait for job to end or be terminated */
       <i>select_p_job_fini</i>()
     }
 }
 
-wait for slurmctld shutdown request
+/* wait for slurmctld shutdown request */
 <i>select_p_state_save</i>()
 </pre>
 <p>Depending upon failure modes, it is possible that 
 <span class="commandline">select_p_state_save()</span>
-will not be called at slurmctld terminatation.
+will not be called at slurmctld termination.
 When slurmctld is restarted, other function calls may be replayed.
 <span class="commandline">select_p_node_init()</span> may be used 
-to syncronize the plugin's state with that of slurmctld.</p>
+to synchronize the plugin's state with that of slurmctld.</p>
 
 <p class="footer"><a href="#top">top</a></p>
 
 <h2>Data Objects</h2>
 <p> These functions are expected to read and/or modify data structures directly in 
 the slurmctld daemon's memory. Slurmctld is a multi-threaded program with independent 
-read and write locks on each data structure type. Thererfore the type of operations 
+read and write locks on each data structure type. Therefore the type of operations 
 permitted on various data structures is identified for each function.</p>
 
 <p>These functions make use of bitmaps corresponding to the nodes in a table. 
@@ -95,7 +92,8 @@ manipulations (these functions are directly accessible from the plugin).</p>
 <p>The following functions must appear. Functions which are not implemented should 
 be stubbed.</p>
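+
+<p>As a minimal sketch (the plugin name and type string are hypothetical, 
+and the include paths assume a build within the SLURM source tree), a new 
+plugin declares the standard SLURM plugin symbols and stubs any function 
+it does not implement:</p>
+<pre>
+#include &lt;slurm/slurm.h&gt;
+#include &lt;slurm/slurm_errno.h&gt;
+#include "src/slurmctld/slurmctld.h"
+
+const char plugin_name[] = "Example node selection plugin";
+const char plugin_type[] = "select/example";
+const uint32_t plugin_version = 100;
+
+/* Stub for a function this plugin does not implement */
+extern int select_p_update_block(update_part_msg_t *part_desc_ptr)
+{
+	return SLURM_SUCCESS;
+}
+</pre>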
 
-<h3>Global Node Selection Functions</h3>
+<h3>State Save Functions</h3>
+
 <p class="commandline">int select_p_state_save (char *dir_name);</p>
 <p style="margin-left:.2in"><b>Description</b>: Save any global node selection state 
 information to a file within the specified directory. The actual file name used is plugin specific. 
@@ -118,15 +116,19 @@ from which user SlurmUser (as defined in slurm.conf) can read. Cannot be NULL.</
 <p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure, 
 the plugin should return SLURM_ERROR, causing slurmctld to exit.</p>
 
+<p class="footer"><a href="#top">top</a></p>
+
+<h3>State Initialization Functions</h3>
+
 <p class="commandline">int select_p_node_init (struct node_record *node_ptr, int node_cnt);</p>
 <p style="margin-left:.2in"><b>Description</b>: Note the initialization of the node record data 
 structure. This function is called when the node records are initially established and again 
 when any nodes are added to or removed from the data structure. </p>
 <p style="margin-left:.2in"><b>Arguments</b>:<br>
 <span class="commandline"> node_ptr</span>&nbsp;&nbsp;&nbsp;(input) pointer 
-to the node data records. Data in these records can read. Nodes deleted after initiialization 
+to the node data records. Data in these records can read. Nodes deleted after initialization 
 may have their the <i>name</i> field in the record cleared (zero length) rather than 
-rebuilding the node records and bitmaps. <br>
+rebuilding the node records and bitmaps.<br><br>
 <span class="commandline"> node_cnt</span>&nbsp; &nbsp;&nbsp;(input) number 
 of node data records.</p>
 <p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure, 
@@ -143,33 +145,89 @@ consider that nodes can be removed from one partition and added to a different p
 <p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure, 
 the plugin should return SLURM_ERROR, causing slurmctld to exit.</p>
 
+<p class="commandline">int select_p_job_init(List job_list);<p>
+<p style="margin-left:.2in"><b>Description</b>: Used at slurm startup to 
+synchronize plugin (and node) state with that of currently active jobs.</p>
+<p style="margin-left:.2in"><b>Arguments</b>:
+<span class="commandline"> job_list</span>&nbsp; &nbsp;&nbsp;(input) 
+list of slurm jobs from slurmctld job records.</p>
+<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure,
+the plugin should return SLURM_ERROR.</p>
+<p class="footer"><a href="#top">top</a></p>
+
+<h3>State Synchronization Functions</h3>
+
 <p class="commandline">int select_p_update_block (update_part_msg_t *part_desc_ptr);</p>
-<p style="margin-left:.2in"><b>Description</b>: This function is called when the admin needs to manually update the state of a block. </p>
+<p style="margin-left:.2in"><b>Description</b>: This function is called when the admin needs 
+to manually update the state of a block. </p>
 <p style="margin-left:.2in"><b>Arguments</b>:
-<span class="commandline"> part_desc_ptr</span>&nbsp;&nbsp;&nbsp;(input) partitition
+<span class="commandline"> part_desc_ptr</span>&nbsp;&nbsp;&nbsp;(input) partition
 description variable.  Containing the block name and the state to set the block.</p>
 <p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure, 
 the plugin should return SLURM_ERROR.</p>
 
-<p class="commandline">int select_p_pack_node_info (time_t last_query_time, Buf *buffer_ptr);</p>
-<p style="margin-left:.2in"><b>Description</b>: pack node specific information into a buffer.</p>
+<p class="commandline">int select_p_update_nodeinfo(struct node_record *node_ptr);</p>
+<p style="margin-left:.2in"><b>Description</b>: Update plugin-specific information
+related to the specified node. This is called after changes in a node's configuration.</p>
+<p style="margin-left:.2in"><b>Argument</b>:
+<span class="commandline"> node_ptr</span>&nbsp; &nbsp;&nbsp;(input) pointer
+to the node for which information is requested.</p>
+<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure,
+the plugin should return SLURM_ERROR.</p>
+
+<p class="commandline">int select_p_update_node_state (int index, uint16_t state);</p>
+<p style="margin-left:.2in"><b>Description</b>: push a change of state
+into the plugin the index should be the index from the slurmctld of
+the entire system.  The state should be the same state the node_record
+was set to in the slurmctld.</p>
+<p style="margin-left:.2in"><b>Arguments</b>:<br>
+<span class="commandline"> index</span>&nbsp;&nbsp;&nbsp;(input) index
+of the node in reference to the entire system.<br><br>
+<span class="commandline"> state</span>&nbsp;&nbsp;&nbsp;(input) new
+state of the node.</p>
+<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful, otherwise SLURM_ERROR</p>
+
+<p class="commandline">int select_p_update_sub_node (update_part_msg_t *part_desc_ptr);</p>
+<p style="margin-left:.2in"><b>Description</b>: update the state of a portion of
+a SLURM node. Currently used on BlueGene systems to place node cards within a 
+midplane into or out of an error state.</p>
 <p style="margin-left:.2in"><b>Arguments</b>:
-<span class="commandline">
-last_query_time</span>&nbsp;&nbsp;&nbsp;(input) time that the data was
-last saved.<br>
-<span class="commandline"> buffer_ptr</span>&nbsp;&nbsp;&nbsp;(input/output) buffer into 
-which the node data is appended.</p>
-<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful, 
-SLURM_NO_CHANGE_IN_DATA if data has not changed since last packed, otherwise SLURM_ERROR</p>
+<span class="commandline"> part_desc_ptr</span>&nbsp;&nbsp;&nbsp;(input) partition
+description variable containing the sub-block name and its new state.</p>
+<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful, otherwise SLURM_ERROR</p>
+
+<p class="commandline">int select_p_alter_node_cnt (enum
+select_node_cnt type, void *data);</p>
+<p style="margin-left:.2in"><b>Description</b>: Used for systems like
+a Bluegene system where SLURM sees 1 node where many nodes really
+exists, in Bluegene's case 1 node reflects 512 nodes in real live, but
+since usually 512 is the smallest allocatable block slurm only handles
+it as 1 node.  This is a function so the user can issue a 'real'
+number and the function will alter it so slurm can understand what the
+user really means in slurm terms.</p>
+<p style="margin-left:.2in"><b>Arguments</b>:<br>
+<span class="commandline"> type</span>&nbsp;&nbsp;&nbsp;(input) enum
+telling the plug in what the user is really wanting.<br><br>
+<span class="commandline"> data</span>&nbsp;&nbsp;&nbsp;(input/output)
+Is a void * so depending on the type sent in argument 1 this should 
+adjust the variable returning what the user is asking for.</p>
+<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful, otherwise SLURM_ERROR</p>
+
+<p class="commandline">int select_p_reconfigure (void);</p>
+<p style="margin-left:.2in"><b>Description</b>: Used to notify plugin 
+of change in partition configuration or general configuration change.
+The plugin will test global variables for changes as appropriate.</p>
+<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful, otherwise SLURM_ERROR</p>
 
 <p class="footer"><a href="#top">top</a></p>
 
-<h3>Job-Specific Node Selection Functions</h3>
+<h3>Job-Specific Functions</h3>
+
 <p class="commandline">int select_p_job_test (struct job_record *job_ptr,
-bitstr_t *bitmap, int min_nodes, int max_nodes, int req_nodes, bool test_only);</p>
+bitstr_t *bitmap, int min_nodes, int max_nodes, int req_nodes, int mode);</p>
 <p style="margin-left:.2in"><b>Description</b>: Given a job's scheduling requirement 
 specification and a set of nodes which might  be used to satisfy the request, identify 
-the nodes which "best" satify the request. Note that nodes being considered for allocation 
+the nodes which "best" satisfy the request. Note that nodes being considered for allocation 
 to the job may include nodes already allocated to other jobs, even if node sharing is 
 not permitted. This is done to ascertain whether or not job may be allocated resources 
 at some later time (when the other jobs complete). This permits SLURM to reject 
@@ -179,29 +237,43 @@ the job with appropriate constraints.</p>
 <p style="margin-left:.2in"><b>Arguments</b>:<br>
 <span class="commandline"> job_ptr</span>&nbsp; &nbsp;&nbsp;(input) pointer 
 to the job being considered for scheduling. Data in this job record may safely be read.
-Data of particular interst include <i>details->contiguous</i> (set if allocated nodes 
+Data of particular interest include <i>details->contiguous</i> (set if allocated nodes 
 should be contiguous), <i>num_procs</i> (minimum processors in allocation) and 
-<i>details->req_node_bitmap</i> (specific required nodes).<br>
+<i>details->req_node_bitmap</i> (specific required nodes).<br><br>
 <span class="commandline"> bitmap</span>&nbsp; &nbsp;&nbsp;(input/output) 
 bits representing nodes which might be allocated to the job are set on input.
 This function should clear the bits representing nodes not required to satisfy 
 job's scheduling request.
 Bits left set will represent nodes to be used for this job. Note that the job's 
 required nodes (<i>details->req_node_bitmap</i>) will be a superset 
-<i>bitmap</i> when the function is called.<br>
+<i>bitmap</i> when the function is called.<br><br>
 <span class="commandline"> min_nodes</span>&nbsp; &nbsp;&nbsp;(input) 
 minimum number of nodes to allocate to this job. Note this reflects both job 
-and partition specifications.<br>
+and partition specifications.<br><br>
 <span class="commandline"> max_nodes</span>&nbsp; &nbsp;&nbsp;(input) 
 maximum number of nodes to allocate to this job. Note this reflects both job 
-and partition specifications.<br>
+and partition specifications.<br><br>
 <span class="commandline"> req_nodes</span>&nbsp; &nbsp;&nbsp;(input)
 the requested (desired) number of nodes to allocate to this job. This reflects the job's
-maximum node specification (if supplied).<br>
-<span class="commandline"> test_only</span>&nbsp; &nbsp;&nbsp;(input)
-if set then we only want to test our ability to run the job at some time, 
-not necesarily now with currently available resources.<br>
-</p>
+maximum node specification (if supplied).<br><br>
+<span class="commandline"> mode</span>&nbsp; &nbsp;&nbsp;(input)
+controls the mode of operation. Valid options are:<br>
+SELECT_MODE_RUN_NOW: try to schedule the job now<br>
+SELECT_MODE_TEST_ONLY: test if the job can ever run<br>
+SELECT_MODE_WILL_RUN: determine when and where the job can run</p>
+<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure,
+the plugin should return SLURM_ERROR and future attempts may be made to schedule 
+the job.</p>
+
+<p class="commandline">int select_p_job_list_test (List req_list);</p>
+<p style="margin-left:.2in"><b>Description</b>: This is a variation of the
+select_p_job_test function meant to determine when an ordered list of jobs
+can be initiated.</p>
+<p style="margin-left:.2in"><b>Arguments</b>:
+<span class="commandline"> req_list</span>&nbsp; &nbsp;&nbsp;(input/output) 
+priority ordered list of <i>select_will_run_t</i> records (a structure
+containing the arguments to the select_p_job_test function).
+Expected start time of each job will be set.</p>
 <p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure,
 the plugin should return SLURM_ERROR and future attempts may be made to schedule 
 the job.</p>
@@ -209,7 +281,7 @@ the job.</p>
 <p class="commandline">int select_p_job_begin (struct job_record *job_ptr);</p>
 <p style="margin-left:.2in"><b>Description</b>: Note the initiation of the specified job
 is about to begin. This function is called immediately after 
-<span class="commandline">select_p_job_test()</span> sucessfully completes for this job.
+<span class="commandline">select_p_job_test()</span> successfully completes for this job.
 <p style="margin-left:.2in"><b>Arguments</b>:
 <span class="commandline"> job_ptr</span>&nbsp; &nbsp;&nbsp;(input) pointer 
 to the job being initialized. Data in this job record may safely be read or written.
@@ -266,114 +338,47 @@ identify the nodes which were selected for this job to use.</p>
 <p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On
 failure, the plugin should return a SLURM error code.</p>
 
-<p class="commandline">int select_p_get_job_cores (uint32_t job_id, int
-alloc_index, int s);</p>
-<p style="margin-left:.2in"><b>Description</b>: Get socket-specific core
-information from a job.</p>
-<p style="margin-left:.2in"><b>Arguments</b>:
-<span class="commandline"> job_id</span>&nbsp; &nbsp;&nbsp;(input) ID of the job
-from which to obtain the data.<br>
-<span class="commandline"> alloc_index</span>&nbsp; &nbsp;&nbsp;(input) index of
-the allocated node to the job from which to obtain the data.<br>
-<span class="commandline"> s</span>&nbsp; &nbsp;&nbsp;(input) socket index from
-which to obtain the data.</p>
-<p style="margin-left:.2in"><b>Returns</b>: the number of cores allocated to the
-given socket on the given node for the given job. On failure, the plugin should
-return zero.</p>
-
 <p class="footer"><a href="#top">top</a></p>
 
-<h3>Get/set plugin information</h3>
-<p class="commandline">int select_p_get_extra_jobinfo(struct node_record *node_ptr,
-struct job_record *job_ptr, enum select_data_info info, void *data);</p>
-<p style="margin-left:.2in"><b>Description</b>: Get plugin-specific information 
-related to the specified job and/or node.</p>
+<h3>Get Information Functions</h3>
+
+<p class="commandline">int select_p_get_info_from_plugin(enum select_data_info info,
+struct job_record *job_ptr, void *data);</p>
+<p style="margin-left:.2in"><b>Description</b>: Get plugin-specific information
+about a job.</p>
 <p style="margin-left:.2in"><b>Arguments</b>:<br>
-<span class="commandline"> node_ptr</span>&nbsp; &nbsp;&nbsp;(input) pointer
-to the node for which information is requested.<br>
-<span class="commandline"> job_ptr</span>&nbsp; &nbsp;&nbsp;(input) pointer
-to the job for which information is requested.<br>
 <span class="commandline"> info</span>&nbsp; &nbsp;&nbsp;(input) identifies 
-the type of data requested.<br>
+the type of data requested.<br><br>
+<span class="commandline"> job_ptr</span>&nbsp; &nbsp;&nbsp;(input) pointer to
+the job related to the query (if applicable; may be NULL).<br><br>
 <span class="commandline"> data</span>&nbsp; &nbsp;&nbsp;(output) the requested data.</p>
 <p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure,
 the plugin should return SLURM_ERROR.</p>
 
+<p class="commandline">int select_p_pack_node_info (time_t last_query_time, Buf *buffer_ptr);</p>
+<p style="margin-left:.2in"><b>Description</b>: Pack node specific information into a buffer.</p>
+<p style="margin-left:.2in"><b>Arguments</b>:
+<span class="commandline">
+last_query_time</span>&nbsp;&nbsp;&nbsp;(input) time that the data was
+last saved.<br>
+<span class="commandline"> buffer_ptr</span>&nbsp;&nbsp;&nbsp;(input/output) buffer into 
+which the node data is appended.</p>
+<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful, 
+SLURM_NO_CHANGE_IN_DATA if data has not changed since last packed, otherwise SLURM_ERROR</p>
+
 <p class="commandline">int select_p_get_select_nodeinfo(struct node_record *node_ptr,
 enum select_data_info info, void *data);</p>
 <p style="margin-left:.2in"><b>Description</b>: Get plugin-specific information
 related to the specified node.</p>
 <p style="margin-left:.2in"><b>Arguments</b>:<br>
 <span class="commandline"> node_ptr</span>&nbsp; &nbsp;&nbsp;(input) pointer
-to the node for which information is requested.<br>
+to the node for which information is requested.<br><br>
 <span class="commandline"> info</span>&nbsp; &nbsp;&nbsp;(input) identifies
-the type of data requested.<br>
-<span class="commandline"> data</span>&nbsp; &nbsp;&nbsp;(output) the requested data.</p>
-<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure,
-the plugin should return SLURM_ERROR.</p>
-
-<p class="commandline">int select_p_update_nodeinfo(struct node_record *node_ptr);</p>
-<p style="margin-left:.2in"><b>Description</b>: Update plugin-specific information
-related to the specified node. This is called after changes in a node's configuration.</p>
-<p style="margin-left:.2in"><b>Argument</b>:
-<span class="commandline"> node_ptr</span>&nbsp; &nbsp;&nbsp;(input) pointer
-to the node for which information is requested.</p>
-<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure,
-the plugin should return SLURM_ERROR.</p>
-
-<p class="commandline">int select_p_get_info_from_plugin(enum select_data_info info, void *data);</p>
-<p style="margin-left:.2in"><b>Description</b>: Get plugin-specific information.</p>
-<p style="margin-left:.2in"><b>Arguments</b>:<br>
-<span class="commandline"> info</span>&nbsp; &nbsp;&nbsp;(input) identifies 
-the type of data to be updated.<br>
+the type of data requested.<br><br>
 <span class="commandline"> data</span>&nbsp; &nbsp;&nbsp;(output) the requested data.</p>
 <p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure,
 the plugin should return SLURM_ERROR.</p>
 
-<p class="commandline">int select_p_job_init(List job_list);<p>
-<p style="margin-left:.2in"><b>Description</b>: Used at slurm startup to 
-syncrhonize plugin (and node) state with that of currectly active jobs.</p>
-<p style="margin-left:.2in"><b>Arguments</b>:
-<span class="commandline"> job_list</span>&nbsp; &nbsp;&nbsp;(input) 
-list of slurm jobs from slurmctld job records.</p>
-<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure,
-the plugin should return SLURM_ERROR.</p>
-
-<p class="commandline">int select_p_update_node_state (int index, uint16_t state);</p>
-<p style="margin-left:.2in"><b>Description</b>: push a change of state
-into the plugin the index should be the index from the slurmctld of
-the entire system.  The state should be the same state the node_record
-was set to in the slurmctld.</p>
-<p style="margin-left:.2in"><b>Arguments</b>:
-<span class="commandline"> index</span>&nbsp;&nbsp;&nbsp;(input) index
-of the node in reference to the entire system. <br>
-<span class="commandline"> state</span>&nbsp;&nbsp;&nbsp;(input) new
-state of the node.</p>
-<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful, otherwise SLURM_ERROR</p>
-
-<p class="commandline">int select_p_alter_node_cnt (enum
-select_node_cnt type, void *data);</p>
-<p style="margin-left:.2in"><b>Description</b>: Used for systems like
-a Bluegene system where SLURM sees 1 node where many nodes really
-exists, in Bluegene's case 1 node reflects 512 nodes in real live, but
-since usually 512 is the smallest allocatable block slurm only handles
-it as 1 node.  This is a function so the user can issue a 'real'
-number and the fuction will alter it so slurm can understand what the
-user really means in slurm terms.</p>
-<p style="margin-left:.2in"><b>Arguments</b>:
-<span class="commandline"> type</span>&nbsp;&nbsp;&nbsp;(input) enum
-telling the plug in what the user is really wanting.<br>
-<span class="commandline"> data</span>&nbsp;&nbsp;&nbsp;(input/output)
-Is a void * so depending on the type sent in argument 1 this should 
-adjust the variable returning what the user is asking for.</p>
-<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful, otherwise SLURM_ERROR</p>
-
-<p class="commandline">int select_p_reconfigure (void);</p>
-<p style="margin-left:.2in"><b>Description</b>: Used to notify plugin 
-of change in partition configuration or general configuration change.
-The plugin will test global variables for changes as appropriate.</p>
-<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful, otherwise SLURM_ERROR</p>
-
 <p class="footer"><a href="#top">top</a></p>
 
 <h2>Versioning</h2>
@@ -386,6 +391,6 @@ to maintain data format compatibility across different versions of the plugin.</
 
 <p class="footer"><a href="#top">top</a></p>
 
-<p style="text-align:center;">Last modified 8 October 2007</p>
+<p style="text-align:center;">Last modified 25 February 2009</p>
 
 <!--#include virtual="footer.txt"-->
diff --git a/doc/html/slurm.sc08.bof.pdf b/doc/html/slurm.sc08.bof.pdf
new file mode 100644
index 0000000000000000000000000000000000000000..39611c34f438f0dc260e7d68d8c21923de3dd82b
Binary files /dev/null and b/doc/html/slurm.sc08.bof.pdf differ
diff --git a/doc/html/slurm.sc08.status.pdf b/doc/html/slurm.sc08.status.pdf
new file mode 100644
index 0000000000000000000000000000000000000000..51dfea8dab2ccee655b906a4470e738197e97be4
Binary files /dev/null and b/doc/html/slurm.sc08.status.pdf differ
diff --git a/doc/html/slurm.shtml b/doc/html/slurm.shtml
index acb1b46fa99dec8fc486cea386d617511b547483..c9a01af13c21656b57834c00aa5e79684222c52d 100644
--- a/doc/html/slurm.shtml
+++ b/doc/html/slurm.shtml
@@ -1,32 +1,49 @@
 <!--#include virtual="header.txt"-->
 
 <h1>SLURM: A Highly Scalable Resource Manager</h1>
-<p>SLURM is an open-source resource manager designed for Linux clusters of all 
-sizes. It provides three key functions. First it allocates exclusive and/or non-exclusive 
-access to resources (computer nodes) to users for some duration of time so they 
-can perform work. Second, it provides a framework for starting, executing, and 
-monitoring work (typically a parallel job) on a set of allocated nodes. Finally, 
-it arbitrates contention for resources by managing a queue of pending work. </p>
 
-<p>SLURM is not a sophisticated batch system, but it does provide an Applications 
-Programming Interface (API) for integration with external schedulers such as 
+<p>SLURM is an open-source resource manager designed for Linux clusters of 
+all sizes. 
+It provides three key functions. 
+First it allocates exclusive and/or non-exclusive access to resources 
+(computer nodes) to users for some duration of time so they can perform work. 
+Second, it provides a framework for starting, executing, and monitoring work 
+(typically a parallel job) on a set of allocated nodes. 
+Finally, it arbitrates contention for resources by managing a queue of 
+pending work. </p>
+
+<p>SLURM's design is very modular with dozens of optional plugins.
+In its simplest configuration, it can be installed and configured in a 
+couple of minutes (see <a href="http://www.linux-mag.com/id/7239/1/">
+Caos NSA and Perceus: All-in-one Cluster Software Stack</a> 
+by Jeffrey B. Layton).
+More complex configurations rely upon a 
+<a href="http://www.mysql.com/">MySQL</a> database for archiving 
+<a href="accounting.html">accounting</a> records, managing 
+<a href="resource_limits.html">resource limits</a> by user or bank account, 
+or supporting sophisticated 
+<a href="priority_multifactor.html">job prioritization</a> algorithms.
+SLURM also provides an Applications Programming Interface (API) for 
+integration with external schedulers such as 
 <a href="http://www.clusterresources.com/pages/products/maui-cluster-scheduler.php">
-The Maui Scheduler</a> and 
+The Maui Scheduler</a> or 
 <a href="http://www.clusterresources.com/pages/products/moab-cluster-suite.php">
-Moab Cluster Suite</a>.
-While other resource managers do exist, SLURM is unique in several respects: 
+Moab Cluster Suite</a>.</p>
+
+<p>While other resource managers do exist, SLURM is unique in several 
+respects: 
 <ul>
 <li>Its source code is freely available under the 
 <a href="http://www.gnu.org/licenses/gpl.html">GNU General Public License</a>.</li>
-<li>It is designed to operate in a heterogeneous cluster with up to 65,536 nodes.</li>
-<li>It is portable; written in C with a GNU autoconf configuration engine. While 
-initially written for Linux, other UNIX-like operating systems should be easy 
-porting targets. A plugin mechanism exists to support various interconnects, authentication 
-mechanisms, schedulers, etc.</li>
+<li>It is designed to operate in a heterogeneous cluster with up to 65,536 nodes
+and hundreds of thousands of processors.</li>
+<li>It is portable; written in C with a GNU autoconf configuration engine. 
+While initially written for Linux, other UNIX-like operating systems should 
+be easy porting targets.</li>
 <li>SLURM is highly tolerant of system failures, including failure of the node 
 executing its control functions.</li>
-<li>It is simple enough for the motivated end user to understand its source and 
-add functionality.</li>
+<li>A plugin mechanism exists to support various interconnects, 
+authentication mechanisms, schedulers, etc. These plugins are documented 
+and simple enough for the motivated end user to understand the source 
+and add functionality.</li>
 </ul></p>
 
 <p>SLURM provides resource management on about 1000 computers worldwide,
@@ -35,20 +52,29 @@ including many of the most powerful computers in the world:
 <li><a href="https://asc.llnl.gov/computing_resources/bluegenel/">BlueGene/L</a> 
 at LLNL with 106,496 dual-core processors</li>
 <li><a href="http://c-r-labs.com/">EKA</a> at Computational Research Laboratories, 
-India with 14,240 Xeon processoers and Infiniband interconnect</li>
+India with 14,240 Xeon processors and Infiniband interconnect</li>
 <li><a href="https://asc.llnl.gov/computing_resources/purple/">ASC Purple</a>
 an IBM SP/AIX cluster at LLNL with 12,208 Power5 processors and a Federation switch</li>
 <li><a href="http://www.bsc.es/plantillaA.php?cat_id=5">MareNostrum</a>
 a Linux cluster at Barcelona Supercomputer Center
 with 10,240 PowerPC processors and a Myrinet switch</li>
+<li><a href="http://en.wikipedia.org/wiki/Anton_(computer)">Anton</a>
+a massively parallel supercomputer designed and built by
+<a href="http://www.deshawresearch.com/">D. E. Shaw Research</a> 
+for molecular dynamics simulation using 512 custom-designed ASICs 
+and a three-dimensional torus interconnect </li>
 </ul>
 <p>SLURM is actively being developed, distributed and supported by 
 <a href="https://www.llnl.gov">Lawrence Livermore National Laboratory</a>,
-<a href="http://www.hp.com">Hewlett-Packard</a>,
-<a href="http://www.bull.com">Bull</a>,
-<a href="http://www.clusterresources.com">Cluster Resources</a> and
-<a href="http://www.sicortex.com">SiCortex</a>.</p>
+<a href="http://www.hp.com">Hewlett-Packard</a> and 
+<a href="http://www.bull.com">Bull</a>.
+It is also distributed and supported by 
+<a href="http://www.clusterresources.com">Cluster Resources</a>,
+<a href="http://www.sicortex.com">SiCortex</a>,
+<a href="http://www.infiscale.com">Infiscale</a>,
+<a href="http://www.ibm.com">IBM</a> and
+<a href="http://www.sun.com">Sun Microsystems</a>.</p>
 
-<p style="text-align:center;">Last modified 29 November 2007</p>
+<p style="text-align:center;">Last modified 25 March 2009</p>
 
 <!--#include virtual="footer.txt"-->
diff --git a/doc/html/sun_const.shtml b/doc/html/sun_const.shtml
new file mode 100644
index 0000000000000000000000000000000000000000..b1317b942f76f36d08eb735d00658aeef8cfb580
--- /dev/null
+++ b/doc/html/sun_const.shtml
@@ -0,0 +1,126 @@
+<!--#include virtual="header.txt"-->
+
+<h1>SLURM Administrator Guide for Sun Constellation systems</h1>
+
+<h2>Overview</h2>
+
+<p>This document describes the unique features of SLURM on
+Sun Constellation computers.
+You should be familiar with SLURM's mode of operation on Linux clusters 
+before studying the relatively few differences in Sun Constellation system 
+operation described in this document.</p>
+
+<p>SLURM's primary mode of operation is designed for use on clusters with
+nodes configured in a one-dimensional space. 
+Minor changes were required for the <i>smap</i> and <i>sview</i> tools 
+to map nodes in a three-dimensional space. 
+Some changes are also desirable to optimize job placement in three-dimensional 
+space.</p>
+
+<h2>Configuration</h2>
+
+<p>Two variables must be defined in the <i>config.h</i> file: 
+<i>HAVE_SUN_CONST</i> and <i>HAVE_3D</i>.
+This can be accomplished in several different ways depending upon how 
+SLURM is being built.
+<ol>
+<li>Execute the <i>configure</i> command with the option 
+<i>--enable-sun-const</i> <b>OR</b></li>
+<li>Execute the <i>rpmbuild</i> command with the option 
+<i>--with sun_const</i> <b>OR</b></li>
+<li>Add <i>%with_sun_const 1</i> to your <i>~/.rpmmacros</i> file.</li>
+</ol></p>
+
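+<p>For example, any one of the following builds defines the needed
+variables (the package file name below is illustrative):</p>
+<pre>
+# Option 1: build directly from source
+./configure --enable-sun-const
+
+# Option 2: build RPMs from the source tar-ball
+rpmbuild -ta --with sun_const slurm-2.0.2.tar.bz2
+</pre>
+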
+<p>Node names must have a three-digit suffix describing their 
+zero-origin position in the X-, Y- and Z-dimension respectively (e.g. 
+"tux000" for X=0, Y=0, Z=0; "tux123" for X=1, Y=2, Z=3). 
+Rectangular prisms of nodes can be specified in SLURM commands and
+configuration files using the system name prefix with the end-points 
+enclosed in square brackets and separated by an "x". 
+For example "tux[620x731]" is used to represent the eight nodes in a 
+block with endpoints at "tux620" and "tux731" (tux620, tux621, tux630, 
+tux631, tux720, tux721, tux730, tux731).
+While node names of this form are required for SLURM's internal use,
+they need not be the names returned by the <i>hostlist -s</i> command. 
+See <i>man slurm.conf</i> for details on how to use the <i>NodeName</i>,
+<i>NodeAddr</i> and <i>NodeHostName</i> configuration parameters 
+for flexibility in this matter.</p>
+
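+<p>For example, a node known internally to SLURM as "tux123" (X=1, Y=2,
+Z=3) could be mapped to a different real hostname and address (both
+hypothetical) as follows:</p>
+<pre>
+# slurm.conf excerpt (illustrative values)
+NodeName=tux123 NodeHostName=sun-node-42 NodeAddr=10.1.2.3
+</pre>
+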
+<p>Next, you need to select one of two options for the resource selection 
+plugin (the <i>SelectType</i> option in SLURM's <i>slurm.conf</i> configuration
+file); an example setting is shown after this list:
+<ol>
+<li><b>select/cons_res</b> - Performs a best-fit algorithm based upon a 
+one-dimensional space to allocate whole nodes, sockets, or cores to jobs
+based upon other configuration parameters.</li>
+<li><b>select/linear</b> - Performs a best-fit algorithm based upon a 
+one-dimensional space to allocate whole nodes to jobs.</li>
+</ol></p>
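+
+<p>For example (excerpt from a hypothetical <i>slurm.conf</i>):</p>
+<pre>
+SelectType=select/linear
+</pre>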
+
+<p>In order for <i>select/cons_res</i> or <i>select/linear</i> to 
+allocate resources physically nearby in three-dimensional space, the 
+nodes must be specified in SLURM's <i>slurm.conf</i> configuration file in 
+such a fashion that those nearby in <i>slurm.conf</i> (managed
+internal to SLURM as a one-dimensional space) are also nearby in 
+the physical three-dimensional space. 
+If the definitions of the nodes in SLURM's <i>slurm.conf</i> configuration 
+file are listed on one line (e.g. <i>NodeName=tux[000x333]</i>),
+SLURM will automatically perform that conversion using a 
+<a href="http://en.wikipedia.org/wiki/Hilbert_curve">Hilbert curve</a>.
+Otherwise you may construct your own node ordering sequence and 
+list them one node per line in <i>slurm.conf</i>.
+Note that each node must be listed exactly once and consecutive
+nodes should be nearby in three-dimensional space. 
+Also note that each node must be defined individually rather than using 
+a hostlist expression in order to preserve the ordering (there is no 
+problem using a hostlist expression in the partition specification after
+the nodes have already been defined).
+The open source code used by SLURM to generate the Hilbert curve is 
+included in the distribution at <i>contribs/skilling.c</i> in the event
+that you wish to experiment with it to generate your own node ordering.
+Two examples of SLURM configuration files are shown below:</p>
+
+<pre>
+# slurm.conf for Sun Constellation system of size 4x4x4
+
+# Configuration parameters removed here
+
+# Automatically orders nodes following a Hilbert curve
+NodeName=DEFAULT Procs=8 RealMemory=2048 State=Unknown
+NodeName=tux[000x333]
+PartitionName=debug Nodes=tux[000x333] Default=Yes State=UP
+</pre>
+
+<pre>
+# slurm.conf for Sun Constellation system of size 2x2x2
+
+# Configuration parameters removed here
+
+# Manual ordering of nodes following a space-filling curve
+NodeName=DEFAULT Procs=8 RealMemory=2048 State=Unknown
+NodeName=tux000
+NodeName=tux100
+NodeName=tux110
+NodeName=tux010
+NodeName=tux011
+NodeName=tux111
+NodeName=tux101
+NodeName=tux001
+PartitionName=debug Nodes=tux[000x111] Default=Yes State=UP
+</pre>
+
+<p>In both of the examples above, the node names output by the
+<i>scontrol show nodes</i> command will be ordered as defined (sequentially 
+along the Hilbert curve or per the ordering in the <i>slurm.conf</i> file)
+rather than in numeric order (e.g. "tux001" follows "tux101" rather 
+than "tux000"). 
+The output of other SLURM commands (e.g. <i>sinfo</i> and <i>squeue</i>) 
+will use a SLURM hostlist expression with the node names numerically ordered.
+SLURM partitions should contain nodes which are defined sequentially
+by that ordering for optimal performance.</p>
+
+<p class="footer"><a href="#top">top</a></p>
+
+<p style="text-align:center;">Last modified 8 January 2009</p></td>
+
+<!--#include virtual="footer.txt"-->
diff --git a/doc/html/switchplugins.shtml b/doc/html/switchplugins.shtml
index 9703dba8a536c38c60885791921f62c35a193ed9..3f8f65da7b57b61fb2f6780a72fbec3f18eee042 100644
--- a/doc/html/switchplugins.shtml
+++ b/doc/html/switchplugins.shtml
@@ -108,12 +108,6 @@ to indicate the reason for failure.</p>
 the plugin should return SLURM_ERROR and set the errno to an appropriate value
 to indicate the reason for failure.</p>
 
-<p class="commandline">bool switch_p_no_frag(void);</p>
-<p style="margin-left:.2in"><b>Description</b>: Report if resource fragmentation is important. 
-If so, delay scheduling a new job while another is in the process of terminating.</p>
-<p style="margin-left:.2in"><b>Arguments</b>:<span class="commandline"> None</span></p>
-<p style="margin-left:.2in"><b>Returns</b>: TRUE if job scheduling should be delayed while 
-any other job is in the process of terminating.</p>
 <p class="footer"><a href="#top">top</a></p>
 
 <h3>Node's Switch State Monitoring Functions</h3>
@@ -314,7 +308,7 @@ to indicate the reason for failure.</p>
 
 <p class="commandline">bool switch_p_part_comp (void);</p>
 <p style="margin-left:.2in"><b>Description</b>: Indicate if the switch plugin should 
-process partitial job step completions (i.e. switch_g_job_step_part_comp). Support
+process partial job step completions (i.e. switch_g_job_step_part_comp). Support
 of partition completions is compute intensive, so it should be avoided unless switch 
 resources are in short supply (e.g. switch/federation).</p>
 <p style="margin-left:.2in"><b>Returns</b>: True if partition step completions are 
@@ -380,7 +374,7 @@ to indicate the reason for failure.</p>
 <p class="footer"><a href="#top">top</a></p>
 
 <h3>Job Management Functions</h3>
-<blockquote><pre>
+<pre>
 =========================================================================
 Process 1 (root)        Process 2 (root, user)  |  Process 3 (user task) 
                                                 |                        
@@ -392,7 +386,7 @@ waitpid                 setuid, chdir, etc.     |
                         switch_p_job_fini*      |                        
 switch_p_job_postfini                           |                        
 =========================================================================
-</pre></blockquote>
+</pre>
 
 <p class="commandline">int switch_p_job_preinit (switch_jobinfo_t jobinfo switch_job);</p>
 <p style="margin-left:.2in"><b>Description</b>: Preinit is run as root in the first slurmd process, 
@@ -478,7 +472,7 @@ jobinfo switch_job, char *nodelist);</p>
 <p style="margin-left:.2in"><b>Description</b>: Note that the identified 
 job step is active at restart time. This function can be used to 
 restore global switch state information based upon job steps known to be 
-active at restart time. Use of this function is prefered over switch state 
+active at restart time. Use of this function is preferred over switch state 
 saved and restored by the switch plugin. Direct use of job step switch 
 information eliminates the possibility of inconsistent state information 
 between the switch and job steps.
@@ -519,6 +513,6 @@ plugin that transmitted it. It is at the discretion of the plugin author whether
 to maintain data format compatibility across different versions of the plugin.</p>
 <p class="footer"><a href="#top">top</a></p>
 
-<p style="text-align:center;">Last modified 3 July 2006</p>
+<p style="text-align:center;">Last modified 5 September 2008</p>
 
 <!--#include virtual="footer.txt"-->
diff --git a/doc/html/taskplugins.shtml b/doc/html/taskplugins.shtml
index bcc3d0f1eb424426d32d4e7664ae2c62c4a01370..f476e2e94384c3c09cafcbc7bf412f9989ece3f3 100644
--- a/doc/html/taskplugins.shtml
+++ b/doc/html/taskplugins.shtml
@@ -18,7 +18,7 @@ abbreviation for the type of task management. We recommend, for example:</p>
 <ul>
 <li><b>affinity</b>&#151;A plugin that implements task binding to processors.
 The actual mechanism used for task binding is dependent upon the available
-infrastruture as determined by the "configure" program when SLURM is built
+infrastructure as determined by the "configure" program when SLURM is built
 and the value of the <b>TaskPluginParam</b> as defined in the <b>slurm.conf</b>
 (SLURM configuration file).</li>
 <li><b>none</b>&#151;A plugin that implements the API without providing any
@@ -46,6 +46,22 @@ SLURM_ERROR.</p>
 <p>The following functions must appear. Functions which are not implemented should 
 be stubbed.</p>
 
+<p class="commandline">int task_slurmd_batch_request (uint32_t job_id, 
+batch_job_launch_msg_t *req);</p>
+<p style="margin-left:.2in"><b>Description</b>: Prepare to launch a batch job.
+Establish node, socket, and core resource availability for it.
+Executed by the <b>slurmd</b> daemon as user root.</p>
+<p style="margin-left:.2in"><b>Arguments</b>:<br>
+<span class="commandline">job_id</span>&nbsp;&nbsp;&nbsp;(input) 
+ID of the job to be started.<br>
+<span class="commandline">req</span>&nbsp;&nbsp;&nbsp;(input/output)
+Batch job launch request specification.
+See <b>src/common/slurm_protocol_defs.h</b> for the
+data structure definition.</p>
+<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. 
+On failure, the plugin should return SLURM_ERROR and set the errno to an 
+appropriate value to indicate the reason for failure.</p>
+
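+<p>A minimal stub of this function might look like the following sketch
+(the <i>debug</i> logging call is illustrative):</p>
+<pre>
+/* Sketch only: accept the batch launch request without special handling.
+ * Types come from src/common/slurm_protocol_defs.h. */
+int task_slurmd_batch_request (uint32_t job_id, batch_job_launch_msg_t *req)
+{
+	/* A real plugin could inspect or adjust the request in *req here. */
+	debug("task_slurmd_batch_request: job %u", job_id);
+	return SLURM_SUCCESS;
+}
+</pre>
+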
 <p class="commandline">int task_slurmd_launch_request (uint32_t job_id, 
 launch_tasks_request_msg_t *req, uint32_t node_id);</p>
 <p style="margin-left:.2in"><b>Description</b>: Prepare to launch a job.
@@ -148,7 +164,7 @@ appropriate value to indicate the reason for failure.</p>
 <p style="margin-left:.2in"><b>Description</b>: task_term() is called 
 after termination of job step.
 Executed by the <b>slurmstepd</b> program as the job's owner.
-It is preceeded by <b>--task-epilog</b> (from <b>srun</b> command line)
+It is preceded by <b>--task-epilog</b> (from <b>srun</b> command line)
 followed by <b>TaskEpilog</b> program (as configured in <b>slurm.conf</b>).</p>
 <p style="margin-left:.2in"><b>Arguments</b>:
 <span class="commandline">job</span>&nbsp;&nbsp;&nbsp;(input)
@@ -164,6 +180,6 @@ appropriate value to indicate the reason for failure.</p>
 Future releases of SLURM may revise this API.</p>
 <p class="footer"><a href="#top">top</a></p>
 
-<p style="text-align:center;">Last modified 28 May 2008</p>
+<p style="text-align:center;">Last modified 19 February 2009</p>
 
 <!--#include virtual="footer.txt"-->
diff --git a/doc/html/team.shtml b/doc/html/team.shtml
index 9231938dc3d8b76a55171c210d64380d2ddce0ac..cb3818a0a6a0f20841c6f95dac974181fcb4a13d 100644
--- a/doc/html/team.shtml
+++ b/doc/html/team.shtml
@@ -5,14 +5,14 @@
 <a href="https://www.llnl.gov/">Lawrence Livermore National Laboratory</a> (LLNL),
 <a href="http://www.hp.com/">HP</a>,
 <a href="http://www.bull.com/">Bull</a>,
-<a href="http://www.lnxi.com/">Linux NetworX</a>,
-and a host of others.
+Linux NetworX and many other contributors.
 
 <p>The current SLURM development staff includes: </p>
 <ul>
 <li>Morris Jette (LLNL, Project leader)</li>
 <li>Danny Auble (LLNL)</li>
 <li>Susanne Balle (HP)</li>
+<li>David Bremer (LLNL)</li>
 <li>Chris Holmes (HP)</li>
 </ul>
 
@@ -27,6 +27,7 @@ and a host of others.
 <li>Daniel Christians (HP)</li>
 <li>Gilles Civario (Bull)</li>
 <li>Chuck Clouston (Bull)</li>
+<li>Joseph Donaghy (LLNL)</li>
 <li>Chris Dunlap (LLNL)</li>
 <li>Joey Ekstrom (LLNL/Brigham Young University)</li>
 <li>Josh England (TGS Management Corporation)</li>
@@ -38,15 +39,20 @@ and a host of others.
 <li>Matthieu Hautreux (CEA, France)</li>
 <li>Nathan Huff (North Dakota State University)</li>
 <li>David Jackson (Cluster Resources)</li>
+<li>Klaus Joas (University Karlsruhe, Germany)</li>
 <li>Greg Johnson (LANL)</li>
 <li>Jason King (LLNL)</li>
 <li>Nancy Kritkausky (Bull)</li>
+<li>Eric Lin (Bull)</li>
 <li>Puenlap Lee (Bull)</li>
 <li>Bernard Li (Genome Sciences Centre, Canada)</li>
+<li>Donald Lipari (LLNL)</li>
 <li>Steven McDougall (SiCortex)</li>
 <li>Donna Mecozzi (LLNL)</li>
+<li>Bj&oslash;rn-Helge Mevik (University of Oslo, Norway)</li>
 <li>Chris Morrone (LLNL)</li>
-<li>Pere Munt (Barcelona Supercomputer Center, Spain)<li>
+<li>Pere Munt (Barcelona Supercomputer Center, Spain)</li>
+<li>Michal Novotny (Masaryk University, Czech Republic)</li> 
 <li>Bryan O'Sullivan (Pathscale)</li>
 <li>Gennaro Oliva (Institute of High Performance Computing and 
 Networking, Italy)</li>
@@ -54,6 +60,7 @@ Networking, Italy)</li>
 <li>Dan Phung (LLNL/Columbia University)</li>
 <li>Ashley Pittman (Quadrics)</li>
 <li>Vijay Ramasubramanian (University of Maryland)</li>
+<li>Krishnakumar Ravi[KK] (HP)</li>
 <li>Andy Riebs (HP)</li>
 <li>Asier Roa (Barcelona Supercomputer Center, Spain)</li>
 <li>Miguel Ros (Barcelona Supercomputer Center, Spain)</li>
@@ -69,6 +76,6 @@ Networking, Italy)</li>
 <li>Anne-Marie Wunderlin (Bull)</li>
 </ul>
 
-<p style="text-align:center;">Last modified 20 January 2009</p>
+<p style="text-align:center;">Last modified 16 June 2009</p>
 
 <!--#include virtual="footer.txt"-->
diff --git a/doc/html/testimonials.shtml b/doc/html/testimonials.shtml
index ebc3c819f05ed004cbdfb22f4f1fa166df7dd4f8..64853ab7cae73cdaee674bb794d87a336e2660b9 100644
--- a/doc/html/testimonials.shtml
+++ b/doc/html/testimonials.shtml
@@ -20,26 +20,36 @@ Dona Crawford, Associate Directory Lawrence Livermore National Laboratory
 </i>
 <HR SIZE=4>
 
-<i>
-"We are extremely pleased with SLURM and strongly recommend it to others 
-because it is mature, the developers are highly responsive and 
-it just works."<br><br>
-Jeffrey M. Squyres, Pervasive Technology Labs at Indiana University
-</i>
-<HR SIZE=4>
-
 <i>
 "Thank you for SLURM! It is one of the nicest pieces of free software
 for managing HPC clusters we have come across in a long time.
 Both of our Blue Genes are running SLURM and it works fantastically 
-well."<br><br>
+well.
+It's the most flexible, useful scheduling tool I've ever run 
+across."<br><br>
 Adam Todorski, Computational Center for Nanotechnology Innovations,
 Rensselaer Polytechnic Institute
 </i>
 <HR SIZE=4>
 
 <i>
-We adopted SLURM as our resource manager over two years ago when it was at
+"Awesome! I just read the manual, set it up and it works great.
+I tell you, I've used Sun Grid Engine, Torque, PBS Pro and there's
+nothing like SLURM."<br><br>
+Aaron Knister, Environmental Protection Agency
+</i>
+<HR SIZE=4>
+
+<i>
+"We are extremely pleased with SLURM and strongly recommend it to others 
+because it is mature, the developers are highly responsive and 
+it just works."<br><br>
+Jeffrey M. Squyres, Pervasive Technology Labs at Indiana University
+</i>
+<HR SIZE=4>
+
+<i>
+"We adopted SLURM as our resource manager over two years ago when it was at
 the 0.3.x release level. Since then it has become an integral and important
 component of our production research services. Its stability, flexibility
 and performance has allowed us to significantly increase the quality of
@@ -88,7 +98,7 @@ Erest Artiaga, Barcelona Supercomputing Center
 
 <i>
 "SLURM was a great help for us in implementing our own very concise 
-job management system on top of it which could be taylored precisely 
+job management system on top of it which could be tailored precisely 
 to our needs, and which at the same time is very simple to use for 
 our customers. 
 In general, we are impressed with the stability, scalability, and performance 
@@ -112,6 +122,6 @@ Bill Celmaster, XC Program Manager, Hewlett-Packard Company
 </i>
 <HR SIZE=4>
 
-<p style="text-align:center;">Last modified 28 July 2008</p>
+<p style="text-align:center;">Last modified 8 April 2009</p>
 
 <!--#include virtual="footer.txt"-->
diff --git a/doc/html/topo_ex1.gif b/doc/html/topo_ex1.gif
new file mode 100644
index 0000000000000000000000000000000000000000..4b1da437ca80b875393d6076d7d7d47e974d353b
Binary files /dev/null and b/doc/html/topo_ex1.gif differ
diff --git a/doc/html/topo_ex2.gif b/doc/html/topo_ex2.gif
new file mode 100644
index 0000000000000000000000000000000000000000..a0f0e12fada8e99f588809a015d82f27338ee9fd
Binary files /dev/null and b/doc/html/topo_ex2.gif differ
diff --git a/doc/html/topology.shtml b/doc/html/topology.shtml
new file mode 100644
index 0000000000000000000000000000000000000000..862d1aa09f6011cd8d684624dde79f1e611fa0b8
--- /dev/null
+++ b/doc/html/topology.shtml
@@ -0,0 +1,112 @@
+<!--#include virtual="header.txt"-->
+
+<h1>Topology</h1>
+
+<p>SLURM version 2.0 can be configured to support topology-aware resource
+allocation to optimize job performance.
+There are two primary modes of operation, one to optimize performance on
+systems with a three-dimensional torus interconnect and another for
+a hierarchical interconnect.</p>
+
+<p>SLURM's native mode of resource selection is to consider the nodes
+as a one-dimensional array. 
+Jobs are allocated resources on a best-fit basis.
+For larger jobs, this minimizes the number of sets of consecutive nodes
+allocated to the job.</p>
+
+<h2>Three-dimensional Topology</h2>
+
+<p>Some larger computers rely upon a three-dimensional torus interconnect.
+The IBM BlueGene computers are one example of this, with a highly
+constrained resource allocation scheme essentially requiring that 
+jobs be allocated a set of nodes logically having a rectangular shape.
+SLURM has a plugin specifically written for BlueGene to select appropriate
+nodes for jobs, change network switch routing, boot nodes, etc., as described
+in the <a href="bluegene.html">BlueGene User and Administrator Guide</a>.</p>
+
+<p>The Sun Constellation and Cray XT systems also have three-dimensional 
+torus interconnects, but do not require that jobs execute on adjacent nodes.
+On those systems, SLURM only needs to allocate a job resources which 
+are nearby on the network. 
+SLURM accomplishes this using a 
+<a href="http://en.wikipedia.org/wiki/Hilbert_curve">Hilbert curve</a> 
+to map the nodes from a three-dimensional space into a one-dimensional
+space.
+SLURM's native best-fit algorithm is thus able to achieve a high degree
+of locality for jobs.
+For more information, see SLURM's documentation for
+<a href="sun_const.html">Sun Constellation</a> and
+<a href="cray.html">Cray XT</a> systems.</p>
+
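+<p>As an illustration, one space-filling ordering of the eight nodes in
+a 2x2x2 system (taken from the Sun Constellation example configuration)
+places physically adjacent nodes next to each other in the
+one-dimensional list:</p>
+<pre>
+# 2x2x2 node ordering (illustrative)
+tux000 tux100 tux110 tux010 tux011 tux111 tux101 tux001
+</pre>
+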
+<h2>Hierarchical Networks</h2>
+
+<p>SLURM can also be configured to allocate resources to jobs on a 
+hierarchical network to minimize network contention.
+The basic algorithm is to identify the lowest level switch in the
+hierarchy that can satisfy a job's request and then allocate resources
+on its underlying leaf switches using a best-fit algorithm.
+Use of this logic requires a configuration setting of 
+<i>TopologyPlugin=topology/tree</i>.</p>
+
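+<p>For example (excerpt from a hypothetical <i>slurm.conf</i>):</p>
+<pre>
+# Enable topology-aware resource allocation over a hierarchical network
+TopologyPlugin=topology/tree
+</pre>
+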
+<p>At some point in the future, SLURM code may be provided to 
+gather network topology information directly.
+For now, the network topology information must be included 
+in a <i>topology.conf</i> configuration file as shown in the 
+examples below.
+The first example describes a three-level switch hierarchy in which 
+each switch has two children. 
+Note that the <i>SwitchName</i> values are arbitrary and used only
+for bookkeeping purposes, but a name must be specified on
+each line.
+The leaf switch descriptions contain a <i>SwitchName</i> field
+plus a <i>Nodes</i> field to identify the nodes connected to the
+switch.
+Higher-level switch descriptions contain a <i>SwitchName</i> field
+plus a <i>Switches</i> field to identify the child switches.
+SLURM's hostlist expression parser is used, so the node and switch
+names need not be consecutive (e.g. "Nodes=tux[0-3,12,18-20]"
+and "Swithces=s[0-2,4-8,12]" will parse fine).
+</p>
+
+<p>An optional LinkSpeed option can be used to indicate the 
+relative performance of the link. 
+The units used are arbitrary and this information is currently not used.
+It may be used in the future to optimize resource allocations.</p>
+
+<p>The first example shows what a topology would look like for an
+eight-node cluster in which all switches have only two children, as
+shown in the diagram (not a very realistic configuration, but 
+useful for an example).</p>
+
+<pre>
+# topology.conf
+# Switch Configuration
+SwitchName=s0 Nodes=tux[0-1]
+SwitchName=s1 Nodes=tux[2-3]
+SwitchName=s2 Nodes=tux[4-5]
+SwitchName=s3 Nodes=tux[6-7]
+SwitchName=s4 Switches=s[0-1]
+SwitchName=s5 Switches=s[2-3]
+SwitchName=s6 Switches=s[4-5]
+</pre>
+<img src="topo_ex1.gif" width="600">
+
+<p>The next example is for a two-level network in which each 
+switch has four connections.</p>
+<pre>
+# topology.conf
+# Switch Configuration
+SwitchName=s0 Nodes=tux[0-3]   LinkSpeed=900
+SwitchName=s1 Nodes=tux[4-7]   LinkSpeed=900
+SwitchName=s2 Nodes=tux[8-11]  LinkSpeed=900
+SwitchName=s3 Nodes=tux[12-15] LinkSpeed=1800
+SwitchName=s4 Switches=s[0-3]  LinkSpeed=1800
+SwitchName=s5 Switches=s[0-3]  LinkSpeed=1800
+SwitchName=s6 Switches=s[0-3]  LinkSpeed=1800
+SwitchName=s7 Switches=s[0-3]  LinkSpeed=1800
+</pre>
+<img src="topo_ex2.gif" width="600">
+
+<p style="text-align:center;">Last modified 24 March 2009</p>
+
+<!--#include virtual="footer.txt"-->
diff --git a/doc/html/topology_plugin.shtml b/doc/html/topology_plugin.shtml
new file mode 100644
index 0000000000000000000000000000000000000000..d2f146dd1742500be875d6290a5deaef439f95d5
--- /dev/null
+++ b/doc/html/topology_plugin.shtml
@@ -0,0 +1,74 @@
+<!--#include virtual="header.txt"-->
+
+<h1><a name="top">SLURM Topology Plugin Programmer Guide</a></h1>
+
+<h2> Overview</h2>
+<p>This document describes SLURM topology plugins and the API that 
+defines them. 
+It is intended as a resource to programmers wishing to write their own 
+SLURM topology plugin. 
+This is version 100 of the API.</p>
+
+<p>SLURM topology plugins are SLURM plugins that 
+convey system topology information so that SLURM is able to 
+optimize resource allocations and minimize communication overhead. 
+The plugins must conform to the SLURM Plugin API with the following 
+specifications:</p>
+
+<p><span class="commandline">const char plugin_type[]</span><br>
+The major type must be &quot;topology.&quot; 
+The minor type specifies the type of topology mechanism. 
+We recommend, for example:</p>
+<ul>
+<li><b>3d_torus</b>&#151;Optimize placement for a three dimensional torus.</li>
+<li><b>none</b>&#151;No topology information.</li>
+<li><b>tree</b>&#151;Optimize placement based upon a hierarchy of network
+switches.</li>
+</ul></p>
+
+<p>The <span class="commandline">plugin_name</span> and 
+<span class="commandline">plugin_version</span> 
+symbols required by the SLURM Plugin API require no specialization for 
+topology support. 
+The actions performed by these plugins vary widely.
+In the case of <b>3d_torus</b>, the nodes in the configuration file
+are re-ordered so that nodes which are nearby in the one-dimensional
+table are also nearby in logical three-dimensional space.
+In the case of <b>tree</b>, a table is built to reflect network
+topology and that table is later used by the <b>select</b> plugin
+to optimize placement.
+Note carefully, however, the versioning discussion below.</p>
+
+<h2>Data Objects</h2>
+<p>The implementation must maintain (though not necessarily directly export) an 
+enumerated <span class="commandline">errno</span> to allow SLURM to discover 
+as practically as possible the reason for any failed API call. 
+Plugin-specific enumerated integer values may be used when appropriate.
+
+<p>These values must not be used as return values in integer-valued 
+functions in the API. 
+The proper error return value from integer-valued functions is SLURM_ERROR. 
+The implementation should endeavor to provide useful and pertinent 
+information by whatever means is practical. 
+Successful API calls are not required to reset any errno to a known value. 
+However, the initial value of any errno, prior to any error condition 
+arising, should be SLURM_SUCCESS. </p>
+<p class="footer"><a href="#top">top</a></p>
+
+<h2>API Functions</h2>
+<p>The following functions must appear. 
+Functions which are not implemented should be stubbed.</p>
+
+<p class="commandline">int topo_build_config(void);</p>
+<p style="margin-left:.2in"><b>Description</b>: Generate topology information.</p>
+<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS or 
+SLURM_ERROR on failure.</p>
+
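+<p>A minimal sketch of a plugin implementing this API (modeled on the
+<b>none</b> type; exact header names are illustrative):</p>
+<pre>
+#include &lt;stdint.h&gt;
+#include &lt;slurm/slurm_errno.h&gt;
+
+const char plugin_name[]      = "topology NONE plugin";
+const char plugin_type[]      = "topology/none";
+const uint32_t plugin_version = 100;
+
+/* Generate topology information: nothing to do for the none plugin. */
+int topo_build_config(void)
+{
+	return SLURM_SUCCESS;
+}
+</pre>
+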
+<h2>Versioning</h2>
+<p> This document describes version 100 of the SLURM topology API. 
+Future releases of SLURM may revise this API.</p>
+<p class="footer"><a href="#top">top</a></p>
+
+<p style="text-align:center;">Last modified 24 March 2009</p>
+
+<!--#include virtual="footer.txt"-->
diff --git a/doc/html/troubleshoot.shtml b/doc/html/troubleshoot.shtml
index 9c585040a979fa14b9cd5bc7f7a9065a4e083166..6f60aed98cf942410e8f07f27f573e1ba61f72ba 100644
--- a/doc/html/troubleshoot.shtml
+++ b/doc/html/troubleshoot.shtml
@@ -102,7 +102,7 @@ to backfill schedule jobs will be limited.
 The backfill scheduler does not alter job specifications of required 
 or excluded nodes, so jobs which specify nodes will substantially 
 reduce the effectiveness of backfill scheduling.
-See the <a href="faq.shtml#backfill">backfill documentation</a>
+See the <a href="faq.html#backfill">backfill documentation</a>
 for more details.</li>
 
 <li>If the scheduler type is <i>wiki</i>, this represents 
@@ -150,7 +150,7 @@ either fix the node or change <i>slurm.conf</i>.</li>
 between the control machine and the DOWN node using the command
 "<i>ping &lt;address&gt;</i>" being sure to specify the 
 NodeAddr values configured in <i>slurm.conf</i>. 
-If ping fails, then fix the network or addressses in <i>slurm.conf</i>.</li>
+If ping fails, then fix the network or addresses in <i>slurm.conf</i>.</li>
 
 <li>Next, login to a node that SLURM considers to be in a DOWN 
 state and check if the slurmd daemon is running with the command
@@ -222,7 +222,7 @@ file) for an indication of why it is failing. (grep for update_block:)</li>
 <li>If the reason was something that happened to the system like a
 failed boot or a nodecard going bad or something like that you will
 need to fix the problem and then 
-<a href="#bluegene-block-free">maunally set the block to free</a>.</li>
+<a href="#bluegene-block-free">manually set the block to free</a>.</li>
 </ol>
 <p class="footer"><a href="#top">top</a></p>
 
@@ -233,7 +233,7 @@ will run on a block</a></h2>
 <li><a href="#bluegene-error-state">Set the block state to be in error
 manually</a>.</li>
 <li>When you are ready to run jobs again on the block <a
-href="#bluegene-block-free">maunally set the block to free</a>.</li>
+href="#bluegene-block-free">manually set the block to free</a>.</li>
 </ol>
 <p class="footer"><a href="#top">top</a></p>
 
diff --git a/doc/man/Makefile.am b/doc/man/Makefile.am
index 800536c24932cefc0c29a3d9596859831b799465..3ee8ff7965911739361d38d68e3d86324170a74d 100644
--- a/doc/man/Makefile.am
+++ b/doc/man/Makefile.am
@@ -13,9 +13,12 @@ man1_MANS =            \
 	man1/sinfo.1   \
 	man1/slurm.1 \
 	man1/smap.1 \
+	man1/sprio.1 \
 	man1/squeue.1 \
 	man1/sreport.1 \
 	man1/srun.1 \
+	man1/srun_cr.1 \
+	man1/sshare.1 \
 	man1/sstat.1 \
 	man1/strigger.1 \
 	man1/sview.1
@@ -39,11 +42,15 @@ man3_MANS = man3/slurm_hostlist_create.3 \
 	man3/slurm_checkpoint_failed.3 \
 	man3/slurm_checkpoint_restart.3 \
 	man3/slurm_checkpoint_task_complete.3 \
+	man3/slurm_checkpoint_tasks.3 \
 	man3/slurm_checkpoint_vacate.3 \
 	man3/slurm_clear_trigger.3 \
 	man3/slurm_complete_job.3 \
-	man3/slurm_complete_job_step.3 \
 	man3/slurm_confirm_allocation.3 \
+	man3/slurm_create_partition.3 \
+	man3/slurm_create_reservation.3 \
+	man3/slurm_delete_partition.3 \
+	man3/slurm_delete_reservation.3 \
 	man3/slurm_free_ctl_conf.3 \
 	man3/slurm_free_job_info_msg.3 \
 	man3/slurm_free_job_alloc_info_response_msg.3 \
@@ -53,11 +60,11 @@ man3_MANS = man3/slurm_hostlist_create.3 \
 	man3/slurm_free_node_info_msg.3 \
 	man3/slurm_free_partition_info.3 \
 	man3/slurm_free_partition_info_msg.3 \
+	man3/slurm_free_reservation_info_msg.3 \
 	man3/slurm_free_resource_allocation_response_msg.3 \
 	man3/slurm_free_slurmd_status.3 \
 	man3/slurm_free_submit_response_response_msg.3 \
 	man3/slurm_free_trigger_msg.3 \
-	man3/slurm_get_checkpoint_file_path.3 \
 	man3/slurm_get_end_time.3 \
 	man3/slurm_get_errno.3 \
 	man3/slurm_get_job_steps.3 \
@@ -66,6 +73,8 @@ man3_MANS = man3/slurm_hostlist_create.3 \
 	man3/slurm_get_triggers.3 \
 	man3/slurm_init_job_desc_msg.3 \
 	man3/slurm_init_part_desc_msg.3 \
+	man3/slurm_init_resv_desc_msg.3 \
+	man3/slurm_init_update_node_msg.3 \
 	man3/slurm_job_step_create.3 \
 	man3/slurm_job_step_launch_t_init.3 \
 	man3/slurm_job_step_layout_get.3 \
@@ -79,6 +88,7 @@ man3_MANS = man3/slurm_hostlist_create.3 \
 	man3/slurm_load_jobs.3 \
 	man3/slurm_load_node.3 \
 	man3/slurm_load_partitions.3 \
+	man3/slurm_load_reservations.3 \
 	man3/slurm_load_slurmd_status.3 \
 	man3/slurm_notify_job.3 \
 	man3/slurm_perror.3 \
@@ -93,6 +103,8 @@ man3_MANS = man3/slurm_hostlist_create.3 \
 	man3/slurm_print_node_table.3 \
 	man3/slurm_print_partition_info.3 \
 	man3/slurm_print_partition_info_msg.3 \
+	man3/slurm_print_reservation_info.3 \
+	man3/slurm_print_reservation_info_msg.3 \
 	man3/slurm_print_slurmd_status.3 \
 	man3/slurm_read_hostfile.3 \
 	man3/slurm_reconfigure.3 \
@@ -108,6 +120,7 @@ man3_MANS = man3/slurm_hostlist_create.3 \
 	man3/slurm_sprint_job_step_info.3 \
 	man3/slurm_sprint_node_table.3 \
 	man3/slurm_sprint_partition_info.3 \
+	man3/slurm_sprint_reservation_info.3 \
 	man3/slurm_step_ctx_create.3 \
 	man3/slurm_step_ctx_create_no_alloc.3 \
 	man3/slurm_step_ctx_daemon_per_node_hack.3 \
@@ -122,16 +135,18 @@ man3_MANS = man3/slurm_hostlist_create.3 \
 	man3/slurm_strerror.3 \
 	man3/slurm_submit_batch_job.3 \
 	man3/slurm_suspend.3 \
+	man3/slurm_takeover.3 \
 	man3/slurm_terminate_job.3 \
 	man3/slurm_terminate_job_step.3 \
-	man3/slurm_trigger.3 \
 	man3/slurm_update_job.3 \
 	man3/slurm_update_node.3 \
-	man3/slurm_update_partition.3
+	man3/slurm_update_partition.3 \
+	man3/slurm_update_reservation.3
 
 man5_MANS = man5/bluegene.conf.5 \
 	man5/slurm.conf.5 \
 	man5/slurmdbd.conf.5 \
+	man5/topology.conf.5 \
 	man5/wiki.conf.5
 
 man8_MANS = man8/slurmctld.8 \
diff --git a/doc/man/Makefile.in b/doc/man/Makefile.in
index 6fcf040ea39518f0df0c2da1774a238046863a96..5faf27b8c0258967f5f39c884924da3b724c3ed5 100644
--- a/doc/man/Makefile.in
+++ b/doc/man/Makefile.in
@@ -40,14 +40,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -85,6 +89,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
@@ -257,9 +265,12 @@ man1_MANS = \
 	man1/sinfo.1   \
 	man1/slurm.1 \
 	man1/smap.1 \
+	man1/sprio.1 \
 	man1/squeue.1 \
 	man1/sreport.1 \
 	man1/srun.1 \
+	man1/srun_cr.1 \
+	man1/sshare.1 \
 	man1/sstat.1 \
 	man1/strigger.1 \
 	man1/sview.1
@@ -283,11 +294,15 @@ man3_MANS = man3/slurm_hostlist_create.3 \
 	man3/slurm_checkpoint_failed.3 \
 	man3/slurm_checkpoint_restart.3 \
 	man3/slurm_checkpoint_task_complete.3 \
+	man3/slurm_checkpoint_tasks.3 \
 	man3/slurm_checkpoint_vacate.3 \
 	man3/slurm_clear_trigger.3 \
 	man3/slurm_complete_job.3 \
-	man3/slurm_complete_job_step.3 \
 	man3/slurm_confirm_allocation.3 \
+	man3/slurm_create_partition.3 \
+	man3/slurm_create_reservation.3 \
+	man3/slurm_delete_partition.3 \
+	man3/slurm_delete_reservation.3 \
 	man3/slurm_free_ctl_conf.3 \
 	man3/slurm_free_job_info_msg.3 \
 	man3/slurm_free_job_alloc_info_response_msg.3 \
@@ -297,11 +312,11 @@ man3_MANS = man3/slurm_hostlist_create.3 \
 	man3/slurm_free_node_info_msg.3 \
 	man3/slurm_free_partition_info.3 \
 	man3/slurm_free_partition_info_msg.3 \
+	man3/slurm_free_reservation_info_msg.3 \
 	man3/slurm_free_resource_allocation_response_msg.3 \
 	man3/slurm_free_slurmd_status.3 \
 	man3/slurm_free_submit_response_response_msg.3 \
 	man3/slurm_free_trigger_msg.3 \
-	man3/slurm_get_checkpoint_file_path.3 \
 	man3/slurm_get_end_time.3 \
 	man3/slurm_get_errno.3 \
 	man3/slurm_get_job_steps.3 \
@@ -310,6 +325,8 @@ man3_MANS = man3/slurm_hostlist_create.3 \
 	man3/slurm_get_triggers.3 \
 	man3/slurm_init_job_desc_msg.3 \
 	man3/slurm_init_part_desc_msg.3 \
+	man3/slurm_init_resv_desc_msg.3 \
+	man3/slurm_init_update_node_msg.3 \
 	man3/slurm_job_step_create.3 \
 	man3/slurm_job_step_launch_t_init.3 \
 	man3/slurm_job_step_layout_get.3 \
@@ -323,6 +340,7 @@ man3_MANS = man3/slurm_hostlist_create.3 \
 	man3/slurm_load_jobs.3 \
 	man3/slurm_load_node.3 \
 	man3/slurm_load_partitions.3 \
+	man3/slurm_load_reservations.3 \
 	man3/slurm_load_slurmd_status.3 \
 	man3/slurm_notify_job.3 \
 	man3/slurm_perror.3 \
@@ -337,6 +355,8 @@ man3_MANS = man3/slurm_hostlist_create.3 \
 	man3/slurm_print_node_table.3 \
 	man3/slurm_print_partition_info.3 \
 	man3/slurm_print_partition_info_msg.3 \
+	man3/slurm_print_reservation_info.3 \
+	man3/slurm_print_reservation_info_msg.3 \
 	man3/slurm_print_slurmd_status.3 \
 	man3/slurm_read_hostfile.3 \
 	man3/slurm_reconfigure.3 \
@@ -352,6 +372,7 @@ man3_MANS = man3/slurm_hostlist_create.3 \
 	man3/slurm_sprint_job_step_info.3 \
 	man3/slurm_sprint_node_table.3 \
 	man3/slurm_sprint_partition_info.3 \
+	man3/slurm_sprint_reservation_info.3 \
 	man3/slurm_step_ctx_create.3 \
 	man3/slurm_step_ctx_create_no_alloc.3 \
 	man3/slurm_step_ctx_daemon_per_node_hack.3 \
@@ -366,16 +387,18 @@ man3_MANS = man3/slurm_hostlist_create.3 \
 	man3/slurm_strerror.3 \
 	man3/slurm_submit_batch_job.3 \
 	man3/slurm_suspend.3 \
+	man3/slurm_takeover.3 \
 	man3/slurm_terminate_job.3 \
 	man3/slurm_terminate_job_step.3 \
-	man3/slurm_trigger.3 \
 	man3/slurm_update_job.3 \
 	man3/slurm_update_node.3 \
-	man3/slurm_update_partition.3
+	man3/slurm_update_partition.3 \
+	man3/slurm_update_reservation.3
 
 man5_MANS = man5/bluegene.conf.5 \
 	man5/slurm.conf.5 \
 	man5/slurmdbd.conf.5 \
+	man5/topology.conf.5 \
 	man5/wiki.conf.5
 
 man8_MANS = man8/slurmctld.8 \
diff --git a/doc/man/man1/sacct.1 b/doc/man/man1/sacct.1
index 035861fd131715e99a3ecdbfd8e30ad42d06e2ed..25f27b357b4070e174e7d498a4706400f52d3d0c 100644
--- a/doc/man/man1/sacct.1
+++ b/doc/man/man1/sacct.1
@@ -1,30 +1,27 @@
-.\" $Header$
-.TH  sacct 1
-.ds )H "Hewlett\-Packard Company
+.TH SACCT "1" "April 2009" "sacct 2.0" "Slurm components"
+
 .SH "NAME"
-sacct
-\- displays accounting data for all jobs and job steps in the SLURM job accounting log
+sacct \- displays accounting data for all jobs and job steps in the
+SLURM job accounting log or SLURM database
 
 .SH "SYNOPSIS"
-.HP 
-.BR "sacct "
-\f2\fP\f2options \fP 
+\fBsacct\fR [\fIOPTIONS\fR...]
 
 .SH "DESCRIPTION"
 .PP
-Accounting information for jobs invoked with SLURM are logged in the job 
-accounting log file.
+Accounting information for jobs invoked with SLURM is either logged
+in the job accounting log file or saved to the SLURM database.
 .PP
 The 
 .BR "sacct "
 command displays job accounting data stored in the job accounting log 
-file in a variety of forms for your analysis.
+file or SLURM database in a variety of forms for your analysis.
 The 
 .BR "sacct "
 command displays information on jobs, job steps, status, and exitcodes by 
 default.
 You can tailor the output with the use of the 
-\f3\-\-format=\fP 
+\f3\-\-fields=\fP 
 option to specify the fields to be shown.
 .PP
 For the root user, the 
@@ -52,32 +49,31 @@ gather and report incomplete information for these calls;
 \f2getrusage(3)\fP man page for information about which data are
 actually available on your system.
 .IP
-If --dump is specified, the field selection options (--brief,
---format, ...) have no effect. 
+If \-\-dump is specified, the field selection options (\-\-brief,
+\-\-format, ...) have no effect. 
 .IP
 Elapsed time fields are presented as 2 fields, integral seconds and integral microseconds
 .IP
-If --dump is not specified, elapsed time fields are presented as
+If \-\-dump is not specified, elapsed time fields are presented as
 [[days-]hours:]minutes:seconds.hundredths.
 .IP
 The default input file is the file named in the jobacct_logfile
 parameter in slurm.conf.
 
-.SS "Options"
+.SH "OPTIONS"
+
 .TP "10"
-\f3\-a \fP\f3,\fP \f3\-\-all\fP
-Displays the job accounting data for all jobs in the job accounting log file.
+\f3\-a \fP\f3,\fP \f3\-\-allusers\fP
+Displays all users' jobs when run by user root; otherwise displays
+only the current user's jobs.
 .IP 
-This is the default behavior when the 
-.BR "sacct "
-command is executed by the root user.
-.TP 
-\f3\-A \fP\f2account_list\fP\f3,\fP  \f3\-\-accounts\fP\f3=\fP\f2account_list\fP
-Displays the statistics only for the jobs started on the accounts specified by
-the \f2account_list\fP operand, which is a comma\-separated list of
-account names.
-Space characters are not allowed in the \f2account_list\fP. Default is
-all accounts\&.
+
+.TP
+\f3\-A \fP\f2account_list\fP\f3,\fP \f3\-\-accounts\fP\f3=\fP\f2account_list\fP
+Displays jobs when a comma\-separated list of accounts is given as the
+argument.
+.IP 
+
+
 .TP 
 \f3\-b \fP\f3,\fP \f3\-\-brief\fP
 Displays a brief listing, which includes the following data:
@@ -98,128 +94,83 @@ This option has no effect when the
 option is also specified.
 
 .TP 
-\f3\-C \fP\f2cluster_list\fP\f3,\fP  \f3\-\-clusters\fP\f3=\fP\f2cluster_list\fP
+\f3\-C \fP\f2cluster_list\fP\f3,\fP  \f3\-\-cluster\fP\f3=\fP\f2cluster_list\fP
 Displays the statistics only for the jobs started on the clusters specified by
 the \f2cluster_list\fP operand, which is a comma\-separated list of clusters.
 Space characters are not allowed in the \f2cluster_list\fP. \-1 for
 all clusters, default is current cluster you are executing the sacct
 command on\&.
 
+.TP
+\f3\-c \fP\f3,\fP \f3\-\-completion\fP
+Use job completion instead of job accounting.
+.IP 
+
+
 .TP 
 \f3\-d \fP\f3,\fP \f3\-\-dump\fP
-Displays (dumps) the raw data records.
-.IP 
-This option overrides the 
-\f3\-\-brief\fP 
-and 
-\f3\-\-format=\fP 
-options.
+Dumps the raw data records.
 .IP 
+
 The section titled "INTERPRETING THE \-\-dump OPTION OUTPUT" describes the 
 data output when this option is used.
 
 .TP 
 \f3\-\-duplicates\fP
 If SLURM job ids are reset, but the job accounting log file isn't
-reset at the same time (with -e, for example), some job numbers will
+reset at the same time (with \-e, for example), some job numbers will
 probably appear more than once in the accounting log file to refer to
 different jobs; such jobs can be distinguished by the "submit" time
 stamp in the data records.
 .IP 
-When data for specific jobs are requested with the --jobs option, we
+When data for specific jobs are requested with the \-\-jobs option, we
 assume that the user wants to see only the most recent job with that
-number. This behavior can be overridden by specifying --duplicates, in
+number. This behavior can be overridden by specifying \-\-duplicates, in
 which case all records that match the selection criteria will be returned.
-.IP
-When --jobs is not specified, we report data for all jobs that match
-the selection criteria, even if some of the job numbers are
-reused. Specify that you only want the most recent job for each
-selected job number with the --noduplicates option.
-
 
 .TP
-\f3\-e \fP\f2time_spec\fP \f3,\fP \f3\-\-expire=\fP\f2time_spec\fP
-.IP
-Removes job data from SLURMs current accounting log file (or the file
-specified with \f3\-\-file\fP) for jobs that completed more than
-\f2time_spec\fP
-ago and appends them to the expired log file.
+\f3\-e \fP\f3,\fP \f3\-\-helpformat\fP
 .IP
-If \f2time_spec\fP is an integer value only, it is interpreted as minutes. If
-\f2time_spec\fP is an integer followed by "h", it is interpreted as
-a number of hours. If \f2time_spec\fP is an integer followed by "d",
-it is interpreted as number of days. For example, "\-\-expire=14d" 
-purges the job accounting log of all jobs that completed
-more than 14 days ago.
+Print a list of fields that can be specified with the \f3\-\-format\fP option.
 .IP
-The expired log file is a file with the same name as the accounting
-log file, with ".expired" appended to the file name. For example, if
-the accounting log file is /var/log/slurmacct.log, the expired log
-file will be /var/log/slurmacct.log.expired.
-
-.TP
-\f3\-\-endtime\fP\f3=\fP\f2endtime\fP
-Select jobs eligible before this time.
-Valid Formats are.
-	HH:MM[:SS] [AM|PM]
-	MMDD[YY] or MM/DD[/YY] or MM.DD[.YY]
-	MM/DD[/YY]-HH:MM[:SS]
-
-.TP 
-\f3\-F \fP\f2format_list\fP \f3,\fP  \f3\-\-format\fP\f3=\fP\f2format_list\fP
-Displays the job accounting data specified by the 
-\f2format_list\fP 
-operand, which is a comma\-separated list of fields.
-Space characters are not allowed in the 
-\f2format_list\fP\c
-\&. 
-.IP 
-See the 
-\f3\-\-helpformat\fP 
-option for a list of the available fields.
-See the section titled "Job Accounting Fields" for a description of 
-each field.
-.IP 
-The job accounting data is displayed in the order specified by the 
-\f2format_list\fP 
-operand.
-Thus, the following two commands display the same data but in different order:
 .RS 
 .PP
 .nf 
 .ft 3
-# sacct \-\-format=jobid,status
-Jobid    Status
-\-\-\-\-\-\-\-\-\-\- \-\-\-\-\-\-\-\-\-\-
-3          COMPLETED
-3.0        COMPLETED
+Fields available:
 
-.ft 1
-.fi 
-.RE 
-.RS 
-.PP
-.nf 
-.ft 3
-# sacct \-\-format=status,jobid
-Status     Jobid
-\-\-\-\-\-\-\-\-\-\- \-\-\-\-\-\-\-\-\-\-
-COMPLETED  3
-COMPLETED  3.0
+AllocCPUS     Account       AssocID       AveCPU       
+AvePages      AveRSS        AveVMSize     BlockID      
+Cluster       CPUTime       CPUTimeRAW    Elapsed      
+Eligible      End           ExitCode      GID          
+Group         JobID         JobName       Layout       
+MaxPages      MaxPagesNode  MaxPagesTask  MaxRSS       
+MaxRSSNode    MaxRSSTask    MaxVMSize     MaxVMSizeNode
+MaxVMSizeTask MinCPU        MinCPUNode    MinCPUTask   
+NCPUS         NNodes        NodeList      NTasks       
+Priority      Partition     QOS           QOSRAW       
+ReqCPUS       Reserved      ResvCPU       ResvCPURAW   
+Start         State         Submit        Suspended    
+SystemCPU     Timelimit     TotalCPU      UID          
+User          UserCPU       WCKey         WCKeyID      
 
 .ft 1
 .fi 
 .RE 
 .IP 
-The default value for the 
-\f2field_list\fP 
-operand is 
-\f3"jobid,jobname,partition,ncpus,state,exitcode"\fP\c
-\&.
-.IP 
-This option has no effect when the 
-\f3\-\-dump\fP 
-option is also specified.
+The section titled "Job Accounting Fields" describes these fields.
+
+.TP
+\f3\-E \fP\f2end_time\fP\f3,\fP \f3\-\-endtime\fP\f3=\fP\f2end_time\fP
+.IP
+Select jobs starting before the specified time.
+Valid time formats are:
+HH:MM[:SS] [AM|PM]
+MMDD[YY] or MM/DD[/YY] or MM.DD[.YY]
+MM/DD[/YY]-HH:MM[:SS]
+YYYY-MM-DD[THH[:MM[:SS]]]
+.IP
+
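+For example (illustrative):
+.RS
+.PP
+.nf
+.ft 3
+sacct \-\-starttime=2009\-04\-01 \-\-endtime=2009\-04\-02T12:00:00
+.ft 1
+.fi
+.RE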
 
 .TP 
 \f3\-f \fP\f2file\fP\f3,\fP  \f3\-\-file\fP\f3=\fP\f2file\fP
@@ -227,53 +178,21 @@ Causes the
 .BR "sacct "
 command to read job accounting data from the named 
 \f2file\fP 
-instead of the current SLURM job accounting log file.
+instead of the current SLURM job accounting log file. Only applicable
+when running the filetxt plugin.
 
 .TP 
-\f3\-g \fP\f2gid_list\fP\f3,\fP  \f3\-\-gid\fP\f3=\fP\f2gid_list\fP
+\f3\-g \fP\f2gid_list\fP\f3,\fP  \f3\-\-gid\fP\f3=\fP\f2gid_list\fP \f3\-\-group\fP\f3=\fP\f2group_list\fP
 Displays the statistics only for the jobs started with the GID
-specified by the \f2gid_list\fP operand, which is a comma\-separated
-list of gids.  Space characters are not allowed in the \f2gid_list\fP. 
-Default is no restrictions.  This is virtually the same as the \-\-group
-option\&. 
-
-.TP 
-\f3\-g \fP\f2group_list\fP\f3,\fP  \f3\-\-group\fP\f3=\fP\f2group_list\fP
-Displays the statistics only for the jobs started with the GROUP
-specified by the \f2group_list\fP operand, which is a comma\-separated
-list of groups.  Space characters are not allowed in the \f2group_list\fP. 
-Default is no restrictions.  This is virtually the same as the \-\-gid option\&. 
+or the GROUP specified by the \f2gid_list\fP or the \f2group_list\fP
+operand, which is a comma\-separated list.  Space characters are not
+allowed.  Default is no restrictions\&.
 
 .TP 
 \f3\-h \fP\f3,\fP \f3\-\-help\fP
 Displays a general help message.
 .TP 
-\f3\-\-helpformat\fP
-Displays a list of fields that can be specified with the 
-\f3\-\-format\fP 
-option.
-.RS 
-.PP
-.nf 
-.ft 3
-Fields available:
-account     associd     cluster     cpu       
-cputime     elapsed     eligible    end       
-exitcode    finished    gid         group     
-job         jobid       jobname     ncpus     
-nodes       nnodes      nprocs      ntasks    
-pages       partition   rss         start     
-state       status      submit      timelimit 
-submitted   systemcpu   uid         user      
-usercpu     vsize       blockid     connection
-geo         max_procs   reboot      rotate    
-bg_start_point  wckey     
 
-.ft 1
-.fi 
-.RE 
-.IP 
-The section titled "Job Accounting Fields" describes these fields.
 
 .TP 
 \f3\-j \fP\f2job(.step)\fP \f3,\fP  \f3\-\-jobs\fP\f3=\fP\f2job(.step)\fP
@@ -288,59 +207,35 @@ The default is to display information on all jobs.
 
 .TP 
 \f3\-l\fP\f3,\fP \f3\-\-long\fP
-Displays a long listing, which includes the following data:
-.RS 
-.TP "3"
-\(bu
-\f3jobid\fP 
-.TP "3"
-\(bu
-\f3jobname\fP 
-.TP "3"
-\(bu
-\f3partition\fP 
-.TP "3"
-\(bu
-\f3vsize\fP 
-.TP "3"
-\(bu
-\f3rss\fP 
-.TP "3"
-\(bu
-\f3pages\fP 
-.TP "3"
-\(bu
-\f3cputime\fP 
-.TP "3"
-\(bu
-\f3ntasks\fP 
-.TP "3"
-\(bu
-\f3ncpus\fP 
-.TP "3"
-\(bu
-\f3elapsed\fP 
-.TP "3"
-\(bu
-\f3status\fP 
-.TP "3"
-\(bu
-\f3exitcode\fP 
-.RE 
+Equivalent to specifying:
+.IP
+\'--fields=jobid,jobname,partition,maxvsize,maxvsizenode,maxvsizetask,avevsize,maxrss,maxrssnode,maxrsstask,averss,maxpages,maxpagesnode,maxpagestask,avepages,mincpu,mincpunode,mincputask,avecpu,ntasks,alloccpus,elapsed,state,exitcode\' 
+
 
 .TP 
-\f3\-\-noduplicates\fP
-See the discussion under --duplicates.
+\f3\-L\fP\f3,\fP \f3\-\-allclusters\fP
+Display jobs that ran on any cluster. By default, only jobs run on the
+cluster from which sacct is called are displayed.
 
 .TP 
-\f3\-\-noheader\fP
-Prevents the display of the heading over the output.
-The default action is to display a header.
+\f3\-n\fP\f3,\fP \f3\-\-noheader\fP
+No heading will be added to the output. The default action is to
+display a header.
 .IP 
 This option has no effect when used with the 
 \f3\-\-dump\fP 
 option.
 
+.TP 
+\f3\-N\fP\f3,\fP \f3\-\-nodes\fP
+Display jobs that ran on any of the specified nodes.
+.IP
+
+.TP 
+\f3\-o \fP\f3,\fP \f3\-\-format\fP
+Comma\-separated list of fields (use "\-\-helpformat" for a list of available fields).
+.IP 
+
 .TP 
 \f3\-O \fP\f3,\fP \f3\-\-formatted_dump\fP
 Dumps accounting records in an easy\-to\-read format.
@@ -348,51 +243,18 @@ Dumps accounting records in an easy\-to\-read format.
 This option is provided for debugging.
 
 .TP
-\f3\-P \fP\f3,\fP \f3\-\-purge\fP
-Used in conjunction with --expire to remove invalid data from the job accounting log.
+\f3\-p \fP\f3,\fP \f3\-\-parsable\fP
+Output will be '|' delimited with a '|' at the end.
 
-.TP 
-\f3\-p \fP\f2partition_list\fP \f3,\fP  \f3\-\-partition\fP\f3=\fP\f2partition_list\fP
-Displays information about jobs and job steps specified by the 
-\f2partition_list\fP 
-operand, which is a comma\-separated list of partitions.
-Space characters are not allowed in the 
-\f2partition_list\fP\c
-\&. 
-.IP 
-The default is to display information on jobs and job steps on all partitions.
+.TP
+\f3\-P \fP\f3,\fP \f3\-\-parsable2\fP
+Output will be '|' delimited without a '|' at the end.
 
 .TP
-\f3\-S \fP\f3,\fP \f3\-\-stat\fP
-.IP
-Queries the status of a job as the job is running displaying
-the following data:
-.RS 
-.TP "3"
-\(bu
-\f3jobid\fP 
-.TP "3"
-\(bu
-\f3vsize\fP 
-.TP "3"
-\(bu
-\f3rss\fP 
-.TP "3"
-\(bu
-\f3pages\fP 
-.TP "3"
-\(bu
-\f3cputime\fP 
-.TP "3"
-\(bu
-\f3ntasks\fP 
-.TP "3"
-\(bu
-\f3status\fP 
-.RE 
-.IP
-You must also include the \-\-jobs=job(.step) option if no (.step) is 
-given you will recieve the job.0 step.
+\f3\-r \fP\f3,\fP \f3\-\-partition\fP
+Comma\-separated list of partitions to select jobs and job steps
+from. The default is all partitions.
 
 .TP 
 \f3\-s \fP\f2state_list\fP \f3,\fP  \f3\-\-state\fP\f3=\fP\f2state_list\fP
@@ -432,34 +294,25 @@ Space characters are not allowed in the
 \f2state_list\fP\c
 \&.
 
-.TP
-\f3\-\-starttime\fP\f3=\fP\f2starttime\fP
-Select jobs eligible after this time.
-Valid Formats are.
-	HH:MM[:SS] [AM|PM]
-	MMDD[YY] or MM/DD[/YY] or MM.DD[.YY]
-	MM/DD[/YY]-HH:MM[:SS]
-
 .TP 
-\f3\-t \fP\f3,\fP \f3\-\-total\fP
-Displays only the cumulative statistics for each job.
-Intermediate steps are displayed by default.
+\f3\-S \fP\f3,\fP \f3\-\-starttime\fP
+Select jobs eligible after the specified time. Default is midnight of
+the current day.
+Valid time formats are:
+HH:MM[:SS] [AM|PM]
+MMDD[YY] or MM/DD[/YY] or MM.DD[.YY]
+MM/DD[/YY]-HH:MM[:SS]
+YYYY-MM-DD[THH[:MM[:SS]]]
 
 .TP 
-\f3\-u \fP\f2uid_list\fP\f3,\fP  \f3\-\-uid\fP\f3=\fP\f2uid_list\fP
-Displays the statistics only for the jobs started by the specified
-\f2uid_list\fP operand, which is a comma\-separated list of uids.
-Space characters are not allowed in the \f2uid_list\fP.  
-\-1 for all uids, default is current uid.  If run as user root default
-is all users.  This is virtually the same as the \-\-user option\&.
+\f3\-T \fP\f3,\fP \f3\-\-truncate\fP
+Truncate time.  If a job started before \-\-starttime, the start time
+is truncated to \-\-starttime; likewise for end times and \-\-endtime.
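+.IP
+For example (illustrative), "sacct \-S 2009\-04\-01 \-E 2009\-04\-02 \-T"
+reports each overlapping job as starting no earlier than April 1 and
+ending no later than April 2.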
 
 .TP 
-\f3\-u \fP\f2user_list\fP\f3,\fP  \f3\-\-user\fP\f3=\fP\f2user_list\fP
-Displays the statistics only for the jobs started by the specified
-\f2user_list\fP operand, which is a comma\-separated list of users.
-Space characters are not allowed in the \f2user_list\fP.  
-\-1 for all uids, default is current uid.  If run as user root default
-is all users.  This is virtually the same as the \-\-uid option\&.
+\f3\-u \fP\f2uid_list\fP\f3,\fP  \f3\-\-uid\fP\f3=\fP\f2uid_list\fP \f3\-\-user\fP\f3=\fP\f2user_list\fP
+Use this comma\-separated list of uids or user names to select jobs to
+display.  By default, the running user's uid is used.
 
 .TP 
 \f3\-\-usage\fP
@@ -467,8 +320,11 @@ Displays a help message.
 
 .TP 
 \f3\-v \fP\f3,\fP \f3\-\-verbose\fP
-Reports the state of certain variables during processing.
-This option is primarily used for debugging.
+Primarily for debugging; reports the state of certain variables during processing.
+
+.TP 
+\f3\-V \fP\f3,\fP \f3\-\-version\fP
+Print version.
 
 .TP 
 \f3\-W \fP\f2wckey_list\fP\f3,\fP  \f3\-\-wckeys\fP\f3=\fP\f2wckey_list\fP
@@ -478,25 +334,57 @@ wckey names.
 Space characters are not allowed in the \f2wckey_list\fP. Default is
 all wckeys\&.
 
+.TP 
+\f3\-X \fP\f3,\fP \f3\-\-allocations\fP
+Only show cumulative statistics for each job, not the intermediate steps.
+
 .SS "Job Accounting Fields"
 The following describes each job accounting field:
 .RS 
 .TP "10"
+\f3alloccpus\fP
+Count of allocated processors.
+
+.TP
 \f3account\fP
-User supplied account number for the job
+Account the job ran under.
+
+.TP
+\f3associd\fP
+Reference to the association of user, account and cluster.
+
+.TP
+\f3avecpu\fP
+Average CPU time of a process.
+
+.TP
+\f3avepages\fP
+Average page faults of a process.
+
+.TP
+\f3averss\fP
+Average resident set size of a process.
+
+.TP
+\f3avevsize\fP
+Average Virtual Memory size of a process.
 
 .TP
 \f3blockid\fP
-Block ID, applicable to BlueGene computers only
+Block ID, applicable to BlueGene computers only.
 
 .TP
-\f3cpu\fP 
-The sum of the system time (systemcpu) and user time (usercpu) in seconds 
+\f3cluster\fP 
+Cluster name.
 
 .TP
 \f3cputime\fP
-Minimum CPU time of any process followed by its task id along with
-the average of all processes running in the step.
+Formatted number of cpu seconds a process was allocated. 
+
+.TP
+\f3cputimeraw\fP
+How much CPU time the process was allocated, in seconds rather than
+formatted as above.
 
 .TP 
 \f3elapsed\fP 
@@ -526,6 +414,10 @@ minutes
 seconds
 .RE 
 
+.TP 
+\f3eligible\fP 
+When the job became eligible to run.
+
 .TP
 \f3end\fP
 Termination time of the job. Format output is as follows:
@@ -567,26 +459,6 @@ The group identifier of the user who ran the job.
 \f3group\fP
 The group name of the user who ran the job.
 
-.TP 
-\f3idrss\fP 
-Maximum unshared data size (in KB) of any process.
-
-.TP 
-\f3inblocks\fP 
-Total block input operations for all processes.
-
-.TP 
-\f3isrss\fP 
-Maximum unshared stack space size (in KB) of any process.
-
-.TP 
-\f3ixrss\fP 
-Maximum shared memory (in KB) of any process.
-
-.TP 
-\f3job\fP 
-The SLURM job identifier of the job.
-
 .TP 
 \f3jobid\fP 
 The number of the job or job step.
@@ -599,103 +471,130 @@ It is in the form:
 The name of the job or job step.
 
 .TP 
-\f3majflt\fP 
-Maximum number of major page faults for any process.
+\f3layout\fP
+The layout of a step while it was running.  This can be used
+to give you an idea of which node ran which rank in your job.
 
-.TP 
-\f3minflt\fP 
-Maximum number of minor page faults (page reclaims) for any process.
+.TP
+\f3maxpages\fP
+Maximum page faults of a process.
 
-.TP 
-\f3msgrcv\fP 
-Total number of messages received for all processes.
+.TP
+\f3maxpagesnode\fP
+The node where the maxpages occurred.
 
-.TP 
-\f3msgsnd\fP 
-Total number of messages sent for all processes.
+.TP
+\f3maxpagestask\fP
+The task on maxpagesnode where the maxpages occurred.
+
+.TP
+\f3maxrss\fP
+Maximum resident set size of a process.
+
+.TP
+\f3maxrssnode\fP
+The node where the maxrss occurred.
+
+.TP
+\f3maxrsstask\fP
+The task on maxrssnode where the maxrss occurred.
+
+.TP
+\f3maxvmsize\fP
+Maximum Virtual Memory size of any process.
+
+.TP
+\f3maxvmsizenode\fP
+The node where the maxvmsize occurred.
+
+.TP
+\f3maxvmsizetask\fP
+The task on maxvmsizenode where the maxvmsize occurred.
+
+.TP
+\f3mincpu\fP
+Minimum CPU time of any process.
+
+.TP
+\f3mincpunode\fP
+The node where the mincpu occurred.
+
+.TP
+\f3mincputask\fP
+The task on mincpunode where the mincpu occurred.
 
 .TP 
 \f3ncpus\fP 
 Total number of CPUs allocated to the job.
 
-.TP 
-\f3nivcsw\fP 
-Total number of involuntary context switches for all processes.
+.TP
+\f3nodelist\fP
+List of nodes in job/step.
+
+.TP
+\f3nnodes\fP
+Number of nodes in a job or step.
 
 .TP 
-\f3nodes\fP 
-A list of nodes allocated to the job.
+\f3ntasks\fP 
+Total number of tasks in a job or step.
 
 .TP
-\f3nprocs\fP
-Total number of tasks in job. Identical to \f3ntasks\fP.
+\f3priority\fP
+Slurm priority.
 
 .TP 
-\f3nsignals\fP
-Total number of signals received for all processes.
+\f3partition\fP
+Identifies the partition on which the job ran.
 
 .TP
-\f3nswap\fP
-Maximum number of swap operations of any process.
+\f3qos\fP
+Name of Quality of Service.
 
 .TP
-\f3ntasks\fP 
-Total number of tasks in job.
+\f3qosraw\fP 
+ID of the Quality of Service.
 
 .TP 
-\f3nvcsw\fP 
-Total number of voluntary context switches for all processes.
+\f3reqcpus\fP 
+Required CPUs.
 
 .TP 
-\f3outblocks\fP 
-Total block output operations for all processes.
+\f3reserved\fP 
+How much wall clock time was used as reserved time for this job.  This is
+derived from how long the job waited from its eligible time until it
+actually started.
 
 .TP
-\f3pages\fP
-Maximum page faults of any process followed by its task id along with
-the average of all processes running in the step.
+\f3resvcpu\fP
+How long (in CPU seconds) the job was reserved for, as formatted time.
 
 .TP 
-\f3partition\fP 
-Identifies the partition on which the job ran.
-
-.TP 
-\f3rss\fP 
-Maximum resident set size of any process followed by its task id along with
-the average of all processes running in the step.
+\f3resvcpuraw\fP
+How long (in CPU seconds) the job was reserved for, not formatted.
 
 .TP
 \f3start\fP
 Initiation time of the job in the same format as \f3end\fP.
 
 .TP
-\f3status\fP
+\f3state\fP
 Displays the job status, or state.
-.IP 
-Output can be 
-\f3RUNNING\fP\c
-\&, 
-\f3SUSPENDED\fP\c
-\&,
-\f3COMPLETED\fP\c
-\&, 
-\f3CANCELLED\fP\c
-\&, 
-\f3FAILED\fP\c
-\&, 
-\f3TIMEOUT\fP\c
-\&, or 
-\f3NODE_FAIL\fP\c
-\&.
 
-.TP 
-\f3submit\fP 
-The time and date stamp (in Universal Time Coordinated, UTC) the job was 
-submitted.
-The format of the output is identical to that of the \f3end\fP field.
+Output can be RUNNING, SUSPENDED, COMPLETED, CANCELLED, FAILED,
+TIMEOUT, or NODE_FAIL.
 
-.TP 
-\f3systemcpu\fP 
+.TP
+\f3submit\fP
+The time and date stamp (in Coordinated Universal Time, UTC) the job
+was submitted.  The format of the output is identical to that of the
+\f3end\fP field.
+
+.TP
+\f3suspended\fP
+How long the job was suspended.
+
+.TP
+\f3systemcpu\fP
 The amount of system CPU time. (If the job was running on multiple CPUs,
 this is a combination of all the times, so this number could be much
 larger than the elapsed time.)
@@ -703,37 +602,40 @@ The format of the output is identical to that of the
 \f3elapsed\fP 
 field.
 
-.TP 
-\f3uid\fP 
-The user identifier of the user who ran the job.
+.TP
+\f3timelimit\fP
+What the time limit was/is for the job.
 
-.TP 
-\f3uid.gid\fP 
-The user and group identifiers of the user who ran the job. (This
-field is used in record headers, and simply concatenates the
-\f3uid\fP and \f3gid\fP fields.)
+.TP
+\f3totalcpu\fP
+The total amount of CPU time actually used by the job, not just
+accounted for (which is most likely a higher number). (If the job was
+running on multiple CPUs, this is a combination of all the times, so
+this number could be much larger than the elapsed time.) The format of
+the output is identical to that of the \f3elapsed\fP field.
+
+.TP
+\f3uid\fP
+The user identifier of the user who ran the job.
 
 .TP
 \f3user\fP
 The user name of the user who ran the job.
 
-.TP 
-\f3usercpu\fP 
-The amount of user CPU time. (If job was running on multiple cpus this
-is a combination of all the times so this number could be much larger
-than the elapsed time.)
-The format of the output is identical to that of the 
-\f3elapsed\fP field.
-
 .TP
-\f3vsize\fP
-Maximum Virtual Memory size of any process followed by its task id along with
-the average of all processes running in the step.
+\f3usercpu\fP
+The amount of user CPU time. (If the job was running on multiple
+CPUs, this is a combination of all the times, so this number could be
+much larger than the elapsed time.) The format of the output is
+identical to that of the \f3elapsed\fP field.
 
 .TP
 \f3wckey\fP
-Workload Characterization Key.  Arbitrary string for grouping
-orthogonal accounts together.
+Workload Characterization Key.  Arbitrary string for grouping
+orthogonal accounts together.
+
+.TP
+\f3wckeyid\fP
+Reference ID of the wckey.
 
 .RE 
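+
+For example, a hypothetical invocation using the fields documented
+above to report the per\-task memory high\-water marks:
+.nf
+.ft 3
+# sacct \-\-fields=jobid,maxrss,maxrssnode,maxrsstask,state
+.ft 1
+.fi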
 .SH "INTERPRETING THE \-DUMP OPTION OUTPUT"
@@ -1156,12 +1058,12 @@ command:
 .nf 
 .ft 3
 # sacct
-Jobid      Jobname    Partition    Ncpus Status     Exitcode
-\-\-\-\-\-\-\-\-\-\- \-\-\-\-\-\-\-\-\-\- \-\-\-\-\-\-\-\-\-\- \-\-\-\-\-\-\- \-\-\-\-\-\-\-\-\-\- \-\-\-\-\-\-\-\-
-2          script01   srun             1 RUNNING           0
-3          script02   srun             1 RUNNING           0
-4          endscript  srun             1 RUNNING           0
-4.0                   srun             1 COMPLETED         0
+Jobid      Jobname    Partition  Account    AllocCPUS  State      ExitCode
+\-\-\-\-\-\-\-\-\-\- \-\-\-\-\-\-\-\-\-\- \-\-\-\-\-\-\-\-\-\- \-\-\-\-\-\-\-\-\-\- \-\-\-\-\-\-\-\-\-\- \-\-\-\-\-\-\-\-\-\- \-\-\-\-\-\-\-\-
+2          script01   srun       acct1               1 RUNNING           0
+3          script02   srun       acct1               1 RUNNING           0
+4          endscript  srun       acct1               1 RUNNING           0
+4.0                   srun       acct1               1 COMPLETED         0
 
 .ft 1
 .fi 
@@ -1189,17 +1091,17 @@ Jobid      Status     Exitcode
 .PP
 .nf 
 .ft 3
-# sacct \-\-total
-Jobid      Jobname    Partition    Ncpus Status     Exitcode
-\-\-\-\-\-\-\-\-\-\- \-\-\-\-\-\-\-\-\-\- \-\-\-\-\-\-\-\-\-\- \-\-\-\-\-\-\- \-\-\-\-\-\-\-\-\-\- \-\-\-\-\-\-\-\-
-3          sja_init   andy             1 COMPLETED         0
-4          sjaload    andy             2 COMPLETED         0
-5          sja_scr1   andy             1 COMPLETED         0
-6          sja_scr2   andy            18 COMPLETED         2
-7          sja_scr3   andy            18 COMPLETED         0
-8          sja_scr5   andy             2 COMPLETED         0
-9          sja_scr7   andy            90 COMPLETED         1
-10         endscript  andy           186 COMPLETED         0
+# sacct \-\-allocations
+Jobid      Jobname    Partition  Account  AllocCPUS State      Exitcode
+\-\-\-\-\-\-\-\-\-\- \-\-\-\-\-\-\-\-\-\- \-\-\-\-\-\-\-\-\-\- \-\-\-\-\-\-\-\-\-\- \-\-\-\-\-\-\- \-\-\-\-\-\-\-\-\-\- \-\-\-\-\-\-\-\-
+3          sja_init   andy       acct1            1 COMPLETED         0
+4          sjaload    andy       acct1            2 COMPLETED         0
+5          sja_scr1   andy       acct1            1 COMPLETED         0
+6          sja_scr2   andy       acct1           18 COMPLETED         2
+7          sja_scr3   andy       acct1           18 COMPLETED         0
+8          sja_scr5   andy       acct1            2 COMPLETED         0
+9          sja_scr7   andy       acct1           90 COMPLETED         1
+10         endscript  andy       acct1          186 COMPLETED         0
 
 .ft 1
 .fi 
@@ -1214,14 +1116,14 @@ The fields are displayed in the order designated on the command line.
 .nf 
 .ft 3
 # sacct \-\-fields=jobid,elapsed,ncpus,ntasks,state
-Jobid        Ncpus  Ntasks  Nsignals Status
-\-\-\-\-\-\-\-\-\-\- \-\-\-\-\-\-\- \-\-\-\-\-\-\- \-\-\-\-\-\-\-\-\- \-\-\-\-\-\-\-\-\-\-
-3                2       1         0 COMPLETED
-3.0              2       1         0 COMPLETED
-4                2       2         0 COMPLETED
-4.0              2       2         0 COMPLETED
-5                2       1         0 COMPLETED
-5.0              2       1         0 COMPLETED
+Jobid      Elapsed    Ncpus        Ntasks State
+\-\-\-\-\-\-\-\-\-\- \-\-\-\-\-\-\-\-\-\- \-\-\-\-\-\-\-\-\-\- \-\-\-\-\-\-\-\- \-\-\-\-\-\-\-\-\-\-
+3            00:01:30          2        1 COMPLETED
+3.0          00:01:30          2        1 COMPLETED
+4            00:00:00          2        2 COMPLETED
+4.0          00:00:01          2        2 COMPLETED
+5            00:01:23          2        1 COMPLETED
+5.0          00:01:31          2        1 COMPLETED
 
 .ft 1
 .fi 
@@ -1230,6 +1132,9 @@ Jobid        Ncpus  Ntasks  Nsignals Status
 
 Copyright (C) 2005\-2007 Hewlett\-Packard Development Company L.P.
 .LP
+Copyright (C) 2008\-2009 Lawrence Livermore National Security. Produced at Lawrence Livermore National Laboratory (cf,
+DISCLAIMER). CODE\-OCEC\-09\-009. All rights reserved.
+.LP
 This file is part of SLURM, a resource management program.
 For details, see <https://computing.llnl.gov/linux/slurm/>.
 .LP
@@ -1252,4 +1157,4 @@ designate the job accounting log file that collects system job accounting.
 The default job accounting log file.
 By default, this file is set to read and write permission for root only.
 .SH "SEE ALSO"
-ps(1), srun(1), squeue(1), getrusage(2), time(2)
+sstat(1), ps(1), srun(1), squeue(1), getrusage(2), time(2)
diff --git a/doc/man/man1/sacctmgr.1 b/doc/man/man1/sacctmgr.1
index ca40ffc6b5761377148cca9e49560ac5dfdba645..cb6e5eb087deb9b14d2b03cb426552982e39f289 100644
--- a/doc/man/man1/sacctmgr.1
+++ b/doc/man/man1/sacctmgr.1
@@ -1,4 +1,4 @@
-.TH SACCTMGR "1" "October 2008" "sacctmgr 1.3" "Slurm components"
+.TH SACCTMGR "1" "April 2009" "sacctmgr 2.0" "Slurm components"
 
 .SH "NAME"
 sacctmgr \- Used to view and modify Slurm account information.
@@ -35,7 +35,7 @@ This is equivalent to the \fBhelp\fR command.
 commit changes immediately.
 
 .TP
-\fB\-n\fR, \fB\-\-no_header\fR
+\fB\-n\fR, \fB\-\-noheader\fR
 No header will be added to the beginning of the output.
 
 .TP
@@ -97,7 +97,7 @@ Identical to the \fBadd\fR command.
 Delete the specified entities.
 
 .TP
-\fBdump\fR <\fIENTITY\fR> with <\fFile=IFILENAME\fR>
+\fBdump\fR <\fIENTITY\fR> with <\fIFile=FILENAME\fR>
 Dump cluster data to the specified file.
 
 .TP
@@ -179,6 +179,11 @@ The entity used to group information consisting of four parameters:
 The \fIClusterName\fR parameter in the \fIslurm.conf\fR configuration
 file, used to differentiate accounts on different machines.
 
+.TP
+\fIconfiguration\fP
+Used only with the \fIlist\fR or \fIshow\fR command to report current
+system configuration. 
+
 .TP
 \fIcoordinator\fR
 A special privileged user usually an account manager or such that can
@@ -210,13 +215,14 @@ Number used in conjunction with other accounts to determine job priority.
 To clear a previously set value use the modify command with a new value of \-1.
 
 .TP
-\fIGrpCPUMins\fP=<max cpu hours> 
-Maximum number of CPU hours running jobs are able to be allocated in aggregate for 
-this association and all association which are children of this association.
+\fIGrpCPUMins\fP=<max cpu minutes> 
+Maximum number of CPU minutes running jobs are able to be allocated in 
+aggregate for this association and all associations which are children
+of this association.
 To clear a previously set value use the modify command with a new
-value of \-1.  (NOTE: This limit is not currently enforced in SLURM.
-You can still set this, but have to wait for future versions of SLURM
-before it is enforced.)
+value of \-1.  (NOTE: This limit is not enforced if set on the root
+association of a cluster.  So even though it may appear in sacctmgr
+output it will not be enforced.)
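+For example, a previously set value might be cleared like this
+(account and cluster names are hypothetical):
+.nf
+   sacctmgr modify account where name=acct1 cluster=snowflake \e
+        set GrpCPUMins=\-1
+.fi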
 
 .TP
 \fIGrpCPUs\fP=<max cpus>
@@ -251,6 +257,9 @@ To clear a previously set value use the modify command with a new value of \-1.
 Maximum wall clock time running jobs are able to be allocated in aggregate for
 this association and all associations which are children of this association.
 To clear a previously set value use the modify command with a new value of \-1.
+(NOTE: This limit is not enforced if set on the root
+association of a cluster.  So even though it may appear in sacctmgr
+output it will not be enforced.)
 
 .TP
 \fIMaxCPUMins\fP=<max cpu minutes> 
@@ -258,9 +267,7 @@ Maximum number of CPU minutes each job is able to use in this account.
 This is overridden if set directly on a user. 
 Default is the cluster's limit.
 To clear a previously set value use the modify command with a new
-value of \-1.  (NOTE: This limit is not currently enforced in SLURM.
-You can still set this, but have to wait for future versions of SLURM
-before it is enforced.)
+value of \-1. 
 
 .TP
 \fIMaxCPUs\fP=<max cpus>
@@ -468,6 +475,18 @@ controller is placed here.
 When a slurmctld registers with the database the port the controller
 is listening on is placed here.
 
+.TP
+\fICPU Count\fP
+The current count of CPUs on the cluster.
+
+.TP
+\fINodeNames\fP
+The nodes currently associated with the cluster.
+
+.TP
+\fINodeCount\fP
+The current count of nodes associated with the cluster.
+
 .TP
 \fIRPC\fP
 When a slurmctld registers with the database the rpc version the controller
@@ -858,7 +877,7 @@ Maximum number of jobs this user can run.
 \fIMaxNodesPerJob=\fP
 Maximum number of nodes per job this user can run.
 .TP
-\fIMaxProcSecondsPerJob= 
+\fIMaxProcSecondsPerJob=\fP
 Maximum cpu seconds this user can run per job.
 .TP
 \fIMaxWallDurationPerJob=\fP
@@ -868,6 +887,64 @@ Maximum time (not related to job size) this user can run.
 Comma separated list of Quality of Service names (Defined in sacctmgr).
 .RE
 
+.SH "ARCHIVE FUNCTIONALITY"
+Sacctmgr has the capability to archive data to a flat file and/or
+load that data back if needed later.  Archiving is usually done by
+the slurmdbd, and it is highly recommended that you only do it
+through sacctmgr if you completely understand what you are doing.
+For slurmdbd options see "man slurmdbd" for more information.
+Data can be loaded into the database from these files either to view
+old data or to regenerate rolled\-up data.
+
+The options for both dumping and loading archive information are
+listed below.
+
+archive dump
+
+.TP
+\fIDirectory=\fP
+Directory to store the archive data.
+.TP
+\fIEvents\fP
+Archive events.  If not specified and PurgeEventMonths is set,
+all removed event data will be lost permanently.
+.TP
+\fIJobs\fP
+Archive jobs.  If not specified and PurgeJobMonths is set,
+all removed job data will be lost permanently.
+.TP
+\fIPurgeEventMonths=\fP
+Purge cluster event records older than time stated in months.
+.TP
+\fIPurgeJobMonths=\fP
+Purge job records older than time stated in months.
+.TP
+\fIPurgeStepMonths=\fP
+Purge step records older than time stated in months.
+.TP
+\fIPurgeSuspendMonths=\fP
+Purge job suspend records older than time stated in months.
+.TP
+\fIScript=\fP
+Run this script instead of the generic flat\-file archive process.
+.TP
+\fISteps\fP
+Archive steps.  If not specified and PurgeStepMonths is set,
+all removed step data will be lost permanently.
+.TP
+\fISuspend\fP
+Archive suspend data.  If not specified and PurgeSuspendMonths is set,
+all removed suspend data will be lost permanently.
+.RE    
+
+archive load
+.TP
+\fIFile=\fP
+File to load into database.
+.TP
+\fIInsert=\fP
+SQL to insert directly into the database.  This should be used very
+cautiously since it writes your SQL directly into the database.
+.RE
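+
+A minimal sketch of the two operations, with hypothetical directory,
+file name, and retention values:
+.nf
+   sacctmgr archive dump Directory=/tmp/archive Events Jobs \e
+        Steps Suspend PurgeEventMonths=6 PurgeJobMonths=6
+   sacctmgr archive load File=/tmp/archive/job_archive
+.fi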
 
 .SH "EXAMPLES"
 .eo
@@ -948,9 +1025,9 @@ sacctmgr line.
 .ec
 
 .SH "COPYING"
-Copyright (C) 2008 Lawrence Livermore National Security.
+Copyright (C) 2008\-2009 Lawrence Livermore National Security.
 Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
-LLNL\-CODE\-402394.
+CODE\-OCEC\-09\-009. All rights reserved.
 .LP
 This file is part of SLURM, a resource management program.
 For details, see <https://computing.llnl.gov/linux/slurm/>.
diff --git a/doc/man/man1/salloc.1 b/doc/man/man1/salloc.1
index 62778dcfdc832f919106f23b9a24a88cbc73a38f..04388acc37804372cca7dace566005fa1f4dab8b 100644
--- a/doc/man/man1/salloc.1
+++ b/doc/man/man1/salloc.1
@@ -1,15 +1,13 @@
-.TH "salloc" "1" "SLURM 1.3" "August 2008" "SLURM Commands"
+.TH "salloc" "1" "SLURM 2.0" "April 2009" "SLURM Commands"
+
 .SH "NAME"
-.LP 
 salloc \- Obtain a SLURM job allocation (a set of nodes), execute a command, 
 and then release the allocation when the command is finished.
 
 .SH "SYNOPSIS"
-.LP 
 salloc [\fIoptions\fP] [<\fIcommand\fP> [\fIcommand args\fR]]
 
 .SH "DESCRIPTION"
-.LP 
 salloc is used to allocate a SLURM job allocation, which is a set of resources 
 (nodes), possibly with some set of constraints (e.g. number of processors per 
 node).  When salloc successfully obtains the requested allocation, it then runs 
@@ -24,10 +22,10 @@ section). If no command is specified, then the value of
 user's default shell.
 
 .SH "OPTIONS"
-.LP 
+.LP
 
 .TP
-\fB\-\-acctg\-freq\fR=\fIseconds\fR
+\fB\-\-acctg\-freq\fR=<\fIseconds\fR>
 Define the job accounting sampling interval.
 This can be used to override the \fIJobAcctGatherFrequency\fR parameter in SLURM's
 configuration file, \fIslurm.conf\fR.
 A value of zero disables the periodic job sampling and provides accounting
 information only on job termination (reducing SLURM interference with the job).
 
 .TP
-\fB\-B\fR \fB\-\-extra\-node\-info\fR=\fIsockets\fR[:\fIcores\fR[:\fIthreads\fR]]
+\fB\-B\fR \fB\-\-extra\-node\-info\fR=<\fIsockets\fR[:\fIcores\fR[:\fIthreads\fR]]>
 Request a specific allocation of resources with details as to the
 number and type of computational resources within a cluster:
 number of sockets (or physical processors) per node,
@@ -48,9 +46,9 @@ resources of that type are to be utilized.
 As with nodes, the individual levels can also be specified in separate
 options if desired:
 .nf
-    \fB\-\-sockets\-per\-node\fR=\fIsockets\fR
-    \fB\-\-cores\-per\-socket\fR=\fIcores\fR
-    \fB\-\-threads\-per\-core\fR=\fIthreads\fR
+    \fB\-\-sockets\-per\-node\fR=<\fIsockets\fR>
+    \fB\-\-cores\-per\-socket\fR=<\fIcores\fR>
+    \fB\-\-threads\-per\-core\fR=<\fIthreads\fR>
 .fi
 When the task/affinity plugin is enabled,
 specifying an allocation in this manner also instructs SLURM to use
@@ -63,7 +61,7 @@ If select/cons_res is configured, it must have a parameter of CR_Core,
 CR_Core_Memory, CR_Socket, or CR_Socket_Memory. 
 
 .TP
-\fB\-\-begin\fR[=]<\fItime\fR>
+\fB\-\-begin\fR=<\fItime\fR>
 Submit the batch script to the SLURM controller immediately, like normal, but
 tell the controller to defer the allocation of the job until the specified time.
 
@@ -75,16 +73,24 @@ You may also specify \fImidnight\fR, \fInoon\fR, or
 with \fIAM\fR or \fIPM\fR for running in the morning or the evening.
 You can also say what day the job will be run, by specifying
 a date of the form \fIMMDDYY\fR or \fIMM/DD/YY\fR
-or \fIMM.DD.YY\fR. You can also
+or \fIYYYY\-MM\-DD\fR.  Combine date and time using the following
+format \fIYYYY\-MM\-DD[THH[:MM[:SS]]]\fR. You can also
 give times like \fInow + count time\-units\fR, where the time\-units
-can be \fIminutes\fR, \fIhours\fR, \fIdays\fR, or \fIweeks\fR
-and you can tell SLURM to run the job today with the keyword
-\fItoday\fR and to run the job tomorrow with the keyword
-\fItomorrow\fR.
+can be \fIseconds\fR (default), \fIminutes\fR, \fIhours\fR,
+\fIdays\fR, or \fIweeks\fR and you can tell SLURM to run
+the job today with the keyword \fItoday\fR and to run the
+job tomorrow with the keyword \fItomorrow\fR.
 The value may be changed after job submission using the
 \fBscontrol\fR command.
+For example:
+.nf
+   \-\-begin=16:00
+   \-\-begin=now+1hour
+   \-\-begin=now+60           (seconds by default)
+   \-\-begin=2010\-01\-20T12:34:00
+.fi
 
-.TP 
+.TP
 \fB\-\-bell\fR
 Force salloc to ring the terminal bell when the job allocation is granted
 (and only if stdout is a tty).  By default, salloc only rings the bell
@@ -92,7 +98,11 @@ if the allocation is pending for more than ten seconds (and only if stdout
 is a tty). Also see the option \fB\-\-no\-bell\fR.
 
 .TP
-\fB\-C\fR, \fB\-\-constraint\fR[=]<\fIlist\fR>
+\fB\-\-comment\fR=<\fIstring\fR>
+An arbitrary comment.
+
+.TP
+\fB\-C\fR, \fB\-\-constraint\fR=<\fIlist\fR>
 Specify a list of constraints. 
 The constraints are features that have been assigned to the nodes by 
 the slurm administrator. 
@@ -107,7 +117,7 @@ There is no mechanism to specify that you want one node with feature
 node has both features.
 If only one of a set of possible options should be used for all allocated 
 nodes, then use the OR operator and enclose the options within square brackets. 
-For example: "\fB\-\-constraint="[rack1|rack2|rack3|rack4]"\fR might 
+For example: "\fB\-\-constraint=[rack1|rack2|rack3|rack4]"\fR might 
 be used to specify that all nodes must be allocated on a single rack of 
 the cluster, but any of those four racks can be used.
 A request can also specify the number of nodes needed with some feature
@@ -120,16 +130,117 @@ If no nodes have the requested features, then the job will be rejected
 by the slurm job manager.
 
 .TP
-\fB\-\-comment\fR=<\fIstring\fR>
-An arbitrary comment.
+\fB\-\-contiguous\fR
+If set, then the allocated nodes must form a contiguous set.
+Not honored with the \fBtopology/tree\fR or \fBtopology/3d_torus\fR
+plugins, both of which can modify the node ordering.
+
+.TP
+\fB\-\-cpu_bind\fR=[{\fIquiet,verbose\fR},]\fItype\fR
+Bind tasks to CPUs. Used only when the task/affinity plugin is enabled.
+The configuration parameter \fBTaskPluginParam\fR may override these options.
+For example, if \fBTaskPluginParam\fR is configured to bind to cores,
+your job will not be able to bind tasks to sockets.
+NOTE: To have SLURM always report on the selected CPU binding for all
+commands executed in a shell, you can enable verbose mode by setting
+the SLURM_CPU_BIND environment variable value to "verbose".
+
+The following informational environment variables are set when \fB\-\-cpu_bind\fR
+is in use:
+.nf
+        SLURM_CPU_BIND_VERBOSE
+        SLURM_CPU_BIND_TYPE
+        SLURM_CPU_BIND_LIST
+.fi
 
+See the \fBENVIRONMENT VARIABLES\fR section for a more detailed description
+of the individual SLURM_CPU_BIND* variables.
+
+When using \fB\-\-cpus\-per\-task\fR to run multithreaded tasks, be aware that
+CPU binding is inherited from the parent of the process.  This means that
+the multithreaded task should either specify or clear the CPU binding
+itself to avoid having all threads of the multithreaded task use the same
+mask/CPU as the parent.  Alternatively, fat masks (masks which specify more
+than one allowed CPU) could be used for the tasks in order to provide
+multiple CPUs for the multithreaded tasks.
+
+By default, a job step has access to every CPU allocated to the job.
+To ensure that distinct CPUs are allocated to each job step, use the
+\fB\-\-exclusive\fR option.
+
+If the job step allocation includes an allocation with a number of
+sockets, cores, or threads equal to the number of tasks to be started
+then the tasks will by default be bound to the appropriate resources.
+Disable this mode of operation by explicitly setting "\-\-cpu_bind=none".
+
+Note that a job step can be allocated different numbers of CPUs on each node
+or be allocated CPUs not starting at location zero. Therefore one of the
+options which automatically generate the task binding is recommended.
+Explicitly specified masks or bindings are only honored when the job step
+has been allocated every available CPU on the node.
+
+Binding a task to a NUMA locality domain means to bind the task to the set of
+CPUs that belong to the NUMA locality domain or "NUMA node".
+If NUMA locality domain options are used on systems with no NUMA support, then
+each socket is considered a locality domain.
+
+Supported options include:
+.PD 1
+.RS
 .TP
-\fB\-\-contiguous\fR
-Demand a contiguous range of nodes. The default is "yes". Specify
-\-\-contiguous=no if a contiguous range of nodes is not required.
+.B q[uiet]
+Quietly bind before task runs (default)
+.TP
+.B v[erbose]
+Verbosely report binding before task runs
+.TP
+.B no[ne]
+Do not bind tasks to CPUs (default)
+.TP
+.B rank
+Automatically bind by task rank.
+Task zero is bound to socket (or core or thread) zero, etc.
+Not supported unless the entire node is allocated to the job.
+.TP
+.B map_cpu:<list>
+Bind by mapping CPU IDs to tasks as specified
+where <list> is <cpuid1>,<cpuid2>,...<cpuidN>.
+CPU IDs are interpreted as decimal values unless they are preceded
+with '0x' in which case they are interpreted as hexadecimal values.
+Not supported unless the entire node is allocated to the job.
+.TP
+.B mask_cpu:<list>
+Bind by setting CPU masks on tasks as specified
+where <list> is <mask1>,<mask2>,...<maskN>.
+CPU masks are \fBalways\fR interpreted as hexadecimal values but can be
+preceded with an optional '0x'.
+.TP
+.B sockets
+Automatically generate masks binding tasks to sockets.
+If the number of tasks differs from the number of allocated sockets
+this can result in sub\-optimal binding.
+.TP
+.B cores
+Automatically generate masks binding tasks to cores.
+If the number of tasks differs from the number of allocated cores
+this can result in sub\-optimal binding.
+.TP
+.B threads
+Automatically generate masks binding tasks to threads.
+If the number of tasks differs from the number of allocated threads
+this can result in sub\-optimal binding.
+.TP
+.B ldoms
+Automatically generate masks binding tasks to NUMA locality domains.
+If the number of tasks differs from the number of allocated locality domains
+this can result in sub\-optimal binding.
+.TP
+.B help
+Show this help message
+.RE
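+
+For example, the following hypothetical session requests one node and
+binds the tasks of a subsequent srun to cores (the program name is
+illustrative):
+.nf
+   salloc \-N1 \-\-cpu_bind=verbose,cores srun \-n2 a.out
+.fi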
 
-.TP 
-\fB\-c\fR, \fB\-\-cpus\-per\-task\fR[=]<\fIncpus\fR>
+.TP
+\fB\-c\fR, \fB\-\-cpus\-per\-task\fR=<\fIncpus\fR>
 Advise the SLURM controller that ensuing job steps will require \fIncpus\fR 
 number of processors per task.  Without this option, the controller will
 just try to allocate one processor per task.
@@ -143,8 +254,8 @@ the \-\-cpus\-per\-task=3 options, the controller knows that each task requires
 of 4 nodes, one for each of the 4 tasks.
 
 .TP
-\fB\-D\fR, \fB\-\-chdir\fR=\fIpath\fR
-change directory to \fIpath\fR before beginning execution. 
+\fB\-D\fR, \fB\-\-chdir\fR=<\fIpath\fR>
+Change directory to \fIpath\fR before beginning execution.
 
 .TP
 \fB\-\-exclusive\fR
 the opposite of \-\-share, whichever option is seen last on the command line
 will win.  (The default shared/exclusive behaviour depends on system
 configuration.)
 
-.TP 
-\fB\-F\fR, \fB\-\-nodefile\fR[=]<\fInode file\fR>
+.TP
+\fB\-F\fR, \fB\-\-nodefile\fR=<\fInode file\fR>
 Much like \-\-nodelist, but the list is contained in a file of name
 \fInode file\fR.  The node names of the list may also span multiple lines
 in the file.    Duplicate node names in the file will be ignored.
 The order of the node names in the list is not important; the node names
-will be sorted my SLURM.
+will be sorted by SLURM.
 
 .TP
 \fB\-\-get\-user\-env\fR[=\fItimeout\fR][\fImode\fR]
-This option will load login environment variables for the user specified 
+This option will load login environment variables for the user specified
 in the \fB\-\-uid\fR option.
 The environment variables are retrieved by running something of this sort
-"su \- <username> \-c /usr/bin/env" and parsing the output.  
-Be aware that any environment variables already set in salloc's environment 
-will take precedence over any environment variables in the user's 
+"su \- <username> \-c /usr/bin/env" and parsing the output.
+Be aware that any environment variables already set in salloc's environment
+will take precedence over any environment variables in the user's
 login environment.
 The optional \fItimeout\fR value is in seconds. Default value is 3 seconds.
 The optional \fImode\fR value controls the "su" options.
 With a \fImode\fR value of "S", "su" is executed without the "\-" option.
 With a \fImode\fR value of "L", "su" is executed with the "\-" option,
 replicating the login environment.
-If \fImode\fR not specified, the mode established at SLURM build time 
+If \fImode\fR is not specified, the mode established at SLURM build time
 is used.
 Examples of use include "\-\-get\-user\-env", "\-\-get\-user\-env=10"
 "\-\-get\-user\-env=10L", and "\-\-get\-user\-env=S".
 NOTE: This option only works if the caller has an
-effective uid of "root".  
+effective uid of "root".
 This option was originally created for use by Moab.
 
 .TP
-\fB\-\-gid\fR[=]<\fIgroup\fR>
+\fB\-\-gid\fR=<\fIgroup\fR>
 If \fBsalloc\fR is run as root, and the \fB\-\-gid\fR option is used, 
 submit the job with \fIgroup\fR's group access permissions.  \fIgroup\fR 
 may be the group name or the numerical group ID.
 
-.TP 
+.TP
 \fB\-h\fR, \fB\-\-help\fR
 Display help information and exit.
 
 .TP
-\fB\-\-hint\fR=\fItype\fR
+\fB\-\-hint\fR=<\fItype\fR>
 Bind tasks according to application hints
 .RS
 .TP
@@ -209,28 +320,29 @@ use only one core in each physical CPU
 .B [no]multithread
 [don't] use extra threads with in-core multi-threading
 which can benefit communication intensive applications
+.TP
 .B help
 show this help message
 .RE
 
-.TP 
-\fB\-I\fR,\fB\-\-immediate\fR
-Grab the requested resources immediately, or abort if the resources are not
-currently available.  The \fIcommand\fR parameter will not be run if the resources
-are not available.
+.TP
+\fB\-I\fR, \fB\-\-immediate\fR
+Grab the requested resources immediately, or abort if the resources
+are not currently available.  The \fIcommand\fR parameter will not be
+run if the resources are not available.
 
 .TP
-\fB\-J\fR, \fB\-\-job\-name\fR[=]<\fIjobname\fR>
+\fB\-J\fR, \fB\-\-job\-name\fR=<\fIjobname\fR>
 Specify a name for the job allocation. The specified name will appear along with
 the job id number when querying running jobs on the system.  The default job
 name is the name of the "command" specified on the command line.
 
 .TP
 \fB\-\-jobid\fR=<\fIjobid\fR>
-Allocate resources as the specified job id. 
+Allocate resources as the specified job id.
 NOTE: Only valid for user root.
 
-.TP 
+.TP
 \fB\-K\fR, \fB\-\-kill\-command\fR[=\fIsignal\fR]
 salloc always runs a user\-specified command once the allocation is
 granted.  salloc will wait indefinitely for that command to exit.
@@ -243,7 +355,7 @@ name or number, the default signal is SIGTERM.
 
 .TP
 \fB\-k\fR, \fB\-\-no\-kill\fR
-Do not automatically terminate a job of one of the nodes it has been 
+Do not automatically terminate a job if one of the nodes it has been
 allocated fails.  The user will assume the responsibilities for fault\-tolerance
 should a node fail.  When there is a node failure, any active job steps (usually
 MPI jobs) on that node will almost certainly suffer a fatal error, but with
@@ -254,7 +366,7 @@ By default SLURM terminates the entire job allocation if any node fails in its
 range of allocated nodes.
 
 .TP
-\fB\-L\fR, \fB\-\-licenses\fR=
+\fB\-L\fR, \fB\-\-licenses\fR=<\fIlicense\fR>
 Specification of licenses (or other resources available on all 
 nodes of the cluster) which must be allocated to this job.
 License names can be followed by an asterisk and count 
@@ -264,8 +376,10 @@ Multiple license names should be comma separated (e.g.
 
 .TP
 \fB\-m\fR, \fB\-\-distribution\fR=
-(\fIblock\fR|\fIcyclic\fR|\fIarbitrary\fR|\fIplane=<options>\fR)
-Specify an alternate distribution method for remote processes.
+<\fIblock\fR|\fIcyclic\fR|\fIarbitrary\fR|\fIplane=<options>\fR>
+Specify an alternate distribution method for remote processes.  In
+salloc this only sets environment variables that will be used by
+subsequent srun requests.
 .RS
 .TP
 .B block
@@ -291,12 +405,12 @@ The options include a number representing the size of the task block.
 This is followed by an optional specification of the task distribution 
 scheme within a block of tasks and between the blocks of tasks.
 For more details (including examples and diagrams), please see
-.na
-.nh
-https://computing.llnl.gov/linux/slurm/mc_support.html and
+.br
+https://computing.llnl.gov/linux/slurm/mc_support.html
+.br
+and
+.br
 https://computing.llnl.gov/linux/slurm/dist_plane.html.
-.hy
-.ad
 .TP
 .B arbitrary
 The arbitrary method of distribution will allocate processes in\-order as 
@@ -308,19 +422,19 @@ contain at minimum the number of hosts requested.  If requesting tasks
 .RE
 
 .TP
-\fB\-\-mail\-type\fR=\fItype\fR
+\fB\-\-mail\-type\fR=<\fItype\fR>
 Notify user by email when certain event types occur. 
 Valid \fItype\fR values are BEGIN, END, FAIL, ALL (any state change). 
 The user to be notified is indicated with \fB\-\-mail\-user\fR. 
 
 .TP
-\fB\-\-mail\-user\fR=\fIuser\fR
+\fB\-\-mail\-user\fR=<\fIuser\fR>
 User to receive email notification of state changes as defined by 
 \fB\-\-mail\-type\fR.
-The default value is the username of the submitting user.
+The default value is the submitting user.
 
 .TP
-\fB\-\-mem\fR[=]<\fIMB\fR>
+\fB\-\-mem\fR=<\fIMB\fR>
 Specify the real memory required per node in MegaBytes.
 Default value is \fBDefMemPerNode\fR and the maximum value is
 \fBMaxMemPerNode\fR. If configured, both of parameters can be
@@ -331,7 +445,7 @@ Also see \fB\-\-mem\-per\-cpu\fR.
 \fB\-\-mem\fR and \fB\-\-mem\-per\-cpu\fR are mutually exclusive.
 
 .TP
-\fB\-\-mem\-per\-cpu\fR[=]<\fIMB\fR>
+\fB\-\-mem\-per\-cpu\fR=<\fIMB\fR>
 Minimum memory required per allocated CPU in MegaBytes.
 Default value is \fBDefMemPerCPU\fR and the maximum value is
 \fBMaxMemPerCPU\fR. If configured, both of parameters can be 
@@ -342,34 +456,95 @@ Also see \fB\-\-mem\fR.
 \fB\-\-mem\fR and \fB\-\-mem\-per\-cpu\fR are mutually exclusive.
 
 .TP
-\fB\-\-mincores\fR[=]<\fIn\fR>
+\fB\-\-mem_bind\fR=[{\fIquiet,verbose\fR},]\fItype\fR
+Bind tasks to memory. Used only when the task/affinity plugin is enabled 
+and the NUMA memory functions are available.
+\fBNote that the resolution of CPU and memory binding 
+may differ on some architectures.\fR For example, CPU binding may be performed 
+at the level of the cores within a processor while memory binding will 
+be performed at the level of nodes, where the definition of "nodes" 
+may differ from system to system. \fBThe use of any type other than 
+"none" or "local" is not recommended.\fR
+If you want greater control, try running a simple test code with the 
+options "\-\-cpu_bind=verbose,none \-\-mem_bind=verbose,none" to determine 
+the specific configuration.
+
+NOTE: To have SLURM always report on the selected memory binding for
+all commands executed in a shell, you can enable verbose mode by
+setting the SLURM_MEM_BIND environment variable value to "verbose".
+
+The following informational environment variables are set when \fB\-\-mem_bind\fR
+is in use:
+
+.nf
+        SLURM_MEM_BIND_VERBOSE
+        SLURM_MEM_BIND_TYPE
+        SLURM_MEM_BIND_LIST
+.fi
+
+See the \fBENVIRONMENT VARIABLES\fR section for a more detailed description
+of the individual SLURM_MEM_BIND* variables.
+
+Supported options include:
+.RS
+.TP
+.B q[uiet]
+quietly bind before task runs (default)
+.TP
+.B v[erbose]
+verbosely report binding before task runs
+.TP
+.B no[ne]
+don't bind tasks to memory (default)
+.TP
+.B rank
+bind by task rank (not recommended)
+.TP
+.B local
+Use memory local to the processor in use
+.TP
+.B map_mem:<list>
+bind by mapping a node's memory to tasks as specified
+where <list> is <cpuid1>,<cpuid2>,...<cpuidN>.
+CPU IDs are interpreted as decimal values unless they are preceded
+with '0x' in which case they are interpreted as hexadecimal values
+(not recommended)
+.TP
+.B mask_mem:<list>
+bind by setting memory masks on tasks as specified
+where <list> is <mask1>,<mask2>,...<maskN>.
+memory masks are \fBalways\fR interpreted as hexadecimal values.
+Note that masks must be preceded with a '0x' if they don't begin
+with [0-9] so they are seen as numerical values by srun.
+.TP
+.B help
+show this help message
+.RE
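+
+For example, the simple test suggested above could be run as follows
+(the program name is illustrative):
+.nf
+   salloc \-N1 \-\-cpu_bind=verbose,none \-\-mem_bind=verbose,none \e
+        srun \-n2 a.out
+.fi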
+
+.TP
+\fB\-\-mincores\fR=<\fIn\fR>
 Specify a minimum number of cores per socket.
 
 .TP
-\fB\-\-mincpus\fR[=]<\fIn\fR>
-Specify minimum number of cpus per node.
+\fB\-\-mincpus\fR=<\fIn\fR>
+Specify a minimum number of logical cpus/processors per node.
 
 .TP
-\fB\-\-minsockets\fR[=]<\fIn\fR>
+\fB\-\-minsockets\fR=<\fIn\fR>
 Specify a minimum number of sockets (physical processors) per node.
 
 .TP
-\fB\-\-minthreads\fR[=]<\fIn\fR>
+\fB\-\-minthreads\fR=<\fIn\fR>
 Specify a minimum number of threads per core.
 
-.TP 
-\fB\-N\fR, \fB\-\-nodes\fR[=]<\fInumber|[min]\-[max]\fR>
-Specify the number of nodes to be used by this job step.  This option accepts
-either a single number, or a range of possible node counts.  If a single
-number is used, such as "\-N 4", then the allocation is asking for four and
-ONLY four nodes.  If a range is specified, such as "\-N 2\-6", the SLURM
-controller may grant salloc anywhere from 2 to 6 nodes.  When using a range,
-either of the min or max options may be omitted.  For instance, "\-N 10\-"
-means "no fewer than 10 nodes", and "\-N \-20" means "no more than 20 nodes".
-The default value of this option is one node, but other options implicitly
-increase the default node count. 
-The job will be allocated as many nodes as possible within the range specified 
-and without delaying the initiation of the job.
+.TP
+\fB\-N\fR, \fB\-\-nodes\fR=<\fIminnodes\fR[\-\fImaxnodes\fR]>
+Request that a minimum of \fIminnodes\fR nodes be allocated to this job.
+The scheduler may decide to launch the job on more than \fIminnodes\fR nodes.
+A limit on the maximum node count may be specified with \fImaxnodes\fR
+(e.g. "\-\-nodes=2\-4").  The minimum and maximum node count may be the
+same to specify a specific number of nodes (e.g. "\-\-nodes=2\-2" will ask
+for two and ONLY two nodes).
 The partition's node limits supersede those of the job. 
 If a job's node limits are outside of the range permitted for its 
 associated partition, the job will be left in a PENDING state. 
@@ -377,9 +552,17 @@ This permits possible execution at a later time, when the partition
 limit is changed.
 If a job node limit exceeds the number of nodes configured in the 
 partition, the job will be rejected.
+Note that the environment 
+variable \fBSLURM_NNODES\fR will be set to the count of nodes actually 
+allocated to the job. See the \fBENVIRONMENT VARIABLES\fR section
+for more information.  If \fB\-N\fR is not specified, the default
+behavior is to allocate enough nodes to satisfy the requirements of
+the \fB\-n\fR and \fB\-c\fR options.
+The job will be allocated as many nodes as possible within the range specified
+and without delaying the initiation of the job.
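+For example (the command run in the allocation is illustrative):
+.nf
+   salloc \-\-nodes=2\-4 bash     (two to four nodes)
+   salloc \-\-nodes=2\-2 bash     (exactly two nodes)
+.fi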
 
 .TP
-\fB\-n\fR, \fB\-\-ntasks\fR[=]<\fInumber\fR>
+\fB\-n\fR, \fB\-\-ntasks\fR=<\fInumber\fR>
 salloc does not launch tasks; it requests an allocation of resources and
 executes some command. This option advises the SLURM controller that job
 steps run within this allocation will launch a maximum of \fInumber\fR
@@ -389,23 +572,23 @@ of the \fISelectTypeParameters\fR parameter in slurm.conf), but note
 that the \fB\-\-cpus\-per\-task\fR option will change this default.
 
 .TP
-\fB\-\-network\fR=\fItype\fR
+\fB\-\-network\fR=<\fItype\fR>
 Specify the communication protocol to be used. 
 This option is supported on AIX systems.
 Since POE is used to launch tasks, this option is not normally used or 
 is specified using the \fBSLURM_NETWORK\fR environment variable.
-The interpretation of \fItype\fR is system dependent. 
+The interpretation of \fItype\fR is system dependent.
 For systems with an IBM Federation switch, the following 
-comma\-separated and case insensitive types are recongnized:
+comma\-separated and case insensitive types are recognized:
 \fBIP\fR (the default is user\-space), \fBSN_ALL\fR, \fBSN_SINGLE\fR, 
 \fBBULK_XFER\fR and adapter names  (e.g. \fBSNI0\fR and \fBSNI1\fR). 
-For more information, on IBM systems see \fIpoe\fR documenation on 
+For more information, on IBM systems see \fIpoe\fR documentation on
 the environment variables \fBMP_EUIDEVICE\fR and \fBMP_USE_BULK_XFER\fR.
 Note that only four jobs steps may be active at once on a node with the 
 \fBBULK_XFER\fR option due to limitations in the Federation switch driver.
 
-.TP 
-\fB\-\-nice\fR[=]<\fIadjustment\fR>
+.TP
+\fB\-\-nice\fR[=\fIadjustment\fR]
 Run the job with an adjusted scheduling priority within SLURM.
 With no adjustment value the scheduling priority is decreased
 by 100. The adjustment range is from \-10000 (highest priority)
@@ -415,27 +598,29 @@ ignored if \fISchedulerType=sched/wiki\fR or
 \fISchedulerType=sched/wiki2\fR.
 
 .TP
-\fB\-\-ntasks\-per\-core\fR=\fIntasks\fR
+\fB\-\-ntasks\-per\-core\fR=<\fIntasks\fR>
 Request that no more than \fIntasks\fR be invoked on each core.
 Similar to \fB\-\-ntasks\-per\-node\fR except at the core level
 instead of the node level.  Masks will automatically be generated
 to bind the tasks to specific core unless \fB\-\-cpu_bind=none\fR
 is specified.
-NOTE: This option is not supported unless \fISelectType=CR_Core\fR
-or \fISelectType=CR_Core_Memory\fR is configured.
+NOTE: This option is not supported unless
+\fISelectTypeParameters=CR_Core\fR or
+\fISelectTypeParameters=CR_Core_Memory\fR is configured.
 
 .TP
-\fB\-\-ntasks\-per\-socket\fR=\fIntasks\fR
+\fB\-\-ntasks\-per\-socket\fR=<\fIntasks\fR>
 Request that no more than \fIntasks\fR be invoked on each socket.
 Similar to \fB\-\-ntasks\-per\-node\fR except at the socket level
 instead of the node level.  Masks will automatically be generated
 to bind the tasks to specific sockets unless \fB\-\-cpu_bind=none\fR
 is specified.
-NOTE: This option is not supported unless \fISelectType=CR_Socket\fR 
-or \fISelectType=CR_Socket_Memory\fR is configured.
+NOTE: This option is not supported unless
+\fISelectTypeParameters=CR_Socket\fR or
+\fISelectTypeParameters=CR_Socket_Memory\fR is configured.
 
 .TP
-\fB\-\-ntasks\-per\-node\fR=\fIntasks\fR
+\fB\-\-ntasks\-per\-node\fR=<\fIntasks\fR>
 Request that no more than \fIntasks\fR be invoked on each node.
 This is similar to using \fB\-\-cpus\-per\-task\fR=\fIncpus\fR
 but does not require knowledge of the actual number of cpus on
@@ -445,10 +630,10 @@ on each node.  Examples of this include submitting
 a hybrid MPI/OpenMP app where only one MPI "task/rank" should be
 assigned to each node while allowing the OpenMP portion to utilize
 all of the parallelism present in the node, or submitting a single
-setup/cleanup/monitoring job to each node of a pre\-existing  
+setup/cleanup/monitoring job to each node of a pre\-existing
 allocation as one step in a larger job script.
 
-.TP 
+.TP
 \fB\-\-no\-bell\fR
 Silence salloc's use of the terminal bell. Also see the option \fB\-\-bell\fR.
 
@@ -459,19 +644,13 @@ shell when used with the \fB\-A\fR, \fB\-\-allocate\fR option.
 
 .TP
 \fB\-O\fR, \fB\-\-overcommit\fR
-Overcommit resources. Normally, \fBsbatch\fR will allocate one cpu per
-task to be executed. By specifying \fB\-\-overcommit\fR you are explicitly
-allowing more than one process per cpu. However no more than
+Overcommit resources.  Normally, \fBsalloc\fR will allocate one task
+per processor.  By specifying \fB\-\-overcommit\fR you are explicitly
+allowing more than one task per processor.  However no more than
 \fBMAX_TASKS_PER_NODE\fR tasks are permitted to execute per node.
 
-.TP 
-\fB\-p\fR, \fB\-\-partition\fR[=]<\fIpartition name\fR>
-Request a specific partition for the resource allocation.  If not specified,
-the default behaviour is to allow the slurm controller to select the default
-partition as designated by the system administrator.
-
-.TP 
-\fB\-P\fR, \fB\-\-dependency\fR[=]<\fIdependency_list\fR>
+.TP
+\fB\-P\fR, \fB\-\-dependency\fR=<\fIdependency_list\fR>
 Defer the start of this job until the specified dependencies have been
 satisfied.
 <\fIdependency_list\fR> is of the form 
@@ -503,9 +682,19 @@ job name and user have terminated.
 .RE
 
 .TP
-\fB\-q\fR, \fB\-\-quiet\fR
+\fB\-p\fR, \fB\-\-partition\fR=<\fIpartition name\fR>
+Request a specific partition for the resource allocation.  If not specified,
+the default behaviour is to allow the slurm controller to select the default
+partition as designated by the system administrator.
+
+.TP
+\fB\-Q\fR, \fB\-\-quiet\fR
 Suppress informational messages from salloc. Errors will still be displayed.
 
+.TP
+\fB\-\-reservation\fR=<\fIname\fR>
+Allocate resources for the job from the named reservation.
+
 .TP
 \fB\-s\fR, \fB\-\-share\fR
 The job allocation can share nodes with other running jobs.  (The default
@@ -515,35 +704,35 @@ option was not set and allow higher system utilization, but application
 performance will likely suffer due to competition for resources within a node.
 
 .TP
-\fB\-t\fR, \fB\-\-time\fR=\fItime\fR
-Set a limit on the total run time of the job allocation.
-If the requested time limit exceeds the partition's time limit, the 
-job will be left in a PENDING state (possibly indefinitely).  The default
-time limit is the partition's time limit.  When the time limit is reached,
+\fB\-t\fR, \fB\-\-time\fR=<\fItime\fR>
+Set a limit on the total run time of the job allocation.  If the
+requested time limit exceeds the partition's time limit, the job will
+be left in a PENDING state (possibly indefinitely).  The default time
+limit is the partition's time limit.  When the time limit is reached,
 each task in each job step is sent SIGTERM followed by SIGKILL. The
-interval between signals is specified by the SLURM configuration parameter
-\fBKillWait\fR.  A time limit of zero represents unlimited time.
-Acceptable time formats include "minutes", "minutes:seconds", 
-"hours:minutes:seconds", "days\-hours", "days\-hours:minutes" and 
-"days\-hours:minutes:seconds".
+interval between signals is specified by the SLURM configuration
+parameter \fBKillWait\fR.  A time limit of zero requests that no time
+limit be imposed.  Acceptable time formats include "minutes",
+"minutes:seconds", "hours:minutes:seconds", "days\-hours",
+"days\-hours:minutes" and "days\-hours:minutes:seconds".
 
 .TP
-\fB\-\-tmp\fR[=]<\fIMB\fR>
+\fB\-\-tmp\fR=<\fIMB\fR>
 Specify a minimum amount of temporary disk space.
 
 .TP
-\fB\-U\fR, \fB\-\-account\fR[=]<\fIaccount\fR>
+\fB\-U\fR, \fB\-\-account\fR=<\fIaccount\fR>
 Change resource use by this job to specified account.
-The \fIaccount\fR is an arbitrary string. The account name may 
-be changed after job submission using the \fBscontrol\fR 
+The \fIaccount\fR is an arbitrary string. The account name may
+be changed after job submission using the \fBscontrol\fR
 command.
 
 .TP
 \fB\-u\fR, \fB\-\-usage\fR
-Display brief usage message and exit.
+Display brief help message and exit.
 
 .TP
-\fB\-\-uid\fR[=]<\fIuser\fR>
+\fB\-\-uid\fR=<\fIuser\fR>
 Attempt to submit and/or run a job as \fIuser\fR instead of the
 invoking user id. The invoking user's credentials will be used
 to check access permissions for the target partition. User root
@@ -553,16 +742,17 @@ its permissions to the uid specified after node allocation is
 successful. \fIuser\fR may be the user name or numerical user ID.
 
 .TP
-\fB\-v\fR, \fB\-\-verbose\fR
-Increase the verbosity of salloc's informational messages.  Multiple \-v's
-will further increase salloc's verbosity.
-
-.TP 
 \fB\-V\fR, \fB\-\-version\fR
 Display version information and exit.
 
-.TP 
-\fB\-W\fR, \fB\-\-wait\fR[=]<\fIseconds\fR>
+.TP
+\fB\-v\fR, \fB\-\-verbose\fR
+Increase the verbosity of salloc's informational messages.  Multiple
+\fB\-v\fR's will further increase salloc's verbosity.  By default only
+errors will be displayed.
+
+.TP
+\fB\-W\fR, \fB\-\-wait\fR=<\fIseconds\fR>
 If the resources needed to satisfy a job allocation are not immediately
 available, the job allocation is enqueued and is said to be PENDING.  This
 option tells salloc how long (in seconds) to wait for the allocation to be
@@ -570,40 +760,39 @@ granted before giving up.  When the wait limit has been reached, salloc
 will exit without running the \fIcommand\fR parameter.  By default, salloc
 will wait indefinitely.  (The \-\-immediate option makes \-\-wait moot.)
 
-.TP 
-\fB\-w\fR, \fB\-\-nodelist\fR[=]<\fInode name list\fR>
+.TP
+\fB\-w\fR, \fB\-\-nodelist\fR=<\fInode name list\fR>
 Request a specific list of node names.  The list may be specified as a
 comma\-separated list of node names, or a range of node names
 (e.g. mynode[1\-5,7,...]).  Duplicate node names in the list will be ignored.
 The order of the node names in the list is not important; the node names
-will be sorted my SLURM.
+will be sorted by SLURM.
 
 .TP
-\fB\-\-wckey\fR=\fIwckey\fR
+\fB\-\-wckey\fR=<\fIwckey\fR>
 Specify the wckey to be used with the job.  If TrackWCKey=no (default) in the
-slurm.conf this value does not get looked at. 
+slurm.conf this value is ignored.
 
 .TP
-\fB\-x\fR, \fB\-\-exclude\fR[=]<\fInode name list\fR>
+\fB\-x\fR, \fB\-\-exclude\fR=<\fInode name list\fR>
 Explicitly exclude certain nodes from the resources granted to the job.
 
-
 .PP
-The following options support Blue Gene systems, but may be 
+The following options support Blue Gene systems, but may be
 applicable to other systems as well.
 
 .TP
-\fB\-\-blrts\-image\fR[=]<\fIpath\fR>
+\fB\-\-blrts\-image\fR=<\fIpath\fR>
 Path to blrts image for bluegene block.  BGL only.
 Default from \fIbluegene.conf\fR if not set.
 
 .TP
-\fB\-\-cnload\-image\fR=\fIpath\fR
+\fB\-\-cnload\-image\fR=<\fIpath\fR>
 Path to compute node image for bluegene block.  BGP only.
 Default from \fIbluegene.conf\fR if not set.
 
 .TP
-\fB\-\-conn\-type\fR[=]<\fItype\fR>
+\fB\-\-conn\-type\fR=<\fItype\fR>
 Require the partition connection type to be of a certain type.  
 On Blue Gene the acceptable values of \fItype\fR are MESH, TORUS and NAV.
 If NAV, or if not set, then SLURM will try to fit a TORUS else MESH.
@@ -614,7 +803,7 @@ midplane and below).  You can use HTC_S for SMP, HTC_D for Dual, HTC_V
 for virtual node mode, and HTC_L for Linux mode.
 
 .TP
-\fB\-g\fR, \fB\-\-geometry\fR[=]<\fIXxYxZ\fR>
+\fB\-g\fR, \fB\-\-geometry\fR=<\fIXxYxZ\fR>
 Specify the geometry requirements for the job. The three numbers 
 represent the required geometry giving dimensions in the X, Y and 
 Z directions. For example "\-\-geometry=2x3x4", specifies a block 
@@ -622,17 +811,17 @@ of nodes having 2 x 3 x 4 = 24 nodes (actually base partitions on
 Blue Gene).
 
 .TP
-\fB\-\-ioload\-image\fR=\fIpath\fR
+\fB\-\-ioload\-image\fR=<\fIpath\fR>
 Path to io image for bluegene block.  BGP only.
 Default from \fIbluegene.conf\fR if not set.
 
 .TP
-\fB\-\-linux\-image\fR[=]<\fIpath\fR>
+\fB\-\-linux\-image\fR=<\fIpath\fR>
 Path to linux image for bluegene block.  BGL only.
 Default from \fIbluegene.conf\fR if not set.
 
 .TP
-\fB\-\-mloader\-image\fR[=]<\fIpath\fR>
+\fB\-\-mloader\-image\fR=<\fIpath\fR>
 Path to mloader image for bluegene block.
 Default from \fIbluegene.conf\fR if not set.
 
@@ -643,7 +832,7 @@ appropriate partition.
 By default the specified geometry can rotate in three dimensions.
 
 .TP
-\fB\-\-ramdisk\-image\fR[=]<\fIpath\fR>
+\fB\-\-ramdisk\-image\fR=<\fIpath\fR>
 Path to ramdisk image for bluegene block.  BGL only.
 Default from \fIbluegene.conf\fR if not set.
 
@@ -657,60 +846,71 @@ Upon startup, salloc will read and handle the options set in the following
 environment variables.  Note: Command line options always override environment
 variables settings.
 
-.TP 22 
+.TP 22
 \fBSALLOC_ACCOUNT\fR
-Same as \fB\-\-account\fR.
+Same as \fB\-U, \-\-account\fR
 .TP
 \fBSALLOC_ACCTG_FREQ\fR
-Same as \fB\-\-acctg\-freq\fR.
+Same as \fB\-\-acctg\-freq\fR
 .TP
 \fBSALLOC_BELL\fR
-Same as \fB\-\-bell\fR.
+Same as \fB\-\-bell\fR
 .TP
 \fBSALLOC_CONN_TYPE\fR
-Same as \fB\-\-conn\-type\fR.
+Same as \fB\-\-conn\-type\fR
+.TP
+\fBSALLOC_CPU_BIND\fR
+Same as \fB\-\-cpu_bind\fR
 .TP
 \fBSALLOC_DEBUG\fR
-Same as \fB\-v\fR or \fB\-\-verbose\fR.
+Same as \fB\-v, \-\-verbose\fR
 .TP
 \fBSALLOC_EXCLUSIVE\fR
-Same as \fB\-\-exclusive\fR.
+Same as \fB\-\-exclusive\fR
 .TP
 \fBSALLOC_GEOMETRY\fR
-Same as \fB\-g\fR or \fB\-\-geometry\fR.
+Same as \fB\-g, \-\-geometry\fR
 .TP
 \fBSALLOC_IMMEDIATE\fR
-Same as \fB\-I\fR or \fB\-\-immediate\fR.
+Same as \fB\-I, \-\-immediate\fR
 .TP
 \fBSALLOC_JOBID\fR
-Same as \fB\-\-jobid\fR.
+Same as \fB\-\-jobid\fR
+.TP
+\fBSALLOC_MEM_BIND\fR
+Same as \fB\-\-mem_bind\fR
 .TP
 \fBSALLOC_NETWORK\fR
-Same as \fB\-\-network\fR.
+Same as \fB\-\-network\fR
 .TP
 \fBSALLOC_NO_BELL\fR
-Same as \fB\-\-no\-bell\fR.
+Same as \fB\-\-no\-bell\fR
 .TP
 \fBSALLOC_NO_ROTATE\fR
-Same as \fB\-R\fR or \fB\-\-no\-rotate\fR.
+Same as \fB\-R, \-\-no\-rotate\fR
 .TP
-\fBSLURM_OVERCOMMIT\fR
+\fBSALLOC_OVERCOMMIT\fR
 Same as \fB\-O, \-\-overcommit\fR
 .TP
 \fBSALLOC_PARTITION\fR
-Same as \fB\-p\fR or \fB\-\-partition\fR.
+Same as \fB\-p, \-\-partition\fR
 .TP
 \fBSALLOC_TIMELIMIT\fR
-Same as \fB\-t\fR or \fB\-\-time\fR.
+Same as \fB\-t, \-\-time\fR
 .TP
 \fBSALLOC_WAIT\fR
-Same as \fB\-W\fR or \fB\-\-wait\fR.
+Same as \fB\-W, \-\-wait\fR
 
 .SH "OUTPUT ENVIRONMENT VARIABLES"
 .PP
 salloc will set the following environment variables in the environment of
 the executed program:
-
+.TP
+\fBBASIL_RESERVATION_ID\fR
+The reservation ID on Cray systems running ALPS/BASIL only.
+.TP
+\fBSLURM_CPU_BIND\fR
+Set to the value of the \fB\-\-cpu_bind\fR option.
 .TP
 \fBSLURM_JOB_ID\fR (and \fBSLURM_JOBID\fR for backwards compatibility)
 The ID of the job allocation.
@@ -729,6 +929,12 @@ List of nodes allocated to the job.
 \fBSLURM_JOB_NUM_NODES\fR (and \fBSLURM_NNODES\fR for backwards compatibility)
 Total number of nodes in the job allocation.
 .TP
+\fBSLURM_MEM_BIND\fR
+Set to the value of the \fB\-\-mem_bind\fR option.
+.TP
+\fBSLURM_NTASKS_PER_NODE\fR
+Set to the value of the \fB\-\-ntasks\-per\-node\fR option, if specified.
+.TP
 \fBSLURM_TASKS_PER_NODE\fR
 Number of tasks to be initiated on each node. Values are
 comma separated and in the same order as SLURM_NODELIST.
@@ -772,9 +978,9 @@ salloc \-N5 srun \-n10 myprogram
 
 .SH "COPYING"
 Copyright (C) 2006\-2007 The Regents of the University of California.
-Copyright (C) 2008 Lawrence Livermore National Security.
+Copyright (C) 2008\-2009 Lawrence Livermore National Security.
 Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
-LLNL\-CODE\-402394.
+CODE\-OCEC\-09\-009. All rights reserved.
 .LP
 This file is part of SLURM, a resource management program.
 For details, see <https://computing.llnl.gov/linux/slurm/>.
diff --git a/doc/man/man1/sattach.1 b/doc/man/man1/sattach.1
index a13f1da7d366357d5b6f6d97215820ef6ce01b46..d4d86707c502c417a72ee182269143c15c1d59e5 100644
--- a/doc/man/man1/sattach.1
+++ b/doc/man/man1/sattach.1
@@ -1,4 +1,4 @@
-.TH "sattach" "1" "SLURM 1.2" "September 2006" "SLURM Commands"
+.TH "sattach" "1" "SLURM 2.0" "September 2006" "SLURM Commands"
 .SH "NAME"
 .LP 
 sattach \- Attach to a SLURM job step.
@@ -69,7 +69,7 @@ sattach \-\-output\-filter 5 65386.15
 .SH "COPYING"
 Copyright (C) 2006 The Regents of the University of California.
 Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
-LLNL\-CODE\-402394.
+CODE\-OCEC\-09\-009. All rights reserved.
 .LP
 This file is part of SLURM, a resource management program.
 For details, see <https://computing.llnl.gov/linux/slurm/>.
diff --git a/doc/man/man1/sbatch.1 b/doc/man/man1/sbatch.1
index c58ff0a2296d8ca192362332a29e4b1cc54640b7..cd6850da998f3a02bdbe95eff033e1cda8561d27 100644
--- a/doc/man/man1/sbatch.1
+++ b/doc/man/man1/sbatch.1
@@ -1,12 +1,12 @@
-.TH "sbatch" "1" "SLURM 1.3" "July 2008" "SLURM Commands"
+.TH "sbatch" "1" "SLURM 2.0" "May 2009" "SLURM Commands"
+
 .SH "NAME"
-.LP 
 sbatch \- Submit a batch script to SLURM.
+
 .SH "SYNOPSIS"
-.LP 
 sbatch [\fIoptions\fP] \fIscript\fP [\fIargs\fP...]
+
 .SH "DESCRIPTION"
-.LP 
 sbatch submits a batch script to SLURM.  The batch script may be given to
 sbatch through a file name on the command line, or if no file name is specified,
 sbatch will read in a script from standard input. The batch script may contain
@@ -21,10 +21,10 @@ When the job allocation is finally granted for the batch script, SLURM
 runs a single copy of the batch script on the first node in the set of
 allocated nodes.
 .SH "OPTIONS"
-.LP 
+.LP
 
 .TP
-\fB\-\-acctg\-freq\fR=\fIseconds\fR
+\fB\-\-acctg\-freq\fR=<\fIseconds\fR>
 Define the job accounting sampling interval.
 This can be used to override the \fIJobAcctGatherFrequency\fR parameter in SLURM's
 configuration file, \fIslurm.conf\fR.
 A value of zero disables the periodic job sampling and provides accounting
 information only on job termination (reducing SLURM interference with the job).
 
 .TP
-\fB\-B\fR \fB\-\-extra\-node\-info\fR=\fIsockets\fR[:\fIcores\fR[:\fIthreads\fR]]
+\fB\-B\fR \fB\-\-extra\-node\-info\fR=<\fIsockets\fR[:\fIcores\fR[:\fIthreads\fR]]>
 Request a specific allocation of resources with details as to the
 number and type of computational resources within a cluster:
 number of sockets (or physical processors) per node,
@@ -45,9 +45,9 @@ resources of that type are to be utilized.
 As with nodes, the individual levels can also be specified in separate
 options if desired:
 .nf
-    \fB\-\-sockets\-per\-node\fR=\fIsockets\fR
-    \fB\-\-cores\-per\-socket\fR=\fIcores\fR
-    \fB\-\-threads\-per\-core\fR=\fIthreads\fR
+    \fB\-\-sockets\-per\-node\fR=<\fIsockets\fR>
+    \fB\-\-cores\-per\-socket\fR=<\fIcores\fR>
+    \fB\-\-threads\-per\-core\fR=<\fIthreads\fR>
 .fi
 When the task/affinity plugin is enabled,
 specifying an allocation in this manner also instructs SLURM to use
@@ -60,29 +60,57 @@ If select/cons_res is configured, it must have a parameter of CR_Core,
 CR_Core_Memory, CR_Socket, or CR_Socket_Memory. 
 
 .TP
-\fB\-\-begin\fR[=]<\fItime\fR>
+\fB\-\-begin\fR=<\fItime\fR>
 Submit the batch script to the SLURM controller immediately, like normal, but
 tell the controller to defer the allocation of the job until the specified time.
 
-Time may be of the form \fIHH:MM:SS\fR to run a job at 
+Time may be of the form \fIHH:MM:SS\fR to run a job at
 a specific time of day (seconds are optional).
-(If that time is already past, the next day is assumed.) 
-You may also specify \fImidnight\fR, \fInoon\fR, or 
-\fIteatime\fR (4pm) and you can have a time\-of\-day suffixed 
-with \fIAM\fR or \fIPM\fR for running in the morning or the evening.  
+(If that time is already past, the next day is assumed.)
+You may also specify \fImidnight\fR, \fInoon\fR, or
+\fIteatime\fR (4pm) and you can have a time\-of\-day suffixed
+with \fIAM\fR or \fIPM\fR for running in the morning or the evening.
 You can also say what day the job will be run, by specifying
 a date of the form \fIMMDDYY\fR or \fIMM/DD/YY\fR
-or \fIMM.DD.YY\fR. You can also
+or \fIYYYY\-MM\-DD\fR. Combine date and time using the following
+format \fIYYYY\-MM\-DD[THH[:MM[:SS]]]\fR. You can also
 give times like \fInow + count time\-units\fR, where the time\-units
-can be \fIminutes\fR, \fIhours\fR, \fIdays\fR, or \fIweeks\fR 
-and you can tell SLURM to run the job today with the keyword
-\fItoday\fR and to run the job tomorrow with the keyword
-\fItomorrow\fR.
+can be \fIseconds\fR (default), \fIminutes\fR, \fIhours\fR,
+\fIdays\fR, or \fIweeks\fR and you can tell SLURM to run
+the job today with the keyword \fItoday\fR and to run the
+job tomorrow with the keyword \fItomorrow\fR.
 The value may be changed after job submission using the
 \fBscontrol\fR command.
+For example:
+.nf
+   \-\-begin=16:00
+   \-\-begin=now+1hour
+   \-\-begin=now+60           (seconds by default)
+   \-\-begin=2010\-01\-20T12:34:00
+.fi
 
 .TP
-\fB\-C\fR, \fB\-\-constraint\fR[=]<\fIlist\fR>
+\fB\-\-checkpoint\fR=<\fItime\fR>
+Specifies the interval between creating checkpoints of the job step. 
+By default, no checkpoints will be created for the job step.
+Acceptable time formats include "minutes", "minutes:seconds", 
+"hours:minutes:seconds", "days\-hours", "days\-hours:minutes" and 
+"days\-hours:minutes:seconds".
+
+.TP
+\fB\-\-checkpoint\-dir\fR=<\fIdirectory\fR>
+Specifies the directory into which the job or job step's checkpoint should 
+be written (used by the checkpoint/blcr and checkpoint/xlch plugins only).
+The default value is the current working directory.
+Checkpoint files will be of the form "<job_id>.ckpt" for jobs
+and "<job_id>.<step_id>.ckpt" for job steps.
+
+.TP
+\fB\-\-comment\fR=<\fIstring\fR>
+An arbitrary comment.
+
+.TP
+\fB\-C\fR, \fB\-\-constraint\fR=<\fIlist\fR>
 Specify a list of constraints. 
 The constraints are features that have been assigned to the nodes by 
 the slurm administrator. 
@@ -97,7 +125,7 @@ There is no mechanism to specify that you want one node with feature
 node has both features.
 If only one of a set of possible options should be used for all allocated 
 nodes, then use the OR operator and enclose the options within square brackets. 
-For example: "\fB\-\-constraint="[rack1|rack2|rack3|rack4]"\fR might 
+For example: "\fB\-\-constraint=[rack1|rack2|rack3|rack4]"\fR might 
 be used to specify that all nodes must be allocated on a single rack of 
 the cluster, but any of those four racks can be used.
 A request can also specify the number of nodes needed with some feature
@@ -109,8 +137,118 @@ Constraints with node counts may only be combined with AND operators.
 If no nodes have the requested features, then the job will be rejected 
 by the slurm job manager.
 
-.TP 
-\fB\-c\fR, \fB\-\-cpus\-per\-task\fR[=]<\fIncpus\fR>
+.TP
+\fB\-\-contiguous\fR
+If set, then the allocated nodes must form a contiguous set.
+Not honored with the \fBtopology/tree\fR or \fBtopology/3d_torus\fR
+plugins, both of which can modify the node ordering.
+
+.TP
+\fB\-\-cpu_bind\fR=[{\fIquiet,verbose\fR},]\fItype\fR
+Bind tasks to CPUs. Used only when the task/affinity plugin is enabled.
+The configuration parameter \fBTaskPluginParam\fR may override these options.
+For example, if \fBTaskPluginParam\fR is configured to bind to cores,
+your job will not be able to bind tasks to sockets.
+NOTE: To have SLURM always report on the selected CPU binding for all
+commands executed in a shell, you can enable verbose mode by setting
+the SLURM_CPU_BIND environment variable value to "verbose".
+
+The following informational environment variables are set when \fB\-\-cpu_bind\fR
+is in use:
+.nf
+        SLURM_CPU_BIND_VERBOSE
+        SLURM_CPU_BIND_TYPE
+        SLURM_CPU_BIND_LIST
+.fi
+
+See the \fBENVIRONMENT VARIABLES\fR section for a more detailed description
+of the individual SLURM_CPU_BIND* variables.
+
+When using \fB\-\-cpus\-per\-task\fR to run multithreaded tasks, be aware that
+CPU binding is inherited from the parent of the process.  This means that
+the multithreaded task should either specify or clear the CPU binding
+itself to avoid having all threads of the multithreaded task use the same
+mask/CPU as the parent.  Alternatively, fat masks (masks which specify more
+than one allowed CPU) could be used for the tasks in order to provide
+multiple CPUs for the multithreaded tasks.
+
+By default, a job step has access to every CPU allocated to the job.
+To ensure that distinct CPUs are allocated to each job step, use the
+\fB\-\-exclusive\fR option.
+
+If the job step allocation includes an allocation with a number of
+sockets, cores, or threads equal to the number of tasks to be started
+then the tasks will by default be bound to the appropriate resources.
+Disable this mode of operation by explicitly setting "\-\-cpu_bind=none".
+
+Note that a job step can be allocated different numbers of CPUs on each node
+or be allocated CPUs not starting at location zero. Therefore one of the
+options which automatically generate the task binding is recommended.
+Explicitly specified masks or bindings are only honored when the job step
+has been allocated every available CPU on the node.
+
+Binding a task to a NUMA locality domain means to bind the task to the set of
+CPUs that belong to the NUMA locality domain or "NUMA node".
+If NUMA locality domain options are used on systems with no NUMA support, then
+each socket is considered a locality domain.
+
+Supported options include:
+.PD 1
+.RS
+.TP
+.B q[uiet]
+Quietly bind before task runs (default)
+.TP
+.B v[erbose]
+Verbosely report binding before task runs
+.TP
+.B no[ne]
+Do not bind tasks to CPUs (default)
+.TP
+.B rank
+Automatically bind by task rank.
+Task zero is bound to socket (or core or thread) zero, etc.
+Not supported unless the entire node is allocated to the job.
+.TP
+.B map_cpu:<list>
+Bind by mapping CPU IDs to tasks as specified
+where <list> is <cpuid1>,<cpuid2>,...<cpuidN>.
+CPU IDs are interpreted as decimal values unless they are preceded
+with '0x' in which case they are interpreted as hexadecimal values.
+Not supported unless the entire node is allocated to the job.
+.TP
+.B mask_cpu:<list>
+Bind by setting CPU masks on tasks as specified
+where <list> is <mask1>,<mask2>,...<maskN>.
+CPU masks are \fBalways\fR interpreted as hexadecimal values but can be
+preceded with an optional '0x'.
+.TP
+.B sockets
+Automatically generate masks binding tasks to sockets.
+If the number of tasks differs from the number of allocated sockets
+this can result in sub\-optimal binding.
+.TP
+.B cores
+Automatically generate masks binding tasks to cores.
+If the number of tasks differs from the number of allocated cores
+this can result in sub\-optimal binding.
+.TP
+.B threads
+Automatically generate masks binding tasks to threads.
+If the number of tasks differs from the number of allocated threads
+this can result in sub\-optimal binding.
+.TP
+.B ldoms
+Automatically generate masks binding tasks to NUMA locality domains.
+If the number of tasks differs from the number of allocated locality domains
+this can result in sub\-optimal binding.
+.TP
+.B help
+Show this help message
+.RE
+
+.TP
+\fB\-c\fR, \fB\-\-cpus\-per\-task\fR=<\fIncpus\fR>
 Advise the SLURM controller that ensuing job steps will require \fIncpus\fR 
 number of processors per task.  Without this option, the controller will
 just try to allocate one processor per task.
@@ -124,21 +262,12 @@ the \-\-cpus\-per\-task=3 options, the controller knows that each task requires
 of 4 nodes, one for each of the 4 tasks.
 
 .TP
-\fB\-\-comment\fR=<\fIstring\fR>
-An arbitrary comment.
-
-.TP
-\fB\-\-contiguous\fR
-Demand a contiguous range of nodes. The default is "yes". Specify
-\-\-contiguous=no if a contiguous range of nodes is not required.
-
-.TP 
-\fB\-D\fR, \fB\-\-workdir\fR[=]<\fIdirectory\fR>
+\fB\-D\fR, \fB\-\-workdir\fR=<\fIdirectory\fR>
 Set the working directory of the batch script to \fIdirectory\fR before
 it is executed.
 
 .TP
-\fB\-e\fR, \fB\-\-error\fR[=]<\fIfilename pattern\fR>
+\fB\-e\fR, \fB\-\-error\fR=<\fIfilename pattern\fR>
 Instruct SLURM to connect the batch script's standard error directly to the 
 file name specified in the "\fIfilename pattern\fR".
 See the \fB\-\-input\fR option for filename specification options.
@@ -150,13 +279,13 @@ the opposite of \-\-share, whichever option is seen last on the command line
 will win.  (The default shared/exclusive behaviour depends on system
 configuration.)
 
-.TP 
-\fB\-F\fR, \fB\-\-nodefile\fR[=]<\fInode file\fR>
+.TP
+\fB\-F\fR, \fB\-\-nodefile\fR=<\fInode file\fR>
 Much like \-\-nodelist, but the list is contained in a file of name
 \fInode file\fR.  The node names of the list may also span multiple lines
 in the file.    Duplicate node names in the file will be ignored.
 The order of the node names in the list is not important; the node names
-will be sorted my SLURM.
+will be sorted by SLURM.
 
 .TP
 \fB\-\-get\-user\-env\fR[=\fItimeout\fR][\fImode\fR]
@@ -166,7 +295,8 @@ The environment variables are retrieved by running something of this sort
 "su \- <username> \-c /usr/bin/env" and parsing the output.
 Be aware that any environment variables already set in sbatch's environment
 will take precedence over any environment variables in the user's
-login environment.
+login environment. Clear any environment variables before calling sbatch
+that you do not want propagated to the spawned program.
 The optional \fItimeout\fR value is in seconds. Default value is 8 seconds.
 The optional \fImode\fR value controls the "su" options.
 With a \fImode\fR value of "S", "su" is executed without the "\-" option.
@@ -181,17 +311,17 @@ effective uid of "root".
 This option was originally created for use by Moab.
 
 .TP
-\fB\-\-gid\fR[=]<\fIgroup\fR>
+\fB\-\-gid\fR=<\fIgroup\fR>
 If \fBsbatch\fR is run as root, and the \fB\-\-gid\fR option is used, 
 submit the job with \fIgroup\fR's group access permissions.  \fIgroup\fR 
 may be the group name or the numerical group ID.
 
-.TP 
+.TP
 \fB\-h\fR, \fB\-\-help\fR
 Display help information and exit.
 
 .TP
-\fB\-\-hint\fR=\fItype\fR
+\fB\-\-hint\fR=<\fItype\fR>
 Bind tasks according to application hints
 .RS
 .TP
@@ -206,19 +336,20 @@ use only one core in each physical CPU
 .B [no]multithread
 [don't] use extra threads with in-core multi-threading
 which can benefit communication intensive applications
+.TP
 .B help
 show this help message
 .RE
 
-.TP 
-\fB\-I\fR,\fB\-\-immediate\fR
+.TP
+\fB\-I\fR, \fB\-\-immediate\fR
 The batch script will only be submitted to the controller if the resources
 necessary to grant its job allocation are immediately available.  If the
 job allocation will have to wait in a queue of pending jobs, the batch script
 will not be submitted.
 
 .TP
-\fB\-i\fR, \fB\-\-input\fR[=]<\fIfilename pattern\fR>
+\fB\-i\fR, \fB\-\-input\fR=<\fIfilename pattern\fR>
 Instruct SLURM to connect the batch script's standard input
 directly to the file name specified in the "\fIfilename pattern\fR".
 
@@ -228,22 +359,23 @@ standard output and standard error are directed to a file of the name
 described below.
 
 The filename pattern may contain one or more replacement symbols, which are
-a percent sign "%" followed by a letter (e.g. %t).
+a percent sign "%" followed by a letter (e.g. %j).
 
 Supported replacement symbols are:
 .PD 0
 .RS 10
-.TP 
+.TP
 \fB%j\fR
 Job allocation number.
 .PD 0
-.TP 
+.TP
 \fB%N\fR
-Node name. (Will result in a separate file per node.)
+Node name.  Only one file is created, so %N will be replaced by the name of the
+first node in the job, which is the one that runs the script.
 .RE
 
 .TP
-\fB\-J\fR, \fB\-\-job\-name\fR[=]<\fIjobname\fR>
+\fB\-J\fR, \fB\-\-job\-name\fR=<\fIjobname\fR>
 Specify a name for the job allocation. The specified name will appear along with
 the job id number when querying running jobs on the system. The default
 is the name of the batch script, or just "sbatch" if the script is
@@ -256,7 +388,7 @@ NOTE: Only valid for user root.
 
 .TP
 \fB\-k\fR, \fB\-\-no\-kill\fR
-Do not automatically terminate a job of one of the nodes it has been 
+Do not automatically terminate a job if one of the nodes it has been
 allocated fails.  The user will assume the responsibilities for fault\-tolerance
 should a node fail.  When there is a node failure, any active job steps (usually
 MPI jobs) on that node will almost certainly suffer a fatal error, but with
@@ -267,7 +399,7 @@ By default SLURM terminates the entire job allocation if any node fails in its
 range of allocated nodes.
 
 .TP
-\fB\-L\fR, \fB\-\-licenses\fR=
+\fB\-L\fR, \fB\-\-licenses\fR=<\fIlicense\fR>
 Specification of licenses (or other resources available on all 
 nodes of the cluster) which must be allocated to this job.
 License names can be followed by an asterisk and count 
@@ -277,8 +409,10 @@ Multiple license names should be comma separated (e.g.
 
 .TP
 \fB\-m\fR, \fB\-\-distribution\fR=
-(\fIblock\fR|\fIcyclic\fR|\fIarbitrary\fR|\fIplane=<options>\fR)
-Specify an alternate distribution method for remote processes.
+<\fIblock\fR|\fIcyclic\fR|\fIarbitrary\fR|\fIplane=<options>\fR>
+Specify an alternate distribution method for remote processes.  In
+sbatch this only sets environment variables that will be used by
+subsequent srun requests.
 .RS
 .TP
 .B block
@@ -304,12 +438,12 @@ The options include a number representing the size of the task block.
 This is followed by an optional specification of the task distribution 
 scheme within a block of tasks and between the blocks of tasks.
 For more details (including examples and diagrams), please see
-.na
-.nh
-https://computing.llnl.gov/linux/slurm/mc_support.html and
+.br
+https://computing.llnl.gov/linux/slurm/mc_support.html
+.br
+and
+.br
 https://computing.llnl.gov/linux/slurm/dist_plane.html.
-.hy
-.ad
 .TP
 .B arbitrary
 The arbitrary method of distribution will allocate processes in\-order as 
@@ -321,19 +455,19 @@ contain at minimum the number of hosts requested.  If requesting tasks
 .RE
 
 .TP
-\fB\-\-mail\-type\fR=\fItype\fR
+\fB\-\-mail\-type\fR=<\fItype\fR>
 Notify user by email when certain event types occur. 
 Valid \fItype\fR values are BEGIN, END, FAIL, ALL (any state change). 
 The user to be notified is indicated with \fB\-\-mail\-user\fR. 
 
 .TP
-\fB\-\-mail\-user\fR=\fIuser\fR
+\fB\-\-mail\-user\fR=<\fIuser\fR>
 User to receive email notification of state changes as defined by 
 \fB\-\-mail\-type\fR.
-The default value is the username of the submitting user.
+The default value is the submitting user.
 
 .TP
-\fB\-\-mem\fR[=]<\fIMB\fR>
+\fB\-\-mem\fR=<\fIMB\fR>
 Specify the real memory required per node in MegaBytes.
 Default value is \fBDefMemPerNode\fR and the maximum value is
 \fBMaxMemPerNode\fR. If configured, both parameters can be
@@ -344,7 +478,7 @@ Also see \fB\-\-mem\-per\-cpu\fR.
 \fB\-\-mem\fR and \fB\-\-mem\-per\-cpu\fR are mutually exclusive.
 
 .TP
-\fB\-\-mem\-per\-cpu\fR[=]<\fIMB\fR>
+\fB\-\-mem\-per\-cpu\fR=<\fIMB\fR>
 Minimum memory required per allocated CPU in MegaBytes.
 Default value is \fBDefMemPerCPU\fR and the maximum value is
 \fBMaxMemPerCPU\fR. If configured, both parameters can be 
@@ -355,34 +489,95 @@ Also see \fB\-\-mem\fR.
 \fB\-\-mem\fR and \fB\-\-mem\-per\-cpu\fR are mutually exclusive.
 
 .TP
-\fB\-\-mincores\fR[=]<\fIn\fR>
+\fB\-\-mem_bind\fR=[{\fIquiet,verbose\fR},]\fItype\fR
+Bind tasks to memory. Used only when the task/affinity plugin is enabled 
+and the NUMA memory functions are available.
+\fBNote that the resolution of CPU and memory binding 
+may differ on some architectures.\fR For example, CPU binding may be performed 
+at the level of the cores within a processor while memory binding will 
+be performed at the level of nodes, where the definition of "nodes" 
+may differ from system to system. \fBThe use of any type other than 
+"none" or "local" is not recommended.\fR
+If you want greater control, try running a simple test code with the 
+options "\-\-cpu_bind=verbose,none \-\-mem_bind=verbose,none" to determine 
+the specific configuration.
+
+NOTE: To have SLURM always report on the selected memory binding for
+all commands executed in a shell, you can enable verbose mode by
+setting the SLURM_MEM_BIND environment variable value to "verbose".
+
+The following informational environment variables are set when \fB\-\-mem_bind\fR
+is in use:
+
+.nf
+        SLURM_MEM_BIND_VERBOSE
+        SLURM_MEM_BIND_TYPE
+        SLURM_MEM_BIND_LIST
+.fi
+
+See the \fBENVIRONMENT VARIABLES\fR section for a more detailed description
+of the individual SLURM_MEM_BIND* variables.
+
+Supported options include:
+.RS
+.TP
+.B q[uiet]
+quietly bind before task runs (default)
+.TP
+.B v[erbose]
+verbosely report binding before task runs
+.TP
+.B no[ne]
+don't bind tasks to memory (default)
+.TP
+.B rank
+bind by task rank (not recommended)
+.TP
+.B local
+Use memory local to the processor in use
+.TP
+.B map_mem:<list>
+bind by mapping a node's memory to tasks as specified
+where <list> is <cpuid1>,<cpuid2>,...<cpuidN>.
+CPU IDs are interpreted as decimal values unless they are preceded
+with '0x' in which case they are interpreted as hexadecimal values
+(not recommended)
+.TP
+.B mask_mem:<list>
+bind by setting memory masks on tasks as specified
+where <list> is <mask1>,<mask2>,...<maskN>.
+Memory masks are \fBalways\fR interpreted as hexadecimal values.
+Note that masks must be preceded with a '0x' if they don't begin
+with [0-9] so they are seen as numerical values by srun.
+.TP
+.B help
+show this help message
+.RE
+
+.TP
+\fB\-\-mincores\fR=<\fIn\fR>
 Specify a minimum number of cores per socket.
 
 .TP
-\fB\-\-mincpus\fR[=]<\fIn\fR>
-Specify minimum number of cpus per node.
+\fB\-\-mincpus\fR=<\fIn\fR>
+Specify a minimum number of logical cpus/processors per node.
 
 .TP
-\fB\-\-minsockets\fR[=]<\fIn\fR>
+\fB\-\-minsockets\fR=<\fIn\fR>
 Specify a minimum number of sockets (physical processors) per node.
 
 .TP
-\fB\-\-minthreads\fR[=]<\fIn\fR>
+\fB\-\-minthreads\fR=<\fIn\fR>
 Specify a minimum number of threads per core.
 
-.TP 
-\fB\-N\fR, \fB\-\-nodes\fR[=]<\fInumber|[min]\-[max]\fR>
-Specify the number of nodes to be used by this job step.  This option accepts
-either a single number, or a range of possible node counts.  If a single number
-is used, such as "\-N 4", then the allocation is asking for four and ONLY four
-nodes.  If a range is specified, such as "\-N 2\-6", SLURM controller may
-grant the batch job anywhere from 2 to 6 nodes.  When using a range, either of
-the min or max options may be omitted.  For instance, "\-N 10\-" means
-"no fewer than 10 nodes", and "\-N \-20" means "no more than 20 nodes".  The
-default value of this option is one node, but other command line options
-may implicitly set the default node count to a higher value.
-The job will be allocated as many nodes as possible within the range specified 
-and without delaying the initiation of the job.
+.TP
+\fB\-N\fR, \fB\-\-nodes\fR=<\fIminnodes\fR[\-\fImaxnodes\fR]>
+Request that a minimum of \fIminnodes\fR nodes be allocated to this job.
+The scheduler may decide to launch the job on more than \fIminnodes\fR nodes.
+A limit on the maximum node count may be specified with \fImaxnodes\fR
+(e.g. "\-\-nodes=2\-4").  The minimum and maximum node count may be the
+same to specify a specific number of nodes (e.g. "\-\-nodes=2\-2" will ask
+for two and ONLY two nodes).
 The partition's node limits supersede those of the job. 
 If a job's node limits are outside of the range permitted for its 
 associated partition, the job will be left in a PENDING state. 
@@ -390,9 +585,17 @@ This permits possible execution at a later time, when the partition
 limit is changed.
 If a job node limit exceeds the number of nodes configured in the 
 partition, the job will be rejected.
+Note that the environment 
+variable \fBSLURM_NNODES\fR will be set to the count of nodes actually 
+allocated to the job. See the \fBENVIRONMENT VARIABLES \fR section 
+for more information.  If \fB\-N\fR is not specified, the default
+behavior is to allocate enough nodes to satisfy the requirements of
+the \fB\-n\fR and \fB\-c\fR options.
+The job will be allocated as many nodes as possible within the range specified
+and without delaying the initiation of the job.
 
 .TP
-\fB\-n\fR, \fB\-\-ntasks\fR[=]<\fInumber\fR>
+\fB\-n\fR, \fB\-\-ntasks\fR=<\fInumber\fR>
 sbatch does not launch tasks, it requests an allocation of resources and 
 submits a batch script. This option advises the SLURM controller that job 
 steps run within this allocation will launch a maximum of \fInumber\fR
@@ -402,22 +605,23 @@ of the \fISelectTypeParameters\fR parameter in slurm.conf), but note
 that the \fB\-\-cpus\-per\-task\fR option will change this default.
 
 .TP
-\fB\-\-network\fR=\fItype\fR
+\fB\-\-network\fR=<\fItype\fR>
 Specify the communication protocol to be used. 
 This option is supported on AIX systems.
-This option sets the \fBSLURM_NETWORK\fR environment variable for use by POE.
-The interpretation of \fItype\fR is system dependent. 
+Since POE is used to launch tasks, this option is not normally used; if 
+needed, it may be specified using the \fBSLURM_NETWORK\fR environment variable.
+The interpretation of \fItype\fR is system dependent.
 For systems with an IBM Federation switch, the following 
-comma\-separated and case insensitive types are recongnized:
+comma\-separated and case insensitive types are recognized:
 \fBIP\fR (the default is user\-space), \fBSN_ALL\fR, \fBSN_SINGLE\fR, 
 \fBBULK_XFER\fR and adapter names  (e.g. \fBSNI0\fR and \fBSNI1\fR). 
-For more information, on IBM systems see \fIPOE\fR documentation on 
+For more information, on IBM systems see \fIpoe\fR documentation on
 the environment variables \fBMP_EUIDEVICE\fR and \fBMP_USE_BULK_XFER\fR.
 Note that only four job steps may be active at once on a node with the 
 \fBBULK_XFER\fR option due to limitations in the Federation switch driver.
 
-.TP 
-\fB\-\-nice\fR[=]<\fIadjustment\fR>
+.TP
+\fB\-\-nice\fR[=\fIadjustment\fR]
 Run the job with an adjusted scheduling priority within SLURM.
 With no adjustment value the scheduling priority is decreased
 by 100. The adjustment range is from \-10000 (highest priority)
@@ -437,29 +641,31 @@ The \fIJobRequeue\fR configuration parameter controls the default
 behavior on the cluster.
 
 .TP
-\fB\-\-ntasks\-per\-core\fR=\fIntasks\fR
+\fB\-\-ntasks\-per\-core\fR=<\fIntasks\fR>
 Request that no more than \fIntasks\fR be invoked on each core.
 Similar to \fB\-\-ntasks\-per\-node\fR except at the core level
 instead of the node level.  Masks will automatically be generated
 to bind the tasks to specific core unless \fB\-\-cpu_bind=none\fR
 is specified.
-NOTE: This option is not supported unless \fISelectType=CR_Core\fR
-or \fISelectType=CR_Core_Memory\fR is configured.
+NOTE: This option is not supported unless
+\fISelectTypeParameters=CR_Core\fR or
+\fISelectTypeParameters=CR_Core_Memory\fR is configured.
 
 .TP
-\fB\-\-ntasks\-per\-socket\fR=\fIntasks\fR
+\fB\-\-ntasks\-per\-socket\fR=<\fIntasks\fR>
 Request that no more than \fIntasks\fR be invoked on each socket.
 Similar to \fB\-\-ntasks\-per\-node\fR except at the socket level
 instead of the node level.  Masks will automatically be generated
 to bind the tasks to specific sockets unless \fB\-\-cpu_bind=none\fR
 is specified.
-NOTE: This option is not supported unless \fISelectType=CR_Socket\fR 
-or \fISelectType=CR_Socket_Memory\fR is configured.
+NOTE: This option is not supported unless
+\fISelectTypeParameters=CR_Socket\fR or
+\fISelectTypeParameters=CR_Socket_Memory\fR is configured.
 
 .TP
-\fB\-\-ntasks\-per\-node\fR=\fIntasks\fR
+\fB\-\-ntasks\-per\-node\fR=<\fIntasks\fR>
 Request that no more than \fIntasks\fR be invoked on each node.
-This is similiar to using \fB\-\-cpus\-per\-task\fR=\fIncpus\fR
+This is similar to using \fB\-\-cpus\-per\-task\fR=\fIncpus\fR
 but does not require knowledge of the actual number of cpus on
 each node.  In some cases, it is more convenient to be able to
 request that no more than a specific number of ntasks be invoked
@@ -467,18 +673,18 @@ on each node.  Examples of this include submitting
 a hybrid MPI/OpenMP app where only one MPI "task/rank" should be
 assigned to each node while allowing the OpenMP portion to utilize
 all of the parallelism present in the node, or submitting a single
-setup/cleanup/monitoring job to each node of a pre\-existing  
+setup/cleanup/monitoring job to each node of a pre\-existing
 allocation as one step in a larger job script.
 
 .TP
 \fB\-O\fR, \fB\-\-overcommit\fR
-Overcommit resources. Normally, \fBsbatch\fR will allocate one cpu per
-task to be executed. By specifying \fB\-\-overcommit\fR you are explicitly
-allowing more than one process per cpu. However no more than
+Overcommit resources.  Normally, \fBsbatch\fR will allocate one task
+per processor.  By specifying \fB\-\-overcommit\fR you are explicitly
+allowing more than one task per processor.  However no more than
 \fBMAX_TASKS_PER_NODE\fR tasks are permitted to execute per node.
 
 .TP
-\fB\-o\fR, \fB\-\-output\fR[=]<\fIfilename pattern\fR>
+\fB\-o\fR, \fB\-\-output\fR=<\fIfilename pattern\fR>
 Instruct SLURM to connect the batch script's standard output directly to the 
 file name specified in the "\fIfilename pattern\fR".
 See the \fB\-\-input\fR option for filename specification options.
@@ -489,10 +695,42 @@ Open the output and error files using append or truncate mode as specified.
 The default value is specified by the system configuration parameter
 \fIJobFileAppend\fR.
 
-.TP 
-\fB\-p\fR, \fB\-\-partition\fR[=]<\fIpartition name\fR>
-Request a specific partition for the resource allocation.  If not specified, the
-default behaviour is to allow the slurm controller to select the default
+.TP
+\fB\-P\fR, \fB\-\-dependency\fR=<\fIdependency_list\fR>
+Defer the start of this job until the specified dependencies have been
+satisfied.
+<\fIdependency_list\fR> is of the form 
+<\fItype:job_id[:job_id][,type:job_id[:job_id]]\fR>.
+Many jobs can share the same dependency and these jobs may even belong to
+different  users. The  value may be changed after job submission using the
+scontrol command.
+.PD
+.RS
+.TP
+\fBafter:job_id[:jobid...]\fR
+This job can begin execution after the specified jobs have begun
+execution.
+.TP
+\fBafterany:job_id[:jobid...]\fR
+This job can begin execution after the specified jobs have terminated.
+.TP
+\fBafternotok:job_id[:jobid...]\fR
+This job can begin execution after the specified jobs have terminated
+in some failed state (non-zero exit code, node failure, timed out, etc).
+.TP
+\fBafterok:job_id[:jobid...]\fR
+This job can begin execution after the specified jobs have successfully
+executed (ran to completion with an exit code of zero).
+.TP
+\fBsingleton\fR
+This job can begin execution after any previously launched jobs sharing the same
+job name and user have terminated.
+.RE
+
+.TP
+\fB\-p\fR, \fB\-\-partition\fR=<\fIpartition name\fR>
+Request a specific partition for the resource allocation.  If not specified,
+the default behaviour is to allow the slurm controller to select the default
 partition as designated by the system administrator.
 
 .TP
@@ -539,40 +777,8 @@ The maximum resident set size
 The maximum stack size
 .RE
 
-.TP 
-\fB\-P\fR, \fB\-\-dependency\fR[=]<\fIdependency_list\fR>
-Defer the start of this job until the specified dependencies have been
-satisfied completed.
-<\fIdependency_list\fR> is of the form 
-<\fItype:job_id[:job_id][,type:job_id[:job_id]]\fR>.
-Many jobs can share the same dependency and these jobs may even belong to
-different  users. The  value may be changed after job submission using the
-scontrol command.
-.PD
-.RS
-.TP
-\fBafter:job_id[:jobid...]\fR
-This job can begin execution after the specified jobs have begun
-execution.
-.TP
-\fBafterany:job_id[:jobid...]\fR
-This job can begin execution after the specified jobs have terminated.
-.TP
-\fBafternotok:job_id[:jobid...]\fR
-This job can begin execution after the specified jobs have terminated
-in some failed state (non-zero exit code, node failure, timed out, etc).
-.TP
-\fBafterok:job_id[:jobid...]\fR
-This job can begin execution after the specified jobs have successfully
-executed (ran to completion with non-zero exit code).
-.TP
-\fBsingleton\fR
-This job can begin execution after any previously launched jobs sharing the same
-job name and user have terminated.
-.RE
-
 .TP
-\fB\-q\fR, \fB\-\-quiet\fR
+\fB\-Q\fR, \fB\-\-quiet\fR
 Suppress informational messages from sbatch. Errors will still be displayed.
 
 .TP
@@ -583,6 +789,10 @@ Also see the \fB\-\-no\-requeue\fR option.
 The \fIJobRequeue\fR configuration parameter controls the default 
 behavior on the cluster.
 
+.TP
+\fB\-\-reservation\fR=<\fIname\fR>
+Allocate resources for the job from the named reservation.
+
 .TP
 \fB\-s\fR, \fB\-\-share\fR
 The job allocation can share nodes with other running jobs.  (The default
@@ -592,40 +802,40 @@ option was not set and allow higher system utilization, but application
 performance will likely suffer due to competition for resources within a node.
 
 .TP
-\fB\-t\fR, \fB\-\-time\fR=\fItime\fR
-Set a  limit on the total run time of the job allocation.
-If the requested time limit exceeds the partition's time limit, the 
-job will be left in a PENDING state (possibly indefinitely).  The default
-time limit is the partition's time limit.  When the time limit is reached,
-the each task in each job step is sent SIGTERM followed by SIGKILL. The
-interval between signals is specified by the SLURM configuration parameter
-\fBKillWait\fR.  A time limit of zero represents unlimited time.
-Acceptable time formats include "minutes", "minutes:seconds", 
-"hours:minutes:seconds", "days\-hours", "days\-hours:minutes" and 
-"days\-hours:minutes:seconds".
+\fB\-t\fR, \fB\-\-time\fR=<\fItime\fR>
+Set a limit on the total run time of the job allocation.  If the
+requested time limit exceeds the partition's time limit, the job will
+be left in a PENDING state (possibly indefinitely).  The default time
+limit is the partition's time limit.  When the time limit is reached,
+each task in each job step is sent SIGTERM followed by SIGKILL.  The
+interval between signals is specified by the SLURM configuration
+parameter \fBKillWait\fR.  A time limit of zero requests that no time
+limit be imposed.  Acceptable time formats include "minutes",
+"minutes:seconds", "hours:minutes:seconds", "days\-hours",
+"days\-hours:minutes" and "days\-hours:minutes:seconds".
 
 .TP
-\fB\-\-tasks\-per\-node\fR[=]<\fIn\fR>
+\fB\-\-tasks\-per\-node\fR=<\fIn\fR>
 Specify the number of tasks to be launched per node.
 Equivalent to \fB\-\-ntasks\-per\-node\fR.
 
 .TP
-\fB\-\-tmp\fR[=]<\fIMB\fR>
+\fB\-\-tmp\fR=<\fIMB\fR>
 Specify a minimum amount of temporary disk space.
 
 .TP
-\fB\-U\fR, \fB\-\-account\fR[=]<\fIaccount\fR>
+\fB\-U\fR, \fB\-\-account\fR=<\fIaccount\fR>
 Change resource use by this job to the specified account.
-The \fIaccount\fR is an arbitrary string. The account name may 
-be changed after job submission using the \fBscontrol\fR 
+The \fIaccount\fR is an arbitrary string. The account name may
+be changed after job submission using the \fBscontrol\fR
 command.
 
 .TP
 \fB\-u\fR, \fB\-\-usage\fR
-Display brief usage message and exit.
+Display brief help message and exit.
 
 .TP
-\fB\-\-uid\fR[=]<\fIuser\fR>
+\fB\-\-uid\fR=<\fIuser\fR>
 Attempt to submit and/or run a job as \fIuser\fR instead of the
 invoking user id. The invoking user's credentials will be used
 to check access permissions for the target partition. User root
@@ -634,55 +844,56 @@ partition for example. If run as root, \fBsbatch\fR will drop
 its permissions to the uid specified after node allocation is
 successful. \fIuser\fR may be the user name or numerical user ID.
 
-.TP 
+.TP
 \fB\-V\fR, \fB\-\-version\fR
 Display version information and exit.
 
 .TP
 \fB\-v\fR, \fB\-\-verbose\fR
-Increase the verbosity of sbatch's informational messages.  Multiple \-v's
-will further increase sbatch's verbosity.
+Increase the verbosity of sbatch's informational messages.  Multiple
+\fB\-v\fR's will further increase sbatch's verbosity.  By default only
+errors will be displayed.
 
-.TP 
-\fB\-w\fR, \fB\-\-nodelist\fR[=]<\fInode name list\fR>
+.TP
+\fB\-w\fR, \fB\-\-nodelist\fR=<\fInode name list\fR>
 Request a specific list of node names.  The list may be specified as a
 comma\-separated list of node names, or a range of node names
 (e.g. mynode[1\-5,7,...]).  Duplicate node names in the list will be ignored.
 The order of the node names in the list is not important; the node names
-will be sorted my SLURM.
+will be sorted by SLURM.
 
 .TP
-\fB\-\-wckey\fR=\fIwckey\fR
+\fB\-\-wckey\fR=<\fIwckey\fR>
 Specify wckey to be used with job.  If TrackWCKey=no (default) in the
-slurm.conf this value does not get looked at. 
+slurm.conf this value is ignored.
 
 .TP
-\fB\-\-wrap\fR[=]<\fIcommand string\fR>
+\fB\-\-wrap\fR=<\fIcommand string\fR>
 Sbatch will wrap the specified command string in a simple "sh" shell script,
 and submit that script to the slurm controller.  When \-\-wrap is used,
 a script name and arguments may not be specified on the command line; instead
 the sbatch-generated wrapper script is used.
 
 .TP
-\fB\-x\fR, \fB\-\-exclude\fR[=]<\fInode name list\fR>
+\fB\-x\fR, \fB\-\-exclude\fR=<\fInode name list\fR>
 Explicitly exclude certain nodes from the resources granted to the job.
 
 .PP
-The following options support Blue Gene systems, but may be 
+The following options support Blue Gene systems, but may be
 applicable to other systems as well.
 
 .TP
-\fB\-\-blrts\-image\fR[=]<\fIpath\fR>
+\fB\-\-blrts\-image\fR=<\fIpath\fR>
 Path to blrts image for bluegene block.  BGL only.
 Default from \fIbluegene.conf\fR if not set.
 
 .TP
-\fB\-\-cnload\-image\fR=\fIpath\fR
+\fB\-\-cnload\-image\fR=<\fIpath\fR>
 Path to compute node image for bluegene block.  BGP only.
 Default from \fIbluegene.conf\fR if not set.
 
 .TP
-\fB\-\-conn\-type\fR[=]<\fItype\fR>
+\fB\-\-conn\-type\fR=<\fItype\fR>
 Require the partition connection type to be of a certain type.  
 On Blue Gene the acceptable values of \fItype\fR are MESH, TORUS and NAV.  
 If NAV, or if not set, then SLURM will try to fit a TORUS else MESH.
@@ -693,25 +904,25 @@ midplane and below).  You can use HTC_S for SMP, HTC_D for Dual, HTC_V
 for virtual node mode, and HTC_L for Linux mode.
 
 .TP
-\fB\-g\fR, \fB\-\-geometry\fR[=]<\fIXxYxZ\fR>
+\fB\-g\fR, \fB\-\-geometry\fR=<\fIXxYxZ\fR>
 Specify the geometry requirements for the job. The three numbers 
 represent the required geometry giving dimensions in the X, Y and 
 Z directions. For example "\-\-geometry=2x3x4", specifies a block 
-of nodes having 2 x 3 x 4 = 24 nodes (actually base partions on 
+of nodes having 2 x 3 x 4 = 24 nodes (actually base partitions on 
 Blue Gene).
 
 .TP
-\fB\-\-ioload\-image\fR=\fIpath\fR
+\fB\-\-ioload\-image\fR=<\fIpath\fR>
 Path to io image for bluegene block.  BGP only.
 Default from \fIbluegene.conf\fR if not set.
 
 .TP
-\fB\-\-linux\-image\fR[=]<\fIpath\fR>
+\fB\-\-linux\-image\fR=<\fIpath\fR>
 Path to linux image for bluegene block.  BGL only.
 Default from \fIbluegene.conf\fR if not set.
 
 .TP
-\fB\-\-mloader\-image\fR[=]<\fIpath\fR>
+\fB\-\-mloader\-image\fR=<\fIpath\fR>
 Path to mloader image for bluegene block.
 Default from \fIbluegene.conf\fR if not set.
 
@@ -722,7 +933,7 @@ appropriate partition.
 By default the specified geometry can rotate in three dimensions.
 
 .TP
-\fB\-\-ramdisk\-image\fR[=]<\fIpath\fR>
+\fB\-\-ramdisk\-image\fR=<\fIpath\fR>
 Path to ramdisk image for bluegene block.  BGL only.
 Default from \fIbluegene.conf\fR if not set.
 
@@ -736,64 +947,81 @@ Upon startup, sbatch will read and handle the options set in the following
 environment variables.  Note that environment variables will override any
 options set in a batch script, and command line options will override any
 environment variables.
- 
+
 .TP 22
 \fBSBATCH_ACCOUNT\fR
-Same as \fB\-\-account\fR.
+Same as \fB\-U, \-\-account\fR
+.TP
+\fBSBATCH_ACCTG_FREQ\fR
+Same as \fB\-\-acctg\-freq\fR
 .TP
-\fBSALLOC_ACCTG_FREQ\fR
-Same as \fB\-\-acctg\-freq\fR.
+\fBSLURM_CHECKPOINT\fR
+Same as \fB\-\-checkpoint\fR
+.TP
+\fBSLURM_CHECKPOINT_DIR\fR
+Same as \fB\-\-checkpoint\-dir\fR
 .TP
 \fBSBATCH_CONN_TYPE\fR
-Same as \fB\-\-conn\-type\fR.
+Same as \fB\-\-conn\-type\fR
+.TP
+\fBSBATCH_CPU_BIND\fR
+Same as \fB\-\-cpu_bind\fR
 .TP
 \fBSBATCH_DEBUG\fR
-Same as \fB\-v\fR or \fB\-\-verbose\fR.
+Same as \fB\-v, \-\-verbose\fR
 .TP
 \fBSBATCH_DISTRIBUTION\fR
-Same as \fB\-m\fR or \fB\-\-distribution\fR.
+Same as \fB\-m, \-\-distribution\fR
 .TP
 \fBSBATCH_EXCLUSIVE\fR
-Same as \fB\-\-exclusive\fR.
+Same as \fB\-\-exclusive\fR
 .TP
 \fBSBATCH_GEOMETRY\fR
-Same as \fB\-g\fR or \fB\-\-geometry\fR.
+Same as \fB\-g, \-\-geometry\fR
 .TP
 \fBSBATCH_IMMEDIATE\fR
-Same as \fB\-I\fR or \fB\-\-immediate\fR.
+Same as \fB\-I, \-\-immediate\fR
 .TP
 \fBSBATCH_JOBID\fR
-Same as \fB\-\-jobid\fR.
+Same as \fB\-\-jobid\fR
 .TP
 \fBSBATCH_JOB_NAME\fR
-Same as \fB\-J\fR or \fB\-\-job\-name\fR.
+Same as \fB\-J, \-\-job\-name\fR
+.TP
+\fBSBATCH_MEM_BIND\fR
+Same as \fB\-\-mem_bind\fR
 .TP
 \fBSBATCH_NETWORK\fR
-Same as \fB\-\-network\fR.
+Same as \fB\-\-network\fR
 .TP
 \fBSBATCH_NO_REQUEUE\fR
-Same as \fB\-\-no\-requeue\fR.
+Same as \fB\-\-no\-requeue\fR
 .TP
 \fBSBATCH_NO_ROTATE\fR
-Same as \fB\-R\fR or \fB\-\-no\-rotate\fR.
+Same as \fB\-R, \-\-no\-rotate\fR
 .TP
-\fBSLURM_OPEN_MODE\fR
-Same as \fB\-\-open\-mode\fR.
+\fBSBATCH_OPEN_MODE\fR
+Same as \fB\-\-open\-mode\fR
 .TP
-\fBSLURM_OVERCOMMIT\fR
+\fBSBATCH_OVERCOMMIT\fR
 Same as \fB\-O, \-\-overcommit\fR
 .TP
 \fBSBATCH_PARTITION\fR
-Same as \fB\-p\fR or \fB\-\-partition\fR.
+Same as \fB\-p, \-\-partition\fR
 .TP
 \fBSBATCH_TIMELIMIT\fR
-Same as \fB\-t\fR or \fB\-\-time\fR.
+Same as \fB\-t, \-\-time\fR
 
 .SH "OUTPUT ENVIRONMENT VARIABLES"
 .PP
 The SLURM controller will set the following variables in the environment of
 the batch script.
-
+.TP
+\fBBASIL_RESERVATION_ID\fR
+The reservation ID on Cray systems running ALPS/BASIL only.
+.TP
+\fBSLURM_CPU_BIND\fR
+Set to the value of the \fB\-\-cpu_bind\fR option.
 .TP
 \fBSLURM_JOB_ID\fR (and \fBSLURM_JOBID\fR for backwards compatibility)
 The ID of the job allocation.
@@ -818,6 +1046,9 @@ List of nodes allocated to the job.
 \fBSLURM_JOB_NUM_NODES\fR (and \fBSLURM_NNODES\fR for backwards compatibility)
 Total number of nodes in the job's resource allocation.
 .TP
+\fBSLURM_MEM_BIND\fR
+Set to the value of the \fB\-\-mem_bind\fR option.
+.TP
 \fBSLURM_TASKS_PER_NODE\fR
 Number of tasks to be initiated on each node. Values are
 comma separated and in the same order as SLURM_NODELIST.
@@ -845,6 +1076,14 @@ Only set if the \fB\-\-ntasks\-per\-node\fR option is specified.
 Number of tasks requested per socket.
 Only set if the \fB\-\-ntasks\-per\-socket\fR option is specified.
 .TP
+\fBSLURM_RESTART_COUNT\fR
+If the job has been restarted due to system failure or has been
+explicitly requeued, this will be set to the number of times
+the job has been restarted.
+.TP
+\fBSLURM_SUBMIT_DIR\fR
+The directory from which \fBsbatch\fR was invoked.
+.TP
 \fBMPIRUN_PARTITION\fR
 The block name on Blue Gene systems only.
 
@@ -906,9 +1145,9 @@ host4
 
 .SH "COPYING"
 Copyright (C) 2006\-2007 The Regents of the University of California.
-Copyright (C) 2008 Lawrence Livermore National Security.
+Copyright (C) 2008\-2009 Lawrence Livermore National Security.
 Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
-LLNL\-CODE\-402394.
+CODE\-OCEC\-09\-009. All rights reserved.
 .LP
 This file is part of SLURM, a resource management program.
 For details, see <https://computing.llnl.gov/linux/slurm/>.
diff --git a/doc/man/man1/sbcast.1 b/doc/man/man1/sbcast.1
index 7f76a4ca30ee83b9cbf1ce70728e354df2a9203d..85215e0e36c02ee7e6fea587d69ab8ef1b9b96ca 100644
--- a/doc/man/man1/sbcast.1
+++ b/doc/man/man1/sbcast.1
@@ -1,4 +1,4 @@
-.TH SBCAST "1" "April 2006" "sbcast 1.1" "Slurm components"
+.TH SBCAST "1" "April 2006" "sbcast 2.0" "Slurm components"
 
 .SH "NAME"
 sbcast \- transmit a file to the nodes allocated to a SLURM job.
@@ -100,7 +100,7 @@ srun: jobid 12345 submitted
 .SH "COPYING"
 Copyright (C) 2006 The Regents of the University of California.
 Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
-LLNL\-CODE\-402394.
+CODE\-OCEC\-09\-009. All rights reserved.
 .LP
 This file is part of SLURM, a resource management program.
 For details, see <https://computing.llnl.gov/linux/slurm/>.
diff --git a/doc/man/man1/scancel.1 b/doc/man/man1/scancel.1
index 0d6ec07915f42503df672805654b462074c2907f..da043691486ecee65c310cde92c974044d46ad91 100644
--- a/doc/man/man1/scancel.1
+++ b/doc/man/man1/scancel.1
@@ -1,4 +1,4 @@
-.TH SCANCEL "1" "November 2008" "scancel 1.2" "Slurm components"
+.TH SCANCEL "1" "April 2009" "scancel 2.0" "Slurm components"
 
 .SH "NAME"
 scancel \- Used to signal jobs or job steps that are under the control of Slurm.
@@ -78,6 +78,13 @@ This option is incompatible with the \fB\-\-quiet\fR option.
 \fB\-V\fR, \fB\-\-Version\fR
 Print the version number of the scancel command. 
 
+.TP
+\fB\-w\fR, \fB\-\-nodelist\fR=<\fIhost1,host2,...\fR>
+Cancel any jobs using any of the given hosts.  The list may be specified as 
+a comma\-separated list of hosts, a range of hosts (host[1\-5,7,...] for 
+example), or a filename. The host list will be assumed to be a filename only 
+if it contains a "/" character. 
+
 .TP
 ARGUMENTS
 
@@ -89,9 +96,17 @@ The Slurm job ID to be signaled.
 \fIstep_id\fP
 The step ID of the job step to be signaled. 
 If not specified, the operation is performed at the level of a job.
+
+If neither \fB\-\-batch\fR nor \fB\-\-signal\fR is used, 
+the entire job will be terminated.
+
 When \fB\-\-batch\fR is used, the batch shell processes will be signaled.
-Otherwise the processes associated with all job steps, but not the 
-batch script itself, will be signaled.
+The child processes of the shell will not be signaled by SLURM, but 
+the shell may forward the signal.
+
+When \fB\-\-batch\fR is not used but \fB\-\-signal\fR is used, 
+then all job steps will be signaled, but the batch script itself 
+will not be signaled.
 
 .SH "ENVIRONMENT VARIABLES"
 .PP
@@ -148,8 +163,12 @@ Cancel job 1234 along with all of its steps:
 scancel 1234
 
 .TP
-Send SIGUSR1 to the batch shell processes of job 1235:
-scancel \-\-signal=USR1 \-\-batch 1235
+Send SIGKILL to all steps of job 1235, but do not cancel the job itself:
+scancel \-\-signal=KILL 1235
+
+.TP
+Send SIGUSR1 to the batch shell processes of job 1236:
+scancel \-\-signal=USR1 \-\-batch 1236
 
 .TP
 Cancel all pending jobs belonging to user "bob" in partition "debug":
@@ -157,8 +176,9 @@ scancel \-\-state=PENDING \-\-user=bob \-\-partition=debug
 
 .SH "COPYING"
 Copyright (C) 2002-2007 The Regents of the University of California.
+Copyright (C) 2008-2009 Lawrence Livermore National Security.
 Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
-LLNL\-CODE\-402394.
+CODE\-OCEC\-09\-009. All rights reserved.
 .LP
 This file is part of SLURM, a resource management program.
 For details, see <https://computing.llnl.gov/linux/slurm/>.
diff --git a/doc/man/man1/scontrol.1 b/doc/man/man1/scontrol.1
index 0c9b19cd7bb1471eea44108acbfdf23610001c20..0503dd93d0ccd061a72b4dbc42593c42d14d5aa7 100644
--- a/doc/man/man1/scontrol.1
+++ b/doc/man/man1/scontrol.1
@@ -1,4 +1,4 @@
-.TH SCONTROL "1" "August 2008" "scontrol 1.3" "Slurm components"
+.TH SCONTROL "1" "May 2009" "scontrol 2.0" "Slurm components"
 
 .SH "NAME"
 scontrol \- Used to view and modify Slurm configuration and state.
@@ -8,17 +8,18 @@ scontrol \- Used view and modify Slurm configuration and state.
 
 .SH "DESCRIPTION"
 \fBscontrol\fR is used to view or modify Slurm configuration including: job, 
-job step, node, partition, and overall system configuration. Most of the 
-commands can only be executed by user root. If an attempt to view or modify 
+job step, node, partition, reservation, and overall system configuration. Most 
+of the commands can only be executed by user root. If an attempt to view or modify 
 configuration information is made by an unauthorized user, an error message 
 will be printed and the requested action will not occur. If no command is 
 entered on the execute line, \fBscontrol\fR will operate in an interactive 
 mode and prompt for input. It will continue prompting for input and executing 
 commands until explicitly terminated. If a command is entered on the execute 
 line, \fBscontrol\fR will execute that command and terminate. All commands 
-and options are case\-insensitive, although node names and partition names 
-are case\-sensitive (node names "LX" and "lx" are distinct). Commands can 
-be abbreviated to the extent that the specification is unique.
+and options are case\-insensitive, although node names, partition names, and 
+reservation names are case\-sensitive (node names "LX" and "lx" are distinct). 
+All commands and options can be abbreviated to the extent that the 
+specification is unique.
 
 .SH "OPTIONS"
 .TP
@@ -63,7 +64,7 @@ are unavailable to user's group.
 Instruct the Slurm controller to terminate immediately and generate a core file.
 
 .TP
-\fBcheckpoint\fP \fICKPT_OP\fP \fIID\fP
+\fBcheckpoint\fP \fICKPT_OP\fP \fIID\fP \fIOPTIONS\fP
 Perform a checkpoint activity on the job step(s) with the specified identification.
 \fIID\fP can be used to identify a specific job (e.g. "<job_id>",
 which applies to all of its existing steps)
@@ -84,8 +85,28 @@ Acceptable values for \fICKPT_OP\fP include:
 \fIerror\fP (report the result for the last checkpoint request, error code and message) 
 .TP
 \fIrestart\fP (restart execution of the previously checkpointed job steps)
+.TP
+Acceptable values for \fIOPTIONS\fP include:
+.TP
+\fIMaxWait=<seconds>\fP maximum time for checkpoint to be written. 
+Default value is 10 seconds.
+Valid with \fIcreate\fP and \fIvacate\fP options only.
+.TP
+\fIImageDir=<directory_name>\fP Location of checkpoint file.
+Valid with \fIcreate\fP, \fIvacate\fP and \fIrestart\fP options only.
+This value takes precedence over any \-\-checkpoint\-dir value specified
+at job submission time.
+.TP
+\fIStickToNodes\fP If set, resume the job on the same nodes as previously used.
+Valid with the \fIrestart\fP option only.
 .RE
 
+.TP
+\fBcreate\fP \fISPECIFICATION\fP
+Create a new partition or reservation.  See the full list of parameters 
+below.  Include the tag "res" to create a reservation without specifying
+a reservation name.
+
 .TP
 \fBcompleting\fP
 Display all jobs in a COMPLETING state along with associated nodes in either a 
@@ -94,8 +115,8 @@ COMPLETING or DOWN state.
 .TP
 \fBdelete\fP \fISPECIFICATION\fP
 Delete the entry with the specified \fISPECIFICATION\fP.
-The only supported \fISPECIFICATION\fP presently is of the form
-\fIPartitionName=<name>\fP.
+The two \fISPECIFICATION\fP choices are \fIPartitionName=<name>\fP
+and \fIReservation=<name>\fP.
 
 .TP
 \fBexit\fP
@@ -196,16 +217,22 @@ is restarted or "scontrol reconfigure" is executed).
 \fBshow\fP \fIENTITY\fP \fIID\fP
 Display the state of the specified entity with the specified identification.
 \fIENTITY\fP may be \fIconfig\fP, \fIdaemons\fP, \fIjob\fP, \fInode\fP, 
-\fIpartition\fP, \fIslurmd\fP, \fIstep\fP, \fIhostlist\fP or \fIhostnames\fP 
+\fIpartition\fP, \fIreservation\fP, \fIslurmd\fP, \fIstep\fP, \fItopology\fP,
+\fIhostlist\fP or \fIhostnames\fP 
 (also \fIblock\fP or \fIsubbp\fP on BlueGene systems).
 \fIID\fP can be used to identify a specific element of the identified 
 entity: the configuration parameter name, job ID, node name, partition name, 
-or job step ID \fIconfig\fP, \fIjob\fP, \fInode\fP, \fIpartition\fP, 
-or \fIstep\fP respectively. 
+reservation name, or job step ID for \fIconfig\fP, \fIjob\fP, \fInode\fP, 
+\fIpartition\fP, \fIreservation\fP, or \fIstep\fP respectively. 
+For an \fIENTITY\fP of \fItopology\fP, the \fIID\fP may be a node or switch name.
+If one node name is specified, all switches connected to that node (and 
+their parent switches) will be shown.
+If more than one node name is specified, only switches that connect to all 
+named nodes will be shown.
 \fIhostnames\fP takes an optional hostlist expression as input and 
 writes a list of individual host names to standard output (one per 
 line). If no hostlist expression is supplied, the contents of the 
-SLURM_NODELIST environment variable is used. For example "tux[1-3]" 
+SLURM_NODELIST environment variable is used. For example "tux[1\-3]" 
 is mapped to "tux1","tux2" and "tux3" (one hostname per line).
 \fIhostlist\fP takes a list of host names and prints the hostlist 
 expression for them (the inverse of \fIhostnames\fP). 
@@ -236,19 +263,30 @@ User processes must stop on receipt of SIGSTOP signal and resume
 upon receipt of SIGCONT for this operation to be effective.
 Not all architectures and configurations support job suspension.
 
+.TP
+\fBtakeover\fP
+Instruct SLURM's backup controller (slurmctld) to take over system control.
+SLURM's backup controller requests control from the primary and waits for 
+its termination. After that, it switches from backup mode to controller
+mode. If the primary controller cannot be contacted, it directly switches 
+to controller mode. This can be used to speed up the SLURM controller 
+fail\-over mechanism when the primary node is down, and to minimize 
+disruption if the computer executing the primary SLURM controller is 
+scheduled down.
+(Note: SLURM's primary controller will take control back at startup.)
+
 .TP
 \fBupdate\fP \fISPECIFICATION\fP 
-Update job, node or partition configuration per the supplied specification.
-\fISPECIFICATION\fP is in the same format as the Slurm configuration file 
-and the output of the \fIshow\fP command described above. It may be desirable 
-to execute the \fIshow\fP command (described above) on the specific entity 
-you which to update, then use cut\-and\-paste tools to enter updated configuration 
-values to the \fIupdate\fP. Note that while most configuration values can be 
-changed using this command, not all can be changed using this mechanism. In 
-particular, the hardware configuration of a node or the physical addition or 
-removal of nodes from the cluster may only be accomplished through editing 
-the Slurm configuration file and executing the \fIreconfigure\fP command 
-(described above).
+Update job, node, partition, or reservation configuration per the supplied 
+specification. \fISPECIFICATION\fP is in the same format as the Slurm 
+configuration file and the output of the \fIshow\fP command described above. It
+may be desirable to execute the \fIshow\fP command (described above) on the 
+specific entity you wish to update, then use cut\-and\-paste tools to enter 
+updated configuration values to the \fIupdate\fP command. Note that while most 
+configuration values can be changed using this command, not all can be changed 
+using this mechanism. In particular, the hardware configuration of a node or 
+the physical addition or removal of nodes from the cluster may only be 
+accomplished through editing the Slurm configuration file and executing 
+the \fIreconfigure\fP command (described above).
 
 .TP
 \fBverbose\fP
@@ -264,7 +302,7 @@ Display the version number of scontrol being executed.
 Repeat the last command executed.
 
 .TP
-\fBSPECIFICATIONS FOR SHOW AND UPDATE COMMANDS, JOBS\fR
+\fBSPECIFICATIONS FOR UPDATE COMMAND, JOBS\fR
 .TP
 \fIAccount\fP=<account>
 Account name to be changed for this job's resource use.
@@ -272,11 +310,11 @@ Value may be cleared with blank data value, "Account=".
 .TP
 \fIContiguous\fP=<yes|no>
 Set the job's requirement for contiguous (consecutive) nodes to be allocated. 
-Possible values are"YES" and "NO".
+Possible values are "YES" and "NO".
 .TP
 \fIDependency\fP=<job_id>
 Defer job's initiation until specified job_id completes.
-Cancel dependency with job_id value of "0", "Depedency=0".
+Cancel dependency with job_id value of "0", "Dependency=0".
 .TP
 \fIExcNodeList\fP=<nodes>
 Set the job's list of excluded nodes. Multiple node names may be 
@@ -290,7 +328,7 @@ The second number of the signal that caused the process to terminate if
 it was terminated by a signal.
 .TP
 \fIFeatures\fP=<features>
-Set the job's required features on nodes specified value. Multiple values 
+Set the job's required node features. Multiple values 
 may be comma separated if all features are required (AND operation) or 
 separated by "|" if any of the specified features are required (OR operation).
 Value may be cleared with blank data value, "Features=".
@@ -302,10 +340,10 @@ Identify the job to be updated. This specification is required.
 Set the job's minimum number of cores per socket to the specified value.
 .TP
 \fIMinMemory\fP=<megabytes>
-Set the job's minimum real memory required per nodes to the specified value.
+Set the job's minimum real memory required per node to the specified value.
 .TP
 \fIMinProcs\fP=<count>
-Set the job's minimum number of processors per nodes to the specified value.
+Set the job's minimum number of processors per node to the specified value.
 .TP
 \fIMinSockets\fP=<count>
 Set the job's minimum number of sockets per node to the specified value.
@@ -314,7 +352,7 @@ Set the job's minimum number of sockets per node to the specified value.
 Set the job's minimum number of threads per core to the specified value.
 .TP
 \fIMinTmpDisk\fP=<megabytes>
-Set the job's minimum temporary disk space required per nodes to the specified value.
+Set the job's minimum temporary disk space required per node to the specified value.
 .TP
 \fIName\fP=<name>
 Set the job's name to the specified value.
@@ -339,7 +377,7 @@ Set the job's list of required nodes. Multiple node names may be specified using
 simple node range expressions (e.g. "lx[10\-20]"). 
 Value may be cleared with blank data value, "ReqNodeList=".
 .TP
-\fIReqNodes\fP=<min_count>[-<max_count>]
+\fIReqNodes\fP=<min_count>[\-<max_count>]
 Set the job's minimum and optionally maximum count of nodes to be allocated.
 .TP
 \fIReqSockets\fP=<count>
@@ -364,8 +402,8 @@ You may also specify \fImidnight\fR, \fInoon\fR, or
 \fIteatime\fR (4pm) and you can have a time\-of\-day suffixed
 with \fIAM\fR or \fIPM\fR for running in the morning or the evening.
 You can also say what day the job will be run, by specifying
-a date of the form \fIMMDDYY\fR or \fIMM/DD/YY\fR
-or \fIMM.DD.YY\fR. You can also
+a date of the form \fIMMDDYY\fR or \fIMM/DD/YY\fR or \fIMM.DD.YY\fR, 
+or a date and time as \fIYYYY\-MM\-DD[THH[:MM[:SS]]]\fR.  You can also
 give times like \fInow + count time\-units\fR, where the time\-units
 can be \fIminutes\fR, \fIhours\fR, \fIdays\fR, or \fIweeks\fR
 and you can tell SLURM to run the job today with the keyword
@@ -422,15 +460,19 @@ Use quotes to enclose a reason having more than one word.
 .TP
 \fIState\fP=<state>
 Identify the state to be assigned to the node. Possible values are  "NoResp", 
-"ALLOC", "ALLOCATED", "DOWN", "DRAIN", "FAIL", "FAILING", "IDLE" or "RESUME". 
-"RESUME is not an actual node state, but will return a DRAINED, DRAINING, 
-or DOWN node to service, either IDLE or ALLOCATED state as appropriate.
-Setting a node "DOWN" will cause all running and suspended jobs on that
-node to be terminated.
+"ALLOC", "ALLOCATED", "DOWN", "DRAIN", "FAIL", "FAILING", "IDLE", "MAINT",
+"POWER_DOWN", "POWER_UP", or "RESUME". 
 If you want to remove a node from service, you typically want to set 
 its state to "DRAIN". 
 "FAILING" is similar to "DRAIN" except that some applications will  
 seek to relinquish those nodes before the job completes.
+"RESUME" is not an actual node state, but will return a "DRAINED", "DRAINING", 
+or "DOWN" node to service, either "IDLE" or "ALLOCATED" state as appropriate.
+Setting a node "DOWN" will cause all running and suspended jobs on that
+node to be terminated.
+"POWER_DOWN" and "POWER_UP" will use the configured \fISuspendProg\fR and
+\fIResumeProg\fR programs to explicitly place a node in or out of a power 
+saving mode.
 The "NoResp" state will only set the "NoResp" flag for a node without
 changing its underlying state.
 While all of the above states are valid, some of them are not valid new
@@ -438,7 +480,17 @@ node states given their prior state.
 Generally only "DRAIN", "FAIL" and "RESUME" should be used.
 
 .TP
-\fBSPECIFICATIONS FOR UPDATE AND DELETE COMMANDS, PARTITIONS\fR
+\fIWeight\fP=<weight>
+Identify the weight to be associated with the specified nodes. This allows 
+dynamic changes to the weight associated with nodes, which will be used 
+in subsequent node allocation decisions. 
+Any previously identified weight will be overwritten with the new value.
+\fBNOTE:\fR The \fIWeight\fP associated with nodes will be reset to
+the values specified in slurm.conf (if any) upon slurmctld restart
+or reconfiguration.
+Update slurm.conf with any changes meant to be persistent.
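+For example, a hypothetical update: "scontrol update NodeName=lx[10\-20] Weight=50".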
+
+.TP
+\fBSPECIFICATIONS FOR CREATE, UPDATE, AND DELETE COMMANDS, PARTITIONS\fR
 .TP
 \fIAllowGroups\fP=<name>
 Identify the user groups which may use this partition. 
@@ -458,12 +510,12 @@ that you want to become the new default.
 Specify if the partition and its jobs should be hidden from view. 
 Hidden partitions will by default not be reported by SLURM APIs 
 or commands. 
-Possible values are"YES" and "NO".
+Possible values are "YES" and "NO".
 .TP
 \fIMaxNodes\fP=<count>
 Set the maximum number of nodes which will be allocated to any single job 
 in the partition. Specify a number, "INFINITE" or "UNLIMITED".  (On a
-Bluegene type system this represents a c-node count.)
+Bluegene type system this represents a c\-node count.)
 
 .TP
 \fIMaxTime\fP=<time>
@@ -478,7 +530,7 @@ the next minute.
 .TP
 \fIMinNodes\fP=<count>
 Set the minimum number of nodes which will be allocated to any single job 
-in the partition.   (On a Bluegene type system this represents a c-node count.)
+in the partition.   (On a Bluegene type system this represents a c\-node count.)
 
 .TP
 \fINodes\fP=<name>
@@ -512,6 +564,95 @@ If a partition allocated nodes to running jobs, those jobs will continue
 execution even after the partition's state is set to "DOWN". The jobs 
 must be explicitly canceled to force their termination.
 
+.TP
+\fBSPECIFICATIONS FOR CREATE, UPDATE, AND DELETE COMMANDS, RESERVATIONS\fR
+.TP
+\fIReservation\fP=<name>
+Identify the name of the reservation to be created, updated, or deleted.  
+This parameter is required for update and is the only parameter for delete.  
+For create, if you do not want to give a reservation name, use 
+"scontrol create res ..." and a name will be created automatically.
+
+.TP
+\fINodeCnt\fP=<num>
+Identify the number of nodes to be reserved.  A new reservation must specify either
+NodeCnt or Nodes.
+
+.TP
+\fINodes\fP=<name>
+Identify the node(s) to be reserved. Multiple node names 
+may be specified using simple node range expressions (e.g. "Nodes=lx[10\-20]"). 
+Specify a blank data value to remove all nodes from a reservation: "Nodes=".
+A new reservation must specify either NodeCnt or Nodes.
+
+.TP
+\fIStartTime\fP=<time_spec>
+The start time for the reservation.  A new reservation must specify a start 
+time.  It accepts times of the form \fIHH:MM:SS\fR for
+a specific time of day (seconds are optional).
+(If that time is already past, the next day is assumed.)
+You may also specify \fImidnight\fR, \fInoon\fR, or
+\fIteatime\fR (4pm) and you can have a time\-of\-day suffixed
+with \fIAM\fR or \fIPM\fR for running in the morning or the evening.
+You can also say what day the job will be run, by specifying
+a date of the form \fIMMDDYY\fR or \fIMM/DD/YY\fR or \fIMM.DD.YY\fR, 
+or a date and time as \fIYYYY\-MM\-DD[THH[:MM[:SS]]]\fR.  You can also
+give times like \fInow + count time\-units\fR, where the time\-units
+can be \fIminutes\fR, \fIhours\fR, \fIdays\fR, or \fIweeks\fR
+and you can tell SLURM to run the job today with the keyword
+\fItoday\fR and to run the job tomorrow with the keyword
+\fItomorrow\fR.
+
+.TP
+\fIEndTime\fP=<time_spec>
+The end time for the reservation.  A new reservation must specify an end 
+time or a duration.  Valid formats are the same as for StartTime.
+
+.TP
+\fIDuration\fP=<time>
+The length of a reservation.  A new reservation must specify an end 
+time or a duration.  Valid formats are minutes, minutes:seconds, 
+hours:minutes:seconds, days\-hours, days\-hours:minutes, 
+days\-hours:minutes:seconds, or UNLIMITED.  Time resolution is one minute and 
+second values are rounded up to the next minute.
+
+.TP
+\fIPartitionName\fP=<name>
+Identify the partition to be reserved.
+
+.TP
+\fIFlags\fP=<flags>
+Flags associated with the reservation. 
+Currently "MAINT" (maintenance mode, receives special accounting treatment), 
+"DAILY" (repeat the reservation every day), 
+"WEEKLY" (repeat the reservation every week) and
+"SPEC_NODES" (reservation is for specific nodes, output only) is supported.
+In order to remove the "MAINT", "DAILY" or "WEEKLY" flag with the update
+option, precede the name with a minus sign. For example:
+Flags=\-MAINT.
+
+.TP
+\fIFeatures\fP=<features>
+Set the reservation's required node features. Multiple values 
+may be comma separated if all features are required (AND operation) or 
+separated by "|" if any of the specified features are required (OR operation).
+Value may be cleared with blank data value, "Features=".
+
+.TP
+\fIUsers\fP=<user list>
+List of users permitted to use the reserved nodes.  
+E.g.  Users=jones1,smith2.
+A new reservation must specify Users and/or Accounts.
+
+.TP
+\fIAccounts\fP=<account list>
+List of accounts permitted to use the reserved nodes.  
+E.g.  Accounts=physcode1,physcode2.  A user in any of the accounts
+may use the reserved nodes.
+A new reservation must specify Users and/or Accounts.
+
 .TP
 \fBSPECIFICATIONS FOR UPDATE, BLOCK \fR
 .TP
@@ -527,7 +668,7 @@ to run on the block. \fBWARNING!!!!\fR This will cancel any
 running job on the block!
 .TP
 \fISubBPName\fP=<name>
-Identify the bluegene ionodes to be updated (i.e. bg000[0-3]). This
+Identify the bluegene ionodes to be updated (e.g. bg000[0\-3]). This
 specification is required.
 .TP
 
@@ -585,13 +726,21 @@ tux2
 .br
 tux3
 .br
+scontrol: create res StartTime=2009-04-01T08:00:00 Duration=5:00:00 Users=dbremer NodeCnt=10
+.br
+Reservation created: dbremer_1
+.br
+scontrol: update Reservation=dbremer_1 Flags=Maint NodeCnt=20
+.br
+scontrol: delete Reservation=dbremer_1
+.br
 scontrol: quit
 .ec
 
 .SH "COPYING"
 Copyright (C) 2002\-2007 The Regents of the University of California.
 Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
-LLNL\-CODE\-402394.
+CODE\-OCEC\-09\-009. All rights reserved.
 .LP
 This file is part of SLURM, a resource management program.
 For details, see <https://computing.llnl.gov/linux/slurm/>.
@@ -611,12 +760,14 @@ details.
 .SH "SEE ALSO"
 \fBscancel\fR(1), \fBsinfo\fR(1), \fBsqueue\fR(1), 
 \fBslurm_checkpoint\fR(3),
+\fBslurm_create_partition\fR(3),
 \fBslurm_delete_partition\fR(3),
 \fBslurm_load_ctl_conf\fR(3), 
 \fBslurm_load_jobs\fR(3), \fBslurm_load_node\fR(3), 
 \fBslurm_load_partitions\fR(3), 
 \fBslurm_reconfigure\fR(3),  \fBslurm_requeue\fR(3), \fBslurm_resume\fR(3),
 \fBslurm_shutdown\fR(3), \fBslurm_suspend\fR(3),
+\fBslurm_takeover\fR(3),
 \fBslurm_update_job\fR(3), \fBslurm_update_node\fR(3), 
 \fBslurm_update_partition\fR(3),
 \fBslurm.conf\fR(5)
diff --git a/doc/man/man1/sinfo.1 b/doc/man/man1/sinfo.1
index d0c73845fc0ffc6b17de54d32162e705f432cc4e..7203cad044b9399f252464ec348b2ddc6afb2cfd 100644
--- a/doc/man/man1/sinfo.1
+++ b/doc/man/man1/sinfo.1
@@ -1,4 +1,4 @@
-.TH SINFO "1" "July 2007" "sinfo 1.3" "Slurm components"
+.TH SINFO "1" "May 2008" "sinfo 2.0" "Slurm components"
 
 .SH "NAME"
 sinfo \- view information about SLURM nodes and partitions.
@@ -94,7 +94,7 @@ when running with various options are
 "%#N %.5D %9P %11T %.4c %.8z %.6m %.8d %.6w %8f %R"
 .TP
 .I "\-\-list\-reasons"
-"%35R %N"
+"%50R %N"
 .TP
 .I "\-\-long \-\-list\-reasons"
 "%50R %6t %N"
@@ -153,6 +153,9 @@ Jobs may share nodes, "yes", "no", or "force"
 \fB%l\fR 
 Maximum time for any job in the format "days\-hours:minutes:seconds"
 .TP
+\fB%L\fR
+Default time for any job in the format "days\-hours:minutes:seconds"
+.TP
 \fB%m\fR 
 Size of memory per node in megabytes
 .TP
@@ -172,6 +175,9 @@ fail or failing states)
 \fB%s\fR 
 Maximum job size in nodes
 .TP
+\fB%S\fR 
+Allowed allocating nodes
+.TP
 \fB%t\fR 
 State of nodes, compact form
 .TP
@@ -251,8 +257,9 @@ default sort value is "N" (increasing node name).
 List nodes only having the given state(s).  Multiple states
 may be comma separated and the comparison is case insensitive.
 Possible values include (case insensitive): ALLOC, ALLOCATED,
-COMP, COMPLETING, DOWN, DRAIN, DRAINED, DRNG, DRAINING, FAIL, 
-FAILING, IDLE, UNK, and UNKNOWN.  
+COMP, COMPLETING, DOWN, DRAIN (for nodes in DRAINING or DRAINED
+states), DRAINED, DRAINING, FAIL, FAILING, IDLE, MAINT, NO_RESPOND, 
+POWER_SAVE, UNK, and UNKNOWN.  
 By default nodes in the specified state are reported whether 
 they are responding or not.  
 The \fB\-\-dead\fR and \fB\-\-responding\fR options may be 
@@ -420,6 +427,9 @@ man page or the \fBslurm.conf\fR(5) man page for more information.
 \fBIDLE\fR
 The node is not allocated to any jobs and is available for use.
 .TP
+\fBMAINT\fR
+The node is currently in a reservation with a flag value of "maintenance".
+.TP
 \fBUNKNOWN\fR
 The SLURM controller has just started and the node's state
 has not yet been determined.
@@ -514,7 +524,7 @@ Not Responding                      dev8
 .SH "COPYING"
 Copyright (C) 2002\-2007 The Regents of the University of California.
 Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
-LLNL\-CODE\-402394.
+CODE\-OCEC\-09\-009. All rights reserved.
 .LP
 This file is part of SLURM, a resource management program.
 For details, see <https://computing.llnl.gov/linux/slurm/>.
diff --git a/doc/man/man1/slurm.1 b/doc/man/man1/slurm.1
index 3e4626bf6c80f96046a1331e9a328692a8b84f40..20ab95650a6a22489aeadb3a790c94c11f5cd61e 100644
--- a/doc/man/man1/slurm.1
+++ b/doc/man/man1/slurm.1
@@ -1,27 +1,32 @@
-.TH SLURM "1" "November 2006" "slurm 1.2" "Slurm system"
+.TH SLURM "1" "March 2009" "slurm 2.0" "Slurm system"
 
 .SH "NAME"
 slurm \- SLURM system overview.
 
 .SH "DESCRIPTION"
 The Simple Linux Utility for Resource Management (SLURM) is an open source,
-fault\-tolerant, and highly scalable cluster management and job scheduling system 
+fault\-tolerant, and highly scalable cluster management and job scheduling system
 for large and small Linux clusters. SLURM requires no kernel modifications for
+its operation and is relatively self\-contained. As a cluster resource manager,
-SLURM has three key functions. First, it allocates exclusive and/or non\-exclusive 
+its operation and is relatively self-contained. As a cluster resource manager,
+SLURM has three key functions. First, it allocates exclusive and/or non-exclusive
 access to resources (compute nodes) to users for some duration of time so they
 can perform work. Second, it provides a framework for starting, executing, and
-monitoring work (normally a parallel job) on the set of allocated nodes. Finally, 
-it arbitrates conflicting requests for resources by managing a queue of pending
-work.
+monitoring work (normally a parallel job) on the set of allocated nodes.
+Finally, it arbitrates contention for resources by managing a queue of
+pending work.
+Optional plugins can be used for accounting, advanced reservation,
+gang scheduling (time sharing for parallel jobs), backfill scheduling,
+resource limits by user or bank account,
+and sophisticated multifactor job prioritization algorithms.
 
 SLURM has a centralized manager, \fBslurmctld\fR, to monitor resources and
 work. There may also be a backup manager to assume those responsibilities in the 
 event of failure. Each compute server (node) has a \fBslurmd\fR daemon, which
 can be compared to a remote shell: it waits for work, executes that work, returns 
-status, and waits for more work. 
+status, and waits for more work. An optional \fBslurmdbd\fR (SLURM DataBase Daemon)
+can be used for accounting purposes and to maintain resource limit information.
 
-User tools include \fBsrun\fR to initiate jobs, 
+Basic user tools include \fBsrun\fR to initiate jobs, 
 \fBscancel\fR to terminate queued or running jobs, \fBsinfo\fR to report system 
 status, and \fBsqueue\fR to report the status of jobs. There is also an administrative
 tool \fBscontrol\fR available to monitor and/or modify configuration and state
@@ -36,8 +41,9 @@ Extensive documenation is also available on the internet at
 
 .SH "COPYING"
 Copyright (C) 2005\-2007 The Regents of the University of California.
+Copyright (C) 2008\-2009 Lawrence Livermore National Security.
 Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
-LLNL\-CODE\-402394.
+CODE\-OCEC\-09\-009. All rights reserved.
 .LP
 This file is part of SLURM, a resource management program.
 For details, see <https://computing.llnl.gov/linux/slurm/>.
@@ -53,10 +59,13 @@ FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
 details.
 
 .SH "SEE ALSO"
-\fBsacct\fR(1), \fBsalloc\fR(1), \fBsattach\fR(1), \fBsbatch\fR(1), 
-\fBsbcast\fR(1), \fBscancel\fR(1), \fBscontrol\fR(1), \fBsinfo\fR(1), 
-\fBsmap\fR(1), \fBsqueue\fR(1), \fBsrun\fR(1),
+\fBsacct\fR(1), \fBsacctmgr\fR(1), \fBsalloc\fR(1), \fBsattach\fR(1), 
+\fBsbatch\fR(1), \fBsbcast\fR(1), \fBscancel\fR(1), \fBscontrol\fR(1), 
+\fBsinfo\fR(1), \fBsmap\fR(1), \fBsqueue\fR(1), \fBsreport\fR(1), 
+\fBsrun\fR(1), \fBsshare\fR(1), \fBsstat\fR(1), \fBstrigger\fR(1),
 \fBsview\fR(1), 
-\fBbluegene.conf\fR(5), \fBslurm.conf\fR(5), \fBwiki.conf\fR(5),
-\fBslurmctld\fR(8), \fBslurmd\fR(8), \fBslurmstepd\fR(8), \fBspank\fR(8)
+\fBbluegene.conf\fR(5), \fBslurm.conf\fR(5), \fBslurmdbd.conf\fR(5), 
+\fBwiki.conf\fR(5),
+\fBslurmctld\fR(8), \fBslurmd\fR(8), \fBslurmdbd\fR(8), \fBslurmstepd\fR(8), 
+\fBspank\fR(8)
 
diff --git a/doc/man/man1/smap.1 b/doc/man/man1/smap.1
index 3319ce082ee12e11b99b1756038d615eb9d6cf12..c0522ab6c7604c8a17d226db189a3071953a22c9 100644
--- a/doc/man/man1/smap.1
+++ b/doc/man/man1/smap.1
@@ -1,4 +1,4 @@
-.TH SMAP "1" "May 2007" "smap 1.3" "Slurm components"
+.TH SMAP "1" "April 2009" "smap 2.0" "Slurm components"
 
 .SH "NAME"
 smap \- graphically view information about SLURM jobs, partitions, and set 
@@ -30,17 +30,22 @@ Note that unallocated nodes are indicated by a '.' and nodes in the
 DOWN, DRAINED or FAIL state by a '#'.
 .RS
 .TP 15
+.I "b"
+Displays information about BlueGene partitions on the system
+.TP
+.I "c"
+Displays current BlueGene node states and allows users to configure the system.
+.TP
 .I "j"
 Displays information about jobs running on system.
+.TP
+.I "r"
+Displays information about advanced reservations. 
+While all current and future reservations will be listed, 
+only currently active reservations will appear on the node map.
 .TP 
 .I "s"
 Displays information about slurm partitions on the system
-.TP
-.I "b"
-Displays information about BG partitions on the system
-.TP
-.I "c"
-Displays current node states and allows users to configure the system.
 .RE
 
 .TP
@@ -90,6 +95,11 @@ scroll the window containing the text information.
 
 .SH "OUTPUT FIELD DESCRIPTIONS"
 .TP
+\fBACCESS_CONTROL\fR
+Identifies the users or bank accounts which can use this advanced reservation.
+A prefix of "A:" indicates that the following account names may use this reservation.
+A prefix of "U:" indicates that the following user names may use this reservation.
+.TP
 \fBAVAIL\fR
 Partition state: \fBup\fR or \fBdown\fR.
 .TP
@@ -99,6 +109,9 @@ BlueGene Block Name\fR.
 \fBCONN\fR
 Connection Type: \fBTORUS\fR or \fBMESH\fR or \fBSMALL\fR (for small blocks).
 .TP
+\fBEND_TIME\fR
+The time when an advanced reservation ended.
+.TP
 \fBID\fR
 Key to identify the nodes associated with this entity in the node chart.
 .TP
@@ -106,10 +119,11 @@ Key to identify the nodes associated with this entity in the node chart.
 Mode Type: \fBCOPROCESS\fR or \fBVIRTUAL\fR.
 .TP
 \fBNAME\fR
-Name of the job.
+Name of the job or advanced reservation.
 .TP
 \fBNODELIST\fR or \fBBP_LIST\fR
-Names of nodes or base partitions associated with this configuration/partition.
+Names of nodes or base partitions associated with this configuration, 
+partition or reservation.
 .TP
 \fBNODES\fR
 Count of nodes or base partitions with this particular configuration.
@@ -125,6 +139,9 @@ CG (completing), CD  (completed),
 F (failed), TO (timeout), and NF (node failure). See 
 \fBJOB STATE CODES\fR section below for more information.
 .TP
+\fBSTART_TIME\fR
+The time when an advanced reservation started.
+.TP
 \fBSTATE\fR
 State of the nodes. 
 Possible states include: allocated, completing, down, 
@@ -388,6 +405,9 @@ man page or the \fBslurm.conf\fR(5) man page for more information.
 \fBIDLE\fR
 The node is not allocated to any jobs and is available for use.
 .TP
+\fBMAINT\fR
+The node is currently in a reservation with a flag value of "maintenance".
+.TP
 \fBUNKNOWN\fR
 The SLURM controller has just started and the node's state
 has not yet been determined.
@@ -436,8 +456,9 @@ The location of the SLURM configuration file.
 
 .SH "COPYING"
 Copyright (C) 2004\-2007 The Regents of the University of California.
+Copyright (C) 2008\-2009 Lawrence Livermore National Security.
 Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
-LLNL\-CODE\-402394.
+CODE\-OCEC\-09\-009. All rights reserved.
 .LP
 This file is part of SLURM, a resource management program.
 For details, see <https://computing.llnl.gov/linux/slurm/>.
diff --git a/doc/man/man1/sprio.1 b/doc/man/man1/sprio.1
new file mode 100644
index 0000000000000000000000000000000000000000..379432758b219731900dda47f8b2715dc0f63559
--- /dev/null
+++ b/doc/man/man1/sprio.1
@@ -0,0 +1,221 @@
+.TH SPRIO "1" "March 2009" "sprio 2.0" "SLURM commands"
+
+.SH "NAME"
+sprio \- view the factors that comprise a job's scheduling priority
+
+.SH "SYNOPSIS"
+\fBsprio\fR [\fIOPTIONS\fR...]
+
+.SH "DESCRIPTION"
+\fBsprio\fR is used to view the components of a job's scheduling
+priority when the multi-factor priority plugin is installed.
+\fBsprio\fR is a read-only utility that extracts information from the
+multi-factor priority plugin.  By default, \fBsprio\fR returns
+information for all pending jobs.  Options exist to display specific
+jobs by job ID and user name.
+
+.SH "OPTIONS"
+
+.TP
+\fB\-h\fR, \fB\-\-noheader\fR
+Do not print a header on the output.
+
+.TP
+\fB\-\-help\fR
+Print a help message describing all \fBsprio\fR options.
+
+.TP
+\fB\-j <job_id_list>\fR, \fB\-\-jobs=<job_id_list>\fR
+Requests a comma separated list of job ids to display.  Defaults to all jobs.
+
+.TP
+\fB\-l\fR, \fB\-\-long\fR
+Report more of the available information for the selected jobs.
+
+.TP
+\fB\-n\fR, \fB\-\-norm\fR
+Display the normalized priority factors for the selected jobs.
+
+.TP
+\fB\-o <output_format>\fR, \fB\-\-format=<output_format>\fR
+Specify the information to be displayed, its size and position
+(right or left justified).
+The default formats with various options are
+
+.RS
+.TP 15
+\fIdefault\fR
+"%.7i %.8u %.10A %.10F %.10J %.10P %.10Q"
+.TP
+\fI\-l, \-\-long\fR
+"%.7i %.8u %.10Y %.10A %.10F %.10J %.10P %.10Q %.6N"
+.RE
+
+.IP
+The format of each field is "%[.][size]type".
+.RS
+.TP 8
+\fIsize\fR
+is the minimum field size.
+If no size is specified, whatever is needed to print the information will be used.
+.TP
+\fI.\fR
+indicates the output should be right justified.
+By default, output is left justified.
+.RE
+
+.IP
+Valid \fItype\fR specifications include:
+
+.RS
+.TP 4
+\fB%a\fR
+Normalized age priority
+.TP
+\fB%A\fR
+Weighted age priority
+.TP
+\fB%f\fR
+Normalized fair-share priority
+.TP
+\fB%F\fR
+Weighted fair-share priority
+.TP
+\fB%i\fR
+Job ID
+.TP
+\fB%j\fR
+Normalized job size priority
+.TP
+\fB%J\fR
+Weighted job size priority
+.TP
+\fB%N\fR
+Nice adjustment
+.TP
+\fB%p\fR
+Normalized partition priority
+.TP
+\fB%P\fR
+Weighted partition priority
+.TP
+\fB%q\fR
+Normalized quality of service priority
+.TP
+\fB%Q\fR
+Weighted quality of service priority
+.TP
+\fB%u\fR
+User name for a job
+.TP
+\fB%Y\fR
+Job priority
+.TP
+\fB%y\fR
+Normalized job priority
+.RE
+
+.TP
+\fB\-u <user_list>\fR, \fB\-\-user=<user_list>\fR
+Request jobs from a comma separated list of users.  The list can
+consist of user names or user id numbers.
+
+.TP
+\fB\-\-usage\fR
+Print a brief help message listing the \fBsprio\fR options.
+
+.TP
+\fB\-v\fR, \fB\-\-verbose\fR
+Report details of sprio's actions.
+
+.TP
+\fB\-V\fR, \fB\-\-version\fR
+Print version information and exit.
+
+.TP
+\fB\-w\fR, \fB\-\-weights\fR
+Display the configured weights for each factor.
+This is for information purposes only.  Actual job data is suppressed.
+
+.SH "EXAMPLES"
+.eo
+Print the list of all pending jobs with their weighted priorities
+.br
+> sprio
+.br
+  JOBID   PRIORITY        AGE  FAIRSHARE    JOBSIZE  PARTITION        QOS
+.br
+  65539      62664          0      51664       1000      10000          0
+.br
+  65540      62663          0      51663       1000      10000          0
+.br
+  65541      62662          0      51662       1000      10000          0
+.ec
+
+.eo
+Print the list of all pending jobs with their normalized priorities
+.br
+> sprio -n
+.br
+  JOBID PRIORITY   AGE        FAIRSHARE  JOBSIZE    PARTITION  QOS
+.br
+  65539 0.00001459 0.0007180  0.5166470  1.0000000  1.0000000  0.0000000
+.br
+  65540 0.00001459 0.0007180  0.5166370  1.0000000  1.0000000  0.0000000
+.br
+  65541 0.00001458 0.0007180  0.5166270  1.0000000  1.0000000  0.0000000
+.ec
+
+.eo
+Print the job priorities for specific jobs
+.br
+> sprio --jobs=65548,65547
+.br
+  JOBID   PRIORITY        AGE  FAIRSHARE    JOBSIZE  PARTITION        QOS
+.br
+  65547      62078          0      51078       1000      10000          0
+.br
+  65548      62077          0      51077       1000      10000          0
+.ec
+
+.eo
+Print the job priorities for jobs of specific users
+.br
+> sprio --user=fred,sally
+.br
+  JOBID     USER  PRIORITY       AGE  FAIRSHARE   JOBSIZE  PARTITION     QOS
+.br
+  65548     fred     62079         1      51077      1000      10000       0
+.br
+  65549    sally     62080         1      51078      1000      10000       0
+.ec
+
+.eo
+Print the configured weights for each priority component
+.br
+> sprio -w
+.br
+  JOBID   PRIORITY        AGE  FAIRSHARE    JOBSIZE  PARTITION        QOS
+.br
+  Weights                1000     100000       1000      10000          1
+.ec
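+
+.eo
+Print the job ID, user name and weighted QOS priority using a custom
+format (an illustrative invocation; output depends on site configuration)
+.br
+> sprio -o "%.7i %.8u %.10Q"
+.ec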
+
+.SH "COPYING"
+Copyright (C) 2009 Lawrence Livermore National Security.
+Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+CODE\-OCEC\-09\-009. All rights reserved.
+.LP
+This file is part of SLURM, a resource management program.
+For details, see <https://computing.llnl.gov/linux/slurm/>.
+.LP
+SLURM is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 2 of the License, or (at your option)
+any later version.
+.LP
+SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+details.
+.SH "SEE ALSO"
+\fBsqueue\fR(1), \fBsshare\fR(1)
diff --git a/doc/man/man1/squeue.1 b/doc/man/man1/squeue.1
index f933846ed19896d23fc8003435bf018a896516d3..349da10d5f93cf913d933557d80c52d861601681 100644
--- a/doc/man/man1/squeue.1
+++ b/doc/man/man1/squeue.1
@@ -1,4 +1,4 @@
-.TH SQUEUE "1" "May 2008" "squeue 1.3" "Slurm components"
+.TH SQUEUE "1" "May 2008" "squeue 2.0" "Slurm components"
 
 .SH "NAME"
 squeue \- view information about jobs located in the SLURM scheduling queue.
@@ -39,10 +39,8 @@ specified (in seconds).
 By default, prints a time stamp with the header.
 
 .TP
-\fB\-j\fR, \fB\-\-jobs\fR
-Specify the jobs to view.  This flag indicates that a comma separated list 
-of jobs to view follows without an equal sign (see examples).
-Defaults to all jobs.
+\fB\-j <job_id_list>\fR, \fB\-\-jobs=<job_id_list>\fR
+Requests a comma separated list of job ids to display.  Defaults to all jobs.
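+For example, a hypothetical invocation: "squeue \-\-jobs=65540,65541".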
 
 .TP
 \fB\-l\fR, \fB\-\-long\fR
@@ -65,10 +63,10 @@ The default formats with various options are
 .RS
 .TP 15
 \fIdefault\fR
-"%.7i %.9P %.8j %.8u %.2t %.9M %.6D %R"
+"%.7i %.9P %.8j %.8u %.2t %.10M %.6D %R"
 .TP
 \fI\-l, \-\-long\fR
-".7i %.9P %.8j %.8u %.8T %.9M %.9l %.6D %R"
+"%.7i %.9P %.8j %.8u %.8T %.10M %.9l %.6D %R"
 .TP
 \fI\-s, \-\-steps\fR
 "%10i %.8j %.9P %.8u %.9M %N"
@@ -247,6 +245,9 @@ User name for a job or job step.
 \fB%U\fR 
 User ID for a job or job step.
 .TP
+\fB%v\fR 
+Reservation for the job.
+.TP
 \fB%x\fR 
 List of node names explicitly excluded by the job.
 .TP
@@ -287,7 +288,7 @@ This uses the same field specifciation as the <output_format>.
 Multiple sorts may be performed by listing multiple sort fields 
 separated by commas.
 The field specifications may be preceded by "+" or "\-" for 
-assending (default) and desending order respectively. 
+ascending (default) and descending order respectively. 
 For example, a sort value of "P,U" will sort the
 records by partition name then by user id. 
 The default value of sort for jobs is "P,t,\-p" (increasing partition 
@@ -310,8 +311,8 @@ See the \fBJOB STATE CODES\fR section below for more information.
 
 .TP
 \fB\-u <user_list>\fR, \fB\-\-user=<user_list>\fR
-Specifies a comma separated list of users whose jobs or job steps are to be
-reported. The list can consist of user names or user id numbers.
+Request jobs or job steps from a comma separated list of users.  The
+list can consist of user names or user id numbers.
 
 .TP
 \fB\-\-usage\fR
@@ -494,7 +495,7 @@ Print information only about job step 65552.1:
 .SH "COPYING"
 Copyright (C) 2002\-2007 The Regents of the University of California.
 Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
-LLNL\-CODE\-402394.
+CODE\-OCEC\-09\-009. All rights reserved.
 .LP
 This file is part of SLURM, a resource management program.
 For details, see <https://computing.llnl.gov/linux/slurm/>.
diff --git a/doc/man/man1/sreport.1 b/doc/man/man1/sreport.1
index 3232b168f56e8d61b9566a71583be1f0dd350215..115f29d45e168f7b56f72a7e32a4883cb69083ee 100644
--- a/doc/man/man1/sreport.1
+++ b/doc/man/man1/sreport.1
@@ -1,14 +1,13 @@
-.TH SREPORT "1" "October 2008" "sreport 1.3" "Slurm components"
+.TH SREPORT "1" "February 2009" "sreport 2.0" "Slurm components"
 
 .SH "NAME"
-sreport \- Used to generate reports from the slurm accounting data.
+sreport \- Generate reports from the slurm accounting data.
 
 .SH "SYNOPSIS"
 \fBsreport\fR [\fIOPTIONS\fR...] [\fICOMMAND\fR...]
 
 .SH "DESCRIPTION"
-\fBsreport\fR is used to generate certain reports.  More can be added
-at any time.  It provides a view into accounting data gathered from slurm via
+\fBsreport\fR is used to generate certain reports. It provides a view into accounting data gathered from slurm via
 the account information maintained within a database with the interface 
 being provided by the \fBslurmdbd\fR (Slurm Database daemon).
 
@@ -16,49 +15,43 @@ being provided by the \fBslurmdbd\fR (Slurm Database daemon).
 
 .TP
 \fB\-a\fR, \fB\-\-all_clusters\fR
-Use all clusters instead of only cluster from where the command was run.
-
+Use all clusters instead of only the cluster from where the command was run.
 .TP
 \fB\-h\fR, \fB\-\-help\fR
 Print a help message describing the usage of \fBsreport\fR.
-This is equivalent to the \fBhelp\fR command.
-
 .TP
-\fB\-n\fR, \fB\-\-no_header\fR
+\fB\-n\fR, \fB\-\-noheader\fR
 Don't display header when listing results.
-
 .TP
 \fB\-p\fR, \fB\-\-parsable\fR
 Output will be '|' delimited with a '|' at the end.
-
 .TP
 \fB\-P\fR, \fB\-\-parsable2\fR
 Output will be '|' delimited without a '|' at the end.
-
 .TP
 \fB\-q\fR, \fB\-\-quiet\fR
 Print no warning or informational messages, only error messages.
-This is equivalent to the \fBquiet\fR command.
-
 .TP
 \fB\-t <format>\fR
 Specify the output time format. 
 Time format options are case insensitive and may be abbreviated.
 The default format is Minutes.
-Supported time format options are listed with the \fBtime\fP command below.
-
+Supported time format options are listed in the \fBtime\fP command
+section below.
 .TP
 \fB\-v\fR, \fB\-\-verbose\fR
 Print detailed event logging. 
-This is equivalent to the \fBverbose\fR command.
-
 .TP
 \fB\-V\fR, \fB\-\-version\fR
 Print version information and exit.
-This is equivalent to the \fBversion\fR command.
 
 .SH "COMMANDS"
 
+.TP
+\<keyword\> may be omitted from the command line, in which case sreport
+will execute in interactive mode, processing commands as entered until
+explicitly terminated.
+
 .TP
 \fBexit\fP
 Terminate the execution of sreport.
@@ -68,6 +61,14 @@ Identical to the \fBquit\fR command.
 \fBhelp\fP
 Display a description of sreport options and commands.
 
+.TP
+\fBparsable\fP
+Output will be '|' delimited with an ending '|'.
+
+.TP
+\fBparsable2\fP
+Output will be '|' delimited without an ending '|'.
+
 .TP
 \fBquiet\fP
 Print no warning or informational messages, only fatal error messages.
@@ -109,14 +110,11 @@ Percentage of Total
 
 .TP
 \fBverbose\fP
-Print detailed event logging. 
-This includes time\-stamps on data structures, record counts, etc.
-This is an independent command with no options meant for use in interactive mode.
+Enable detailed event logging. 
 
 .TP
 \fBversion\fP
-Display the version number of sreport being executed.
-     \-q or \-\-quiet: equivalent to \"quiet\" command                        \n\
+Display the sreport version number.
 
 .TP
 \fB!!\fP
@@ -124,17 +122,28 @@ Repeat the last command executed.
 
 .SH "REPORT TYPES"
 .TP
-\fBcluster\fP, \fBjob\fP, and \fBuser\fP
+Valid report types are:
+\fBcluster\fP \<REPORT\> \<OPTIONS\>
+
+\fBjob\fP \<REPORT\> \<OPTIONS\>
+
+\fBreservation\fP \<REPORT\> \<OPTIONS\>
+
+\fBuser\fP \<REPORT\> \<OPTIONS\>
+.RE
 
 .TP
-Various reports are as follows...
-     cluster \- AccountUtilizationByUser, UserUtilizationByAccount,
-     UserUtilizationByWckey, Utilization, WCKeyUtilizationByUser
-     job     \- SizesByAccount, SizesByWckey
-     user    \- TopUsage
+\<REPORT\> options for each type include:
+\fBcluster\fP	  \- AccountUtilizationByUser, UserUtilizationByAccount,
+UserUtilizationByWckey, Utilization, WCKeyUtilizationByUser
+
+\fBjob\fP	  \- SizesByAccount, SizesByWckey
+
+\fBreservation\fP \- Utilization
+
+\fBuser\fP	\- TopUsage
 
 .TP
 
+
 .TP
 REPORT DESCRIPTION
 .RS
@@ -158,7 +167,7 @@ on separate lines.
 .TP
 .B cluster Utilization
 This report will display total usage divided by Allocated, Down,
-Idle, and resrved time for selected clusters.  Reserved time
+Idle, and Reserved time for selected clusters.  Reserved time
 refers to time that a job was waiting for resources after the job
 had become eligible.  If the value is not of importance for you
 the number should be grouped with idle time.
@@ -180,6 +189,10 @@ accounts listed.
 This report will display the amount of time for each wckey for job ranges
 specified by the 'grouping=' option.  
 
+.TP
+.B reservation Utilization
+This report will display total usage for reservations on the systems.
+
 .TP
 .B user TopUsage
 Displays the top users on a cluster.  Use the group option to group
@@ -194,7 +207,7 @@ COMMON FOR ALL TYPES
 .RS
 .TP
 .B All_Clusters
-Use all monitored clusters default is local cluster.
+Use all monitored clusters. Default is local cluster.
 .TP
 .B Clusters=<OPT>
 List of clusters to include in report.  Default is local cluster.
@@ -205,6 +218,7 @@ Valid time formats are...
 HH:MM[:SS] [AM|PM]
 MMDD[YY] or MM/DD[/YY] or MM.DD[.YY]
 MM/DD[/YY]-HH:MM[:SS]         
+YYYY-MM-DD[THH[:MM[:SS]]]
 .TP
 .B Format=<OPT>
 Comma separated list of fields to display in report.
@@ -215,6 +229,7 @@ Valid time formats are...
 HH:MM[:SS] [AM|PM]
 MMDD[YY] or MM/DD[/YY] or MM.DD[.YY]
 MM/DD[/YY]-HH:MM[:SS]         
+YYYY-MM-DD[THH[:MM[:SS]]]
 .RE
 
 .TP
@@ -228,7 +243,7 @@ Default is all.
 .TP
 .B Tree
 When used with the AccountUtilizationByUser report will span the
-accounts as they in the hierarchy.
+accounts as they are in the hierarchy.
 .TP
 .B Users=<OPT>
 When used with any report other than Utilization, List of users to
@@ -265,6 +280,9 @@ Comma separated list of size groupings.   (i.e. 50,100,150 would group job cpu c
 .B Jobs=<OPT>
 List of jobs/steps to include in report.  Default is all.
 .TP
+.B Nodes=<OPT>
+Only show jobs that ran on these nodes. Default is all.
+.TP
 .B Partitions=<OPT>
 List of partitions jobs ran on to include in report.  Default is all.
 .TP
@@ -281,6 +299,16 @@ SizesbyWckey report all users summed together.  If you want only
 certain users specify them with the Users= option.
 .RE
 
+.TP
+RESERVATION
+.TP
+.B Names=<OPT>
+List of reservations to use for the report. Default is all.
+.TP
+.B Nodes=<OPT>
+Only show reservations that used these nodes. Default is all.
+.RE
+
 .TP
 USER
 .RS
@@ -298,7 +326,7 @@ Default is 10.
 .TP
 .B Users=<OPT>
 List of users jobs to include in report.  Default is all.
-.RE
+.RE  
 
 .TP
  
@@ -321,16 +349,24 @@ List of users jobs to include in report.  Default is all.
        SizesByWckey
              \- Wckey, Cluster
 
+\fBReservation\fP
+       Utilization
+             \- Allocated, Associations, Cluster, CPUCount, CPUTime,
+                End, Idle, Name, Nodes, Start, TotalTime
+
 \fBUser\fP
        TopUsage
              \- Account, Cluster, Login, Proper, Used
                                                                            
-.TP         
+.TP
+All commands and options are case\-insensitive.
+
 .SH "EXAMPLES"
 .TP
 \fBsreport job sizesbyaccount\fP
 .TP
-\fBreport cluster utilization\fP
+\fBsreport cluster utilization\fP
 .TP
 \fBsreport user top\fP
 .TP
@@ -348,12 +384,11 @@ cluster zeus
 \fBsreport user topusage start=2/16/09 end=2/23/09 \-t percent account=lc\fP
 Report top usage in percent of the lc account during the specified week
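+.TP
+\fBsreport reservation utilization start=2009\-04\-01T00:00:00 end=2009\-04\-08T00:00:00\fP
+Report reservation utilization for the specified week (an illustrative
+invocation using the YYYY\-MM\-DD[THH[:MM[:SS]]] time format; output
+depends on the reservations defined on the local cluster)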
 .TP
-               
 
 .SH "COPYING"
-Copyright (C) 2008 Lawrence Livermore National Security.
+Copyright (C) 2009 Lawrence Livermore National Security.
 Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
-LLNL\-CODE\-402394.
+CODE\-OCEC\-09\-009. All rights reserved.
 .LP
 This file is part of SLURM, a resource management program.
 For details, see <https://computing.llnl.gov/linux/slurm/>.
@@ -369,4 +404,4 @@ FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
 details.
 
 .SH "SEE ALSO"
-\fBsacct\fR(1), \fBslurmdbe\fR(8)
+\fBsacct\fR(1), \fBslurmdbd\fR(8)
diff --git a/doc/man/man1/srun.1 b/doc/man/man1/srun.1
index c4d024785ee14112a5515950b39e3d85ddd741ca..d7696d45a64f4d73875d005ecbb88ea5372757eb 100644
--- a/doc/man/man1/srun.1
+++ b/doc/man/man1/srun.1
@@ -1,12 +1,12 @@
-.TH SRUN "1" "August 2008" "srun 1.3" "slurm components"
+.TH "srun" "1" "SLURM 2.0" "April 2009" "SLURM Commands"
 
 .SH "NAME"
-srun \- run parallel jobs
+srun \- Run parallel jobs
 
-.SH SYNOPSIS
+.SH "SYNOPSIS"
 \fBsrun\fR            [\fIOPTIONS\fR...]  \fIexecutable \fR[\fIargs\fR...]
 
-.SH DESCRIPTION
+.SH "DESCRIPTION"
 Run a parallel job on a cluster managed by SLURM.  If necessary, srun will
 first create a resource allocation in which to run the parallel job.
 
@@ -14,15 +14,15 @@ first create a resource allocation in which to run the parallel job.
 .LP
 
 .TP
-\fB\-\-acctg\-freq\fR=\fIseconds\fR
+\fB\-\-acctg\-freq\fR=<\fIseconds\fR>
 Define the job accounting sampling interval.
-This can be used to override the \fIJobAcctGatherFrequency\fR parameter in SLURM's 
+This can be used to override the \fIJobAcctGatherFrequency\fR parameter in SLURM's
 configuration file, \fIslurm.conf\fR.
 A value of zero disables the periodic job sampling and provides accounting
 information only on job termination (reducing SLURM interference with the job).
 
 .TP
-\fB\-B\fR \fB\-\-extra\-node\-info\fR=\fIsockets\fR[:\fIcores\fR[:\fIthreads\fR]]
+\fB\-B\fR \fB\-\-extra\-node\-info\fR=<\fIsockets\fR[:\fIcores\fR[:\fIthreads\fR]]>
 Request a specific allocation of resources with details as to the
 number and type of computational resources within a cluster:
 number of sockets (or physical processors) per node,
@@ -35,9 +35,9 @@ resources of that type are to be utilized.
 As with nodes, the individual levels can also be specified in separate
 options if desired:
 .nf
-    \fB\-\-sockets\-per\-node\fR=\fIsockets\fR
-    \fB\-\-cores\-per\-socket\fR=\fIcores\fR
-    \fB\-\-threads\-per\-core\fR=\fIthreads\fR
+    \fB\-\-sockets\-per\-node\fR=<\fIsockets\fR>
+    \fB\-\-cores\-per\-socket\fR=<\fIcores\fR>
+    \fB\-\-threads\-per\-core\fR=<\fIthreads\fR>
 .fi
 When the task/affinity plugin is enabled,
 specifying an allocation in this manner also instructs SLURM to use
@@ -50,21 +50,22 @@ If select/cons_res is configured, it must have a parameter of CR_Core,
 CR_Core_Memory, CR_Socket, or CR_Socket_Memory. 
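+
+For example, a hypothetical request for nodes with at least two sockets
+and four cores per socket: "srun \-n16 \-B 2:4 a.out".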
 
 .TP
-\fB\-\-begin\fR=\fItime\fR
+\fB\-\-begin\fR=<\fItime\fR>
 Defer initiation of this job until the specified time.
-It accepts times of the form \fIHH:MM:SS\fR to run a job at 
+It accepts times of the form \fIHH:MM:SS\fR to run a job at
 a specific time of day (seconds are optional).
-(If that time is already past, the next day is assumed.) 
-You may also specify \fImidnight\fR, \fInoon\fR, or 
-\fIteatime\fR (4pm) and you can have a time\-of\-day suffixed 
-with \fIAM\fR or \fIPM\fR for running in the morning or the evening.  
+(If that time is already past, the next day is assumed.)
+You may also specify \fImidnight\fR, \fInoon\fR, or
+\fIteatime\fR (4pm) and you can have a time\-of\-day suffixed
+with \fIAM\fR or \fIPM\fR for running in the morning or the evening.
 You can also say what day the job will be run, by specifying
 a date of the form \fIMMDDYY\fR or \fIMM/DD/YY\fR
-or \fIMM.DD.YY\fR. You can also
+or \fIYYYY\-MM\-DD\fR. Combine date and time using the following
+format \fIYYYY\-MM\-DD[THH[:MM[:SS]]]\fR. You can also
 give times like \fInow + count time\-units\fR, where the time\-units
-can be \fIseconds\fR (default), \fIminutes\fR, \fIhours\fR, 
-\fIdays\fR, or \fIweeks\fR and you can tell SLURM to run 
-the job today with the keyword \fItoday\fR and to run the 
+can be \fIseconds\fR (default), \fIminutes\fR, \fIhours\fR,
+\fIdays\fR, or \fIweeks\fR and you can tell SLURM to run
+the job today with the keyword \fItoday\fR and to run the
 job tomorrow with the keyword \fItomorrow\fR.
 The value may be changed after job submission using the
 \fBscontrol\fR command.
@@ -73,11 +74,11 @@ For example:
    \-\-begin=16:00
    \-\-begin=now+1hour
    \-\-begin=now+60           (seconds by default)
-   \-\-begin=02/22/08-12:34:67
+   \-\-begin=2010-01-20T12:34:00
 .fi
 
 .TP
-\fB\-\-checkpoint\fR=\fItime\fR
+\fB\-\-checkpoint\fR=<\fItime\fR>
 Specifies the interval between creating checkpoints of the job step. 
 By default, the job step will have no checkpoints created.
 Acceptable time formats include "minutes", "minutes:seconds", 
@@ -85,12 +86,19 @@ Acceptable time formats include "minutes", "minutes:seconds",
 "days\-hours:minutes:seconds".
 
 .TP
-\fB\-\-checkpoint\-path\fR=\fIdirectory\fR
-Specifies the directory into which the job step's checkpoint should 
-be written (used by the checkpoint/xlch plugin only).
+\fB\-\-checkpoint\-dir\fR=<\fIdirectory\fR>
+Specifies the directory into which the job or job step's checkpoint should 
+be written (used by the checkpoint/blcr and checkpoint/xlch plugins only).
+The default value is the current working directory.
+Checkpoint files will be of the form "<job_id>.ckpt" for jobs
+and "<job_id>.<step_id>.ckpt" for job steps.
 
 .TP
-\fB\-C\fR, \fB\-\-constraint\fR[=]<\fIlist\fR>
+\fB\-\-comment\fR=<\fIstring\fR>
+An arbitrary comment.
+
+.TP
+\fB\-C\fR, \fB\-\-constraint\fR=<\fIlist\fR>
 Specify a list of constraints. 
 The constraints are features that have been assigned to the nodes by 
 the slurm administrator. 
@@ -105,7 +113,7 @@ There is no mechanism to specify that you want one node with feature
 node has both features.
 If only one of a set of possible options should be used for all allocated 
 nodes, then use the OR operator and enclose the options within square brackets. 
-For example: "\fB\-\-constraint="[rack1|rack2|rack3|rack4]"\fR might 
+For example: "\fB\-\-constraint=[rack1|rack2|rack3|rack4]"\fR might 
 be used to specify that all nodes must be allocated on a single rack of 
 the cluster, but any of those four racks can be used.
 A request can also specify the number of nodes needed with some feature
@@ -117,84 +125,144 @@ Constraints with node counts may only be combined with AND operators.
 If no nodes have the requested features, then the job will be rejected 
 by the slurm job manager.
 
+.TP
+\fB\-\-contiguous\fR
+If set, then the allocated nodes must form a contiguous set.
+Not honored with the \fBtopology/tree\fR or \fBtopology/3d_torus\fR
+plugins, both of which can modify the node ordering.
+Not honored for a job step's allocation.
+
+.TP
+\fB\-\-core\fR=<\fItype\fR>
+Adjust corefile format for parallel job. If possible, srun will set
+up the environment for the job such that a corefile format other than
+full core dumps is enabled. If run with type = "list", srun will
+print a list of supported corefile format types to stdout and exit.
+
 .TP
 \fB\-\-cpu_bind\fR=[{\fIquiet,verbose\fR},]\fItype\fR
 Bind tasks to CPUs. Used only when the task/affinity plugin is enabled.
+The configuration parameter \fBTaskPluginParam\fR may override these options.
+For example, if \fBTaskPluginParam\fR is configured to bind to cores,
+your job will not be able to bind tasks to sockets.
 NOTE: To have SLURM always report on the selected CPU binding for all
 commands executed in a shell, you can enable verbose mode by setting
 the SLURM_CPU_BIND environment variable value to "verbose".
 
-Note that the SLURM_CPU_BIND environment variable will propagate into the
-tasks' environment in order to perform binding in batch submissions.
-If you do not wish to propagate \-\-cpu_bind to successive srun commands,
-simply clear the variable in the task's script before executing srun:
-
-.nf
-        unsetenv SLURM_CPU_BIND
-.fi
-
-In addition, to SLURM_CPU_BIND, the following informational environment
-variables are also set when SLURM_CPU_BIND is in use:
+The following informational environment variables are set when \fB\-\-cpu_bind\fR
+is in use:
 .nf
         SLURM_CPU_BIND_VERBOSE
         SLURM_CPU_BIND_TYPE
         SLURM_CPU_BIND_LIST
 .fi
 
-See the \fBENVIRONMENT VARIABLE\fR section for a more detailed description 
+See the \fBENVIRONMENT VARIABLE\fR section for a more detailed description
 of the individual SLURM_CPU_BIND* variables.
 
-When using \-\-cpus\-per\-task to run multithreaded tasks, be aware that
+When using \fB\-\-cpus\-per\-task\fR to run multithreaded tasks, be aware that
 CPU binding is inherited from the parent of the process.  This means that
 the multithreaded task should either specify or clear the CPU binding
 itself to avoid having all threads of the multithreaded task use the same
-mask/CPU as the parent.  Alternatively, fat masks (masks which specify more 
+mask/CPU as the parent.  Alternatively, fat masks (masks which specify more
 than one allowed CPU) could be used for the tasks in order to provide
 multiple CPUs for the multithreaded tasks.
 
+By default, a job step has access to every CPU allocated to the job.
+To ensure that distinct CPUs are allocated to each job step, use the
+\fB\-\-exclusive\fR option.
+
+If the job step allocation includes an allocation with a number of
+sockets, cores, or threads equal to the number of tasks to be started
+then the tasks will by default be bound to the appropriate resources.
+Disable this mode of operation by explicitly setting "\-\-cpu_bind=none".
+
+Note that a job step can be allocated different numbers of CPUs on each node
+or be allocated CPUs not starting at location zero. Therefore one of the
+options which automatically generate the task binding is recommended.
+Explicitly specified masks or bindings are only honored when the job step
+has been allocated every available CPU on the node.
+
+Binding a task to a NUMA locality domain means to bind the task to the set of
+CPUs that belong to the NUMA locality domain or "NUMA node".
+If NUMA locality domain options are used on systems with no NUMA support, then
+each socket is considered a locality domain.
+
 Supported options include:
 .PD 1
 .RS
 .TP
 .B q[uiet]
-quietly bind before task runs (default)
+Quietly bind before task runs (default)
 .TP
 .B v[erbose]
-verbosely report binding before task runs
+Verbosely report binding before task runs
 .TP
 .B no[ne]
-don't bind tasks to CPUs (default)
+Do not bind tasks to CPUs (default)
 .TP
 .B rank
-bind by task rank
+Automatically bind by task rank.
+Task zero is bound to socket (or core or thread) zero, etc.
+Not supported unless the entire node is allocated to the job.
 .TP
 .B map_cpu:<list>
-bind by mapping CPU IDs to tasks as specified
+Bind by mapping CPU IDs to tasks as specified
 where <list> is <cpuid1>,<cpuid2>,...<cpuidN>.
 CPU IDs are interpreted as decimal values unless they are preceded
-with '0x' in which case they interpreted as hexadecimal values.
+with '0x' in which case they are interpreted as hexadecimal values.
+Not supported unless the entire node is allocated to the job.
 .TP
 .B mask_cpu:<list>
-bind by setting CPU masks on tasks as specified
+Bind by setting CPU masks on tasks as specified
 where <list> is <mask1>,<mask2>,...<maskN>.
 CPU masks are \fBalways\fR interpreted as hexadecimal values but can be
 preceded with an optional '0x'.
+Not supported unless the entire node is allocated to the job.
+.TP
+.B rank_ldom
+Bind to a NUMA locality domain by rank
+.TP
+.B map_ldom:<list>
+Bind by mapping NUMA locality domain IDs to tasks as specified where
+<list> is <ldom1>,<ldom2>,...<ldomN>.
+The locality domain IDs are interpreted as decimal values unless they are
+preceded with '0x' in which case they are interpreted as hexadecimal values.
+Not supported unless the entire node is allocated to the job.
+.TP
+.B mask_ldom:<list>
+Bind by setting NUMA locality domain masks on tasks as specified
+where <list> is <mask1>,<mask2>,...<maskN>.
+NUMA locality domain masks are \fBalways\fR interpreted as hexadecimal
+values but can be preceded with an optional '0x'.
+Not supported unless the entire node is allocated to the job.
 .TP
 .B sockets
-auto\-generated masks bind to sockets
+Automatically generate masks binding tasks to sockets.
+If the number of tasks differs from the number of allocated sockets
+this can result in sub\-optimal binding.
 .TP
 .B cores
-auto\-generated masks bind to cores
+Automatically generate masks binding tasks to cores.
+If the number of tasks differs from the number of allocated cores
+this can result in sub\-optimal binding.
 .TP
 .B threads
-auto\-generated masks bind to threads
+Automatically generate masks binding tasks to threads.
+If the number of tasks differs from the number of allocated threads
+this can result in sub\-optimal binding.
+.TP
+.B ldoms
+Automatically generate masks binding tasks to NUMA locality domains.
+If the number of tasks differs from the number of allocated locality domains
+this can result in sub\-optimal binding.
 .TP
 .B help
-show this help message
+Show this help message
 .RE
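+
+For example, a minimal sketch assuming the job step has been allocated
+an entire node with four CPUs (required for explicit CPU maps, as noted
+above):
+.nf
+    srun \-n4 \-\-cpu_bind=verbose,map_cpu:0,1,2,3 a.out
+.fi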
 
 .TP
-\fB\-c\fR, \fB\-\-cpus\-per\-task\fR=\fIncpus\fR
+\fB\-c\fR, \fB\-\-cpus\-per\-task\fR=<\fIncpus\fR>
 Request that \fIncpus\fR be allocated \fBper process\fR. This may be
 useful if the job is multithreaded and requires more than one CPU
 per task for optimal performance. The default is one CPU per process.
@@ -210,44 +278,20 @@ resources to the job step from the job's allocation when running
 multiple job steps with the \fB\-\-exclusive\fR option.
 
 .TP
-\fB\-\-comment\fR=<\fIstring\fR>
-An arbitrary comment.
-
-.TP
-\fB\-\-contiguous\fR
-Demand a contiguous range of nodes. The default is "yes". Specify
-\-\-contiguous=no if a contiguous range of nodes is not a constraint.
-
-.TP
-\fB\-\-core\fR=\fItype\fR
-Adjust corefile format for parallel job. If possible, srun will set
-up the environment for the job such that a corefile format other than
-full core dumps is enabled. If run with type = "list", srun will
-print a list of supported corefile format types to stdout and exit.
-
-.TP
-\fB\-\-ctrl\-comm\-ifhn\fR=\fIaddr\fR
-Specify the address or hostname to be used for PMI communications only
-(task communication and synchronization primitives for MPCIH2).
-Defaults to hostname (response from getnodename function).
-Use of this is required if a DNS lookup can not be performed on 
-the hostname or if that address is blocked from the compute nodes.
-
-.TP
-\fB\-D\fR, \fB\-\-chdir\fR=\fIpath\fR
+\fB\-D\fR, \fB\-\-chdir\fR=<\fIpath\fR>
 have the remote processes do a chdir to \fIpath\fR before beginning
 execution. The default is to chdir to the current working directory
 of the \fBsrun\fR process.
 
 .TP
-\fB\-d\fR, \fB\-\-slurmd\-debug\fR=\fIlevel\fR
+\fB\-d\fR, \fB\-\-slurmd\-debug\fR=<\fIlevel\fR>
 Specify a debug level for slurmd(8). \fIlevel\fR may be an integer value
 between 0 [quiet, only errors are displayed] and 4 [verbose operation]. 
 The slurmd debug information is copied onto the stderr of
 the job. By default only errors are displayed. 
 
 .TP
-\fB\-e\fR, \fB\-\-error\fR=\fImode\fR
+\fB\-e\fR, \fB\-\-error\fR=<\fImode\fR>
 Specify how stderr is to be redirected. By default in interactive mode,
 .B srun
 redirects stderr to the same file as stdout, if one is specified. The
@@ -257,7 +301,13 @@ See \fBIO Redirection\fR below for more options.
 If the specified file already exists, it will be overwritten.
 
 .TP
-\fB\-\-epilog\fR=\fIexecutable\fR
+\fB\-E\fR, \fB\-\-preserve\-env\fR
+Pass the current values of environment variables SLURM_NNODES and 
+SLURM_NPROCS through to the \fIexecutable\fR, rather than computing them
+from commandline parameters.
+
+.TP
+\fB\-\-epilog\fR=<\fIexecutable\fR>
 \fBsrun\fR will run \fIexecutable\fR just after the job step completes.
 The command line arguments for \fIexecutable\fR will be the command
 and arguments of the job step.  If \fIexecutable\fR is "none", then
@@ -276,14 +326,16 @@ an existing resource allocation and you want separate processors to
 be dedicated to each job step. If sufficient processors are not 
 available to initiate the job step, it will be deferred. This can 
 be thought of as providing resource management for the job within
-it's allocation. Since resource management is provided  by 
+its allocation. Note that all CPUs allocated to a job are available
+to each job step unless the \fB\-\-exclusive\fR option is used and
+task affinity is configured. Since resource management is provided by 
 processor, the \fB\-\-ntasks\fR option must be specified, but the 
 following options should NOT be specified \fB\-\-nodes\fR, 
-\fB\-\-relative\fR, \fB\-\-relative\fR=\fIarbitrary\fR.
+\fB\-\-relative\fR, \fB\-\-distribution\fR=\fIarbitrary\fR.
 See \fBEXAMPLE\fR below.
 
 .TP
-\fB\-\-gid\fR=\fIgroup\fR
+\fB\-\-gid\fR=<\fIgroup\fR>
 If \fBsrun\fR is run as root, and the \fB\-\-gid\fR option is used, 
 submit the job with \fIgroup\fR's group access permissions.  \fIgroup\fR 
 may be the group name or the numerical group ID.
@@ -296,10 +348,10 @@ may be the group name or the numerical group ID.
 
 .TP
 \fB\-\-help\fR
-Display verbose help message and exit.
+Display help information and exit.
 
 .TP
-\fB\-\-hint\fR=\fItype\fR
+\fB\-\-hint\fR=<\fItype\fR>
 Bind tasks according to application hints
 .RS
 .TP
@@ -321,13 +373,13 @@ show this help message
 
 .TP
 \fB\-I\fR, \fB\-\-immediate\fR
-exit if resources are not immediately
-available. By default, \fB\-\-immediate\fR is off, and
+Exit if resources are not immediately available. By default,
+\fB\-\-immediate\fR is off, and
 .B srun
 will block until resources become available.
 
 .TP
-\fB\-i\fR, \fB\-\-input\fR=\fImode\fR
+\fB\-i\fR, \fB\-\-input\fR=<\fImode\fR>
 Specify how stdin is to be redirected. By default,
 .B srun
 redirects stdin from the terminal to all tasks. See \fBIO Redirection\fR
@@ -336,16 +388,16 @@ For OS X, the poll() function does not support stdin, so input from
 a terminal is not possible.
 
 .TP
-\fB\-J\fR, \fB\-\-job\-name\fR=\fIjobname\fR
+\fB\-J\fR, \fB\-\-job\-name\fR=<\fIjobname\fR>
 Specify a name for the job. The specified name will appear along with
 the job id number when querying running jobs on the system. The default
 is the supplied \fBexecutable\fR program's name.
 
 .TP
-\fB\-\-jobid\fR=\fIid\fR
+\fB\-\-jobid\fR=<\fIjobid\fR>
 Initiate a job step under an already allocated job with job id \fIid\fR.
 Using this option will cause \fBsrun\fR to behave exactly as if the
-SLURM_JOBID environment variable was set.
+SLURM_JOB_ID environment variable was set.
 
 .TP
 \fB\-K\fR, \fB\-\-kill\-on\-bad\-exit\fR
@@ -353,7 +405,7 @@ Terminate a job if any task exits with a non\-zero exit code.
 
 .TP
 \fB\-k\fR, \fB\-\-no\-kill\fR
-Do not automatically terminate a job of one of the nodes it has been 
+Do not automatically terminate a job if one of the nodes it has been
 allocated fails.  This option is only recognized on a job allocation, 
 not for the submission of individual job steps. 
 The job will assume all responsibilities for fault\-tolerance. The 
@@ -370,7 +422,7 @@ The \fB\-\-label\fR option will prepend lines of output with the remote
 task id.
 
 .TP
-\fB\-L\fR, \fB\-\-licenses\fR=
+\fB\-L\fR, \fB\-\-licenses\fR=<\fIlicense\fR>
 Specification of licenses (or other resources available on all 
 nodes of the cluster) which must be allocated to this job.
 License names can be followed by an asterisk and count 
@@ -379,8 +431,8 @@ Multiple license names should be comma separated (e.g.
 "\-\-licenses=foo*4,bar").
 
 .TP
-\fB\-m\fR, \fB\-\-relative\fR
-(\fIblock\fR|\fIcyclic\fR|\fIarbitrary\fR|\fIplane=<options>\fR)
+\fB\-m\fR, \fB\-\-distribution\fR=
+<\fIblock\fR|\fIcyclic\fR|\fIarbitrary\fR|\fIplane=<options>\fR>
 Specify an alternate distribution method for remote processes.
 .RS
 .TP
@@ -424,19 +476,19 @@ contain at minimum the number of hosts requested.  If requesting tasks
 .RE
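+
+For example, a hypothetical launch distributing eight tasks round\-robin
+across the allocated nodes: "srun \-n8 \-\-distribution=cyclic a.out".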
 
 .TP
-\fB\-\-mail\-type\fR=\fItype\fR
+\fB\-\-mail\-type\fR=<\fItype\fR>
 Notify user by email when certain event types occur. 
 Valid \fItype\fR values are BEGIN, END, FAIL, ALL (any state change). 
 The user to be notified is indicated with \fB\-\-mail\-user\fR. 
 
 .TP
-\fB\-\-mail\-user\fR=\fIuser\fR
+\fB\-\-mail\-user\fR=<\fIuser\fR>
 User to receive email notification of state changes as defined by 
 \fB\-\-mail\-type\fR.
 The default value is the submitting user.
 
 .TP
-\fB\-\-mem\fR[=]<\fIMB\fR>
+\fB\-\-mem\fR=<\fIMB\fR>
 Specify the real memory required per node in MegaBytes.
 Default value is \fBDefMemPerNode\fR and the maximum value is
 \fBMaxMemPerNode\fR. If configured, both parameters can be
@@ -447,7 +499,7 @@ Also see \fB\-\-mem\-per\-cpu\fR.
 \fB\-\-mem\fR and \fB\-\-mem\-per\-cpu\fR are mutually exclusive.
 
 .TP
-\fB\-\-mem\-per\-cpu\fR[=]<\fIMB\fR>
+\fB\-\-mem\-per\-cpu\fR=<\fIMB\fR>
+Minimum memory required per allocated CPU in MegaBytes.
 Default value is \fBDefMemPerCPU\fR and the maximum value is
 \fBMaxMemPerCPU\fR. If configured, both parameters can be 
@@ -471,21 +523,12 @@ If you want greater control, try running a simple test code with the
 options "\-\-cpu_bind=verbose,none \-\-mem_bind=verbose,none" to determine 
 the specific configuration.
 
-NOTE: To have SLURM always report on the selected memory binding for 
-all commands executed in a shell, you can enable verbose mode by 
+NOTE: To have SLURM always report on the selected memory binding for
+all commands executed in a shell, you can enable verbose mode by
 setting the SLURM_MEM_BIND environment variable value to "verbose".
 
-Note that the SLURM_MEM_BIND environment variable will propagate into the
-tasks' environment in order to perform binding in batch submissions.
-If you do not wish to propagate \-\-mem_bind to successive srun commands,
-simply clear the variable in the task's script before executing srun:
-
-.nf
-        unsetenv SLURM_MEM_BIND
-.fi
-
-In addition, to SLURM_MEM_BIND, the following informational environment
-variables are also set when SLURM_MEM_BIND is in use:
+The following informational environment variables are set when \fB\-\-mem_bind\fR
+is in use:
 
 .nf
         SLURM_MEM_BIND_VERBOSE
@@ -533,29 +576,29 @@ show this help message
 .RE
 
 .TP
-\fB\-\-mincpus\fR=\fIn\fR
-Specify a minimum number of logical cpus/processors per node.
+\fB\-\-mincores\fR=<\fIn\fR>
+Specify a minimum number of cores per socket.
 
 .TP
-\fB\-\-minsockets\fR=\fIn\fR
-Specify a minimum number of sockets (physical processors) per node.
+\fB\-\-mincpus\fR=<\fIn\fR>
+Specify a minimum number of logical cpus/processors per node.
 
 .TP
-\fB\-\-mincores\fR=\fIn\fR
-Specify a minimum number of cores per socket.
+\fB\-\-minsockets\fR=<\fIn\fR>
+Specify a minimum number of sockets (physical processors) per node.
 
 .TP
-\fB\-\-minthreads\fR=\fIn\fR
+\fB\-\-minthreads\fR=<\fIn\fR>
 Specify a minimum number of threads per core.
 
 .TP
-\fB\-\-msg\-timeout\fR=\fIseconds\fR
+\fB\-\-msg\-timeout\fR=<\fIseconds\fR>
 Modify the job launch message timeout.
 The default value is \fBMessageTimeout\fR in the SLURM configuration file slurm.conf. 
 Changes to this are typically not recommended, but could be useful to diagnose problems.
 
 .TP
-\fB\-\-mpi\fR=\fImpi_type\fR
+\fB\-\-mpi\fR=<\fImpi_type\fR>
 Identify the type of MPI to be used. May result in unique initiation 
 procedures.
 .RS
@@ -595,13 +638,13 @@ arguments for each task. See \fBMULTIPLE PROGRAM CONFIGURATION\fR
 below for details on the configuration file contents. 
 
 .TP
-\fB\-N\fR, \fB\-\-nodes\fR=\fIminnodes\fR[\-\fImaxnodes\fR]
+\fB\-N\fR, \fB\-\-nodes\fR=<\fIminnodes\fR[\-\fImaxnodes\fR]>
 Request that a minimum of \fIminnodes\fR nodes be allocated to this job.
 The scheduler may decide to launch the job on more than \fIminnodes\fR nodes.
 A limit on the maximum node count may be specified with \fImaxnodes\fR
 (e.g. "\-\-nodes=2\-4").  The minimum and maximum node count may be the
 same to specify a specific number of nodes (e.g. "\-\-nodes=2\-2" will ask
-for two and ONLY two nodes).  
+for two and ONLY two nodes).
 The partition's node limits supersede those of the job. 
 If a job's node limits are outside of the range permitted for its 
 associated partition, the job will be left in a PENDING state. 
@@ -619,7 +662,7 @@ The job will be allocated as many nodes as possible within the range specified
 and without delaying the initiation of the job.
 
 .TP
-\fB\-n\fR, \fB\-\-ntasks\fR=\fIntasks\fR
+\fB\-n\fR, \fB\-\-ntasks\fR=<\fInumber\fR>
 Specify the number of tasks to run. Request that \fBsrun\fR
 allocate resources for \fIntasks\fR tasks.  
 The default is one task per socket or core (depending upon the value 
@@ -627,53 +670,55 @@ of the \fISelectTypeParameters\fR parameter in slurm.conf), but note
 that the \fB\-\-cpus\-per\-task\fR option will change this default.
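+
+For example, to launch eight tasks of a program:
+.nf
+> srun \-n8 ./a.out
+.fi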
 
 .TP
-\fB\-\-network\fR=\fItype\fR
+\fB\-\-network\fR=<\fItype\fR>
 Specify the communication protocol to be used. 
 This option is supported on AIX systems.
 Since POE is used to launch tasks, this option is not normally used or 
 is specified using the \fBSLURM_NETWORK\fR environment variable.
-The interpretation of \fItype\fR is system dependent. 
+The interpretation of \fItype\fR is system dependent.
 For systems with an IBM Federation switch, the following 
 comma\-separated and case insensitive types are recognized:
 \fBIP\fR (the default is user\-space), \fBSN_ALL\fR, \fBSN_SINGLE\fR, 
 \fBBULK_XFER\fR and adapter names  (e.g. \fBSNI0\fR and \fBSNI1\fR). 
-For more information, on IBM systems see \fIpoe\fR documentation on 
+For more information on IBM systems, see the \fIpoe\fR documentation on
 the environment variables \fBMP_EUIDEVICE\fR and \fBMP_USE_BULK_XFER\fR.
 Note that only four job steps may be active at once on a node with the 
 \fBBULK_XFER\fR option due to limitations in the Federation switch driver.
 
 .TP
-\fB\-\-nice\fR[=\fIadjustment]\fR
-Run the job with an adjusted scheduling priority within SLURM. 
-With no adjustment value the scheduling priority is decreased 
+\fB\-\-nice\fR[=\fIadjustment\fR]
+Run the job with an adjusted scheduling priority within SLURM.
+With no adjustment value the scheduling priority is decreased
 by 100. The adjustment range is from \-10000 (highest priority)
-to 10000 (lowest priority). Only privileged users can specify 
-a negative adjustment. NOTE: This option is presently 
+to 10000 (lowest priority). Only privileged users can specify
+a negative adjustment. NOTE: This option is presently
 ignored if \fISchedulerType=sched/wiki\fR or 
 \fISchedulerType=sched/wiki2\fR.
 
 .TP
-\fB\-\-ntasks\-per\-core\fR=\fIntasks\fR
+\fB\-\-ntasks\-per\-core\fR=<\fIntasks\fR>
 Request that no more than \fIntasks\fR be invoked on each core.
 Similar to \fB\-\-ntasks\-per\-node\fR except at the core level
 instead of the node level.  Masks will automatically be generated
 to bind the tasks to specific core unless \fB\-\-cpu_bind=none\fR
 is specified.
-NOTE: This option is not supported unless \fISelectType=CR_Core\fR
-or \fISelectType=CR_Core_Memory\fR is configured.
+NOTE: This option is not supported unless
+\fISelectTypeParameters=CR_Core\fR or
+\fISelectTypeParameters=CR_Core_Memory\fR is configured.
 
 .TP
-\fB\-\-ntasks\-per\-socket\fR=\fIntasks\fR
+\fB\-\-ntasks\-per\-socket\fR=<\fIntasks\fR>
 Request that no more than \fIntasks\fR be invoked on each socket.
 Similar to \fB\-\-ntasks\-per\-node\fR except at the socket level
 instead of the node level.  Masks will automatically be generated
 to bind the tasks to specific sockets unless \fB\-\-cpu_bind=none\fR
 is specified.
-NOTE: This option is not supported unless \fISelectType=CR_Socket\fR 
-or \fISelectType=CR_Socket_Memory\fR is configured.
+NOTE: This option is not supported unless
+\fISelectTypeParameters=CR_Socket\fR or
+\fISelectTypeParameters=CR_Socket_Memory\fR is configured.
 
 .TP
-\fB\-\-ntasks\-per\-node\fR=\fIntasks\fR
+\fB\-\-ntasks\-per\-node\fR=<\fIntasks\fR>
 Request that no more than \fIntasks\fR be invoked on each node.
 This is similar to using \fB\-\-cpus\-per\-task\fR=\fIncpus\fR
 but does not require knowledge of the actual number of cpus on
@@ -683,20 +728,18 @@ on each node.  Examples of this include submitting
 a hybrid MPI/OpenMP app where only one MPI "task/rank" should be
 assigned to each node while allowing the OpenMP portion to utilize
 all of the parallelism present in the node, or submitting a single
-setup/cleanup/monitoring job to each node of a pre\-existing  
+setup/cleanup/monitoring job to each node of a pre\-existing
 allocation as one step in a larger job script.
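+
+A minimal hybrid MPI/OpenMP sketch (node count, thread count and
+program name are illustrative):
+.nf
+> OMP_NUM_THREADS=8 srun \-N4 \-\-ntasks\-per\-node=1 ./hybrid_app
+.fi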
 
 .TP
 \fB\-O\fR, \fB\-\-overcommit\fR
-overcommit resources. Normally,
-.B srun
-will not allocate more than one process per cpu. By specifying
-\fB\-\-overcommit\fR you are explicitly allowing more than one process
-per cpu. However no more than \fBMAX_TASKS_PER_NODE\fR tasks are 
-permitted to execute per node.
+Overcommit resources.  Normally, \fBsrun\fR will allocate one task
+per processor.  By specifying \fB\-\-overcommit\fR you are explicitly
+allowing more than one task per processor.  However, no more than
+\fBMAX_TASKS_PER_NODE\fR tasks are permitted to execute per node.
 
 .TP
-\fB\-o\fR, \fB\-\-output\fR=\fImode\fR
+\fB\-o\fR, \fB\-\-output\fR=<\fImode\fR>
 Specify the mode for stdout redirection. By default in interactive mode,
 .B srun
 collects stdout from all tasks and line buffers this output to
@@ -710,13 +753,13 @@ If \fB\-\-error\fR is not also specified on the command line, both
 stdout and stderr will be directed to the file specified by \fB\-\-output\fR.
 
 .TP
-\fB\-\-open\-mode\fR=append|truncate
+\fB\-\-open\-mode\fR=<\fIappend|truncate\fR>
 Open the output and error files using append or truncate mode as specified.
 The default value is specified by the system configuration parameter
 \fIJobFileAppend\fR.
 
-.TP 
-\fB\-P\fR, \fB\-\-dependency\fR[=]<\fIdependency_list\fR>
+.TP
+\fB\-P\fR, \fB\-\-dependency\fR=<\fIdependency_list\fR>
 Defer the start of this job until the specified dependencies have been
 satisfied.
 <\fIdependency_list\fR> is of the form 
@@ -748,13 +791,13 @@ job name and user have terminated.
 .RE
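+
+For example (the job ID is illustrative):
+.nf
+> srun \-\-dependency=afterok:1234 ./postprocess.sh
+.fi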
 
 .TP
-\fB\-p\fR, \fB\-\-partition\fR=\fIpartition\fR
-Request resources from partition "\fIpartition\fR." Partitions
-are created by the slurm administrator, who also identify one 
-of those partitions as the default. 
+\fB\-p\fR, \fB\-\-partition\fR=<\fIpartition name\fR>
+Request a specific partition for the resource allocation.  If not specified,
+the default behavior is to allow the slurm controller to select the default
+partition as designated by the system administrator.
 
 .TP
-\fB\-\-prolog\fR=\fIexecutable\fR
+\fB\-\-prolog\fR=<\fIexecutable\fR>
 \fBsrun\fR will run \fIexecutable\fR just before launching the job step.
 The command line arguments for \fIexecutable\fR will be the command
 and arguments of the job step.  If \fIexecutable\fR is "none", then
@@ -815,8 +858,7 @@ Not currently supported on AIX platforms.
 
 .TP
 \fB\-Q\fR, \fB\-\-quiet\fR
-Quiet operation. Suppress informational messages. Errors will still
-be displayed.
+Suppress informational messages from srun. Errors will still be displayed.
 
 .TP
 \fB\-q\fR, \fB\-\-quit\-on\-interrupt\fR
@@ -826,7 +868,7 @@ a single Ctrl\-C and causes \fBsrun\fR to instead immediately terminate the
 running job. 
 
 .TP
-\fB\-r\fR, \fB\-\-relative\fR=\fIn\fR
+\fB\-r\fR, \fB\-\-relative\fR=<\fIn\fR>
 Run a job step relative to node \fIn\fR of the current allocation. 
 This option may be used to spread several job steps out among the
 nodes of the current job. If \fB\-r\fR is used, the current job
@@ -834,18 +876,32 @@ step will begin at node \fIn\fR of the allocated nodelist, where
 the first node is considered node 0.  The \fB\-r\fR option is not 
 permitted along with \fB\-w\fR or \fB\-x\fR, and will be silently
 ignored when not running within a prior allocation (i.e. when
-SLURM_JOBID is not set). The default for \fIn\fR is 0. If the 
+SLURM_JOB_ID is not set). The default for \fIn\fR is 0. If the 
 value of \fB\-\-nodes\fR exceeds the number of nodes identified 
 with the \fB\-\-relative\fR option, a warning message will be 
 printed and the \fB\-\-relative\fR option will take precedence.
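+
+For example, to run a one\-node job step on the third node (node 2) of
+the current allocation:
+.nf
+> srun \-N1 \-r2 ./a.out
+.fi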
 
+.TP
+\fB\-\-resv\-ports\fR
+Reserve communication ports for this job.
+Used for OpenMPI.
+
+.TP
+\fB\-\-reservation\fR=<\fIname\fR>
+Allocate resources for the job from the named reservation.
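+
+For example (the reservation name is illustrative):
+.nf
+> srun \-\-reservation=maint_window \-n4 ./a.out
+.fi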
+
+.TP
+\fB\-\-restart\-dir\fR=<\fIdirectory\fR>
+Specifies the directory from which the job or job step's checkpoint should
+be read (used by the checkpoint/blcr and checkpoint/xlch plugins only).
+
 .TP
 \fB\-s\fR, \fB\-\-share\fR
 The job can share nodes with other running jobs. This may result in faster job 
 initiation and higher system utilization, but lower application performance.
 
 .TP
-\fB\-T\fR, \fB\-\-threads\fR=\fInthreads\fR
+\fB\-T\fR, \fB\-\-threads\fR=<\fInthreads\fR>
 Request that \fBsrun\fR
 use \fInthreads\fR to initiate and control the parallel job. The 
 default value is the smaller of 60 or the number of nodes allocated.
@@ -853,28 +909,28 @@ This should only be used to set a low thread count for testing on
 very small memory computers.
 
 .TP
-\fB\-t\fR, \fB\-\-time\fR=\fItime\fR
-Establish a time limit to terminate the job after the specified period of
-time. If the job's time limit exceeds the partition's time limit, the 
-job will be left in a PENDING state. The default value is the partition's 
-time limit. When the time limit is reached, the job's processes are sent 
-SIGTERM followed by SIGKILL. The interval between signals is specified by 
-the SLURM configuration parameter \fBKillWait\fR. A time limit of 0 minutes
-indicates that an infinite timelimit should be used. 
-Acceptable time formats include "minutes", "minutes:seconds", 
-"hours:minutes:seconds", "days\-hours", "days\-hours:minutes" and 
-"days\-hours:minutes:seconds".
+\fB\-t\fR, \fB\-\-time\fR=<\fItime\fR>
+Set a limit on the total run time of the job step.  If the
+requested time limit exceeds the partition's time limit, the job will
+be left in a PENDING state (possibly indefinitely).  The default time
+limit is the partition's time limit.  When the time limit is reached,
+all of the job's tasks are sent SIGTERM followed by SIGKILL.  The
+interval between signals is specified by the SLURM configuration
+parameter \fBKillWait\fR.  A time limit of zero requests that no time
+limit be imposed.  Acceptable time formats include "minutes",
+"minutes:seconds", "hours:minutes:seconds", "days\-hours",
+"days\-hours:minutes" and "days\-hours:minutes:seconds".
 
 .TP
-\fB\-\-task\-epilog\fR=\fIexecutable\fR
+\fB\-\-task\-epilog\fR=<\fIexecutable\fR>
 The \fBslurmd\fR daemon will run \fIexecutable\fR just after each task
-terminates. This will be before after any TaskEpilog parameter      
+terminates. This will be executed before any TaskEpilog parameter
 in slurm.conf is executed. This is meant to be a very short\-lived 
 program. If it fails to terminate within a few seconds, it will 
 be killed along with any descendant processes.
 
 .TP
-\fB\-\-task\-prolog\fR=\fIexecutable\fR
+\fB\-\-task\-prolog\fR=<\fIexecutable\fR>
 The \fBslurmd\fR daemon will run \fIexecutable\fR just before launching 
 each task. This will be executed after any TaskProlog parameter 
 in slurm.conf is executed.
@@ -885,23 +941,27 @@ Standard output from this program of the form
 for the task being spawned.
 
 .TP
-\fB\-\-tmp\fR=\fIMB\fR
+\fB\-\-tmp\fR=<\fIMB\fR>
 Specify a minimum amount of temporary disk space.
 
 .TP
-\fB\-U\fR, \fB\-\-account\fR=\fIaccount\fR
+\fB\-U\fR, \fB\-\-account\fR=<\fIaccount\fR>
 Change resource use by this job to specified account.
-The \fIaccount\fR is an arbitrary string. The may 
-be changed after job submission using the \fBscontrol\fR 
+The \fIaccount\fR is an arbitrary string. The account name may
+be changed after job submission using the \fBscontrol\fR
 command.
 
 .TP
 \fB\-u\fR, \fB\-\-unbuffered\fR
-do not line buffer stdout from remote tasks. This option cannot be used
-with \fI\-\-label\fR. 
+Do not line buffer stdout from remote tasks. This option cannot be used
+with \fI\-\-label\fR.
 
 .TP
-\fB\-\-uid\fR=\fIuser\fR
+\fB\-\-usage\fR
+Display brief help message and exit.
+
+.TP
+\fB\-\-uid\fR=<\fIuser\fR>
 Attempt to submit and/or run a job as \fIuser\fR instead of the
 invoking user id. The invoking user's credentials will be used
 to check access permissions for the target partition. User root
@@ -910,21 +970,18 @@ partition for example. If run as root, \fBsrun\fR will drop
 its permissions to the uid specified after node allocation is
 successful. \fIuser\fR may be the user name or numerical user ID.
 
-.TP
-\fB\-\-usage\fR
-Display brief help message and exit.
-
 .TP
 \fB\-V\fR, \fB\-\-version\fR
 Display version information and exit.
 
 .TP
 \fB\-v\fR, \fB\-\-verbose\fR
-Print detailed event logging. Multiple \fB\-v\fR's will further increase 
-the verbosity of logging. By default only errors will be displayed.
+Increase the verbosity of srun's informational messages.  Multiple
+\fB\-v\fR's will further increase srun's verbosity.  By default only
+errors will be displayed.
 
 .TP
-\fB\-W\fR, \fB\-\-wait\fR=\fIseconds\fR
+\fB\-W\fR, \fB\-\-wait\fR=<\fIseconds\fR>
 Specify how long to wait after the first task terminates before terminating
 all remaining tasks. A value of 0 indicates an unlimited wait (a warning will
 be issued after 60 seconds). The default value is set by the WaitTime
@@ -933,7 +990,7 @@ option can be useful to insure that a job is terminated in a timely fashion
 in the event that one or more tasks terminate prematurely.
 
 .TP
-\fB\-w\fR, \fB\-\-nodelist\fR=\fIhost1,host2,...\fR or \fIfilename\fR
+\fB\-w\fR, \fB\-\-nodelist\fR=<\fIhost1,host2,...\fR or \fIfilename\fR>
 Request a specific list of hosts. The job will contain \fIat least\fR
 these hosts. The list may be specified as a comma\-separated list of
 hosts, a range of hosts (host[1\-5,7,...] for example), or a filename.
@@ -943,9 +1000,9 @@ than 2 hosts in the file only the first 2 nodes will be used in the
 request list.
 
 .TP
-\fB\-\-wckey\fR=\fIwckey\fR
+\fB\-\-wckey\fR=<\fIwckey\fR>
 Specify wckey to be used with job.  If TrackWCKey=no (default) in the
-slurm.conf this value does not get looked at. 
+slurm.conf this value is ignored.
 
 .TP
 \fB\-X\fR, \fB\-\-disable\-status\fR
@@ -956,27 +1013,27 @@ A second Ctrl\-C in one second will forcibly terminate the job and
 variable SLURM_DISABLE_STATUS.
 
 .TP
-\fB\-x\fR, \fB\-\-exclude\fR=\fIhost1,host2,...\fR or \fIfilename\fR
-Request that a specific list of hosts not be included in the resources 
-allocated to this job. The host list will be assumed to be a filename 
+\fB\-x\fR, \fB\-\-exclude\fR=<\fIhost1,host2,...\fR or \fIfilename\fR>
+Request that a specific list of hosts not be included in the resources
+allocated to this job. The host list will be assumed to be a filename
 if it contains a "/" character.
 
 .PP
-The following options support Blue Gene systems, but may be 
+The following options support Blue Gene systems, but may be
 applicable to other systems as well.
 
 .TP
-\fB\-\-blrts\-image\fR=\fIpath\fR
+\fB\-\-blrts\-image\fR=<\fIpath\fR>
 Path to blrts image for bluegene block.  BGL only.
 Default from \fIbluegene.conf\fR if not set.
 
 .TP
-\fB\-\-cnload\-image\fR=\fIpath\fR
+\fB\-\-cnload\-image\fR=<\fIpath\fR>
 Path to compute node image for bluegene block.  BGP only.
 Default from \fIbluegene.conf\fR if not set.
 
 .TP
-\fB\-\-conn\-type\fR=\fItype\fR
+\fB\-\-conn\-type\fR=<\fItype\fR>
 Require the partition connection type to be of a certain type.  
 On Blue Gene the acceptable values of \fItype\fR are MESH, TORUS and NAV.  
 If NAV, or if not set, then SLURM will try to fit a TORUS else MESH.
@@ -987,7 +1044,7 @@ midplane and below).  You can use HTC_S for SMP, HTC_D for Dual, HTC_V
 for virtual node mode, and HTC_L for Linux mode.
 
 .TP
-\fB\-g\fR, \fB\-\-geometry\fR=\fIXxYxZ\fR
+\fB\-g\fR, \fB\-\-geometry\fR=<\fIXxYxZ\fR>
 Specify the geometry requirements for the job. The three numbers 
 represent the required geometry giving dimensions in the X, Y and 
 Z directions. For example "\-\-geometry=2x3x4", specifies a block 
@@ -995,17 +1052,17 @@ of nodes having 2 x 3 x 4 = 24 nodes (actually base partitions on
 Blue Gene).
 
 .TP
-\fB\-\-ioload\-image\fR=\fIpath\fR
+\fB\-\-ioload\-image\fR=<\fIpath\fR>
 Path to io image for bluegene block.  BGP only.
 Default from \fIbluegene.conf\fR if not set.
 
 .TP
-\fB\-\-linux\-image\fR=\fIpath\fR
+\fB\-\-linux\-image\fR=<\fIpath\fR>
 Path to linux image for bluegene block.  BGL only.
 Default from \fIbluegene.conf\fR if not set.
 
 .TP
-\fB\-\-mloader\-image\fR=\fIpath\fR
+\fB\-\-mloader\-image\fR=<\fIpath\fR>
 Path to mloader image for bluegene block.
 Default from \fIbluegene.conf\fR if not set.
 
@@ -1016,7 +1073,7 @@ appropriate partition.
 By default the specified geometry can rotate in three dimensions.
 
 .TP
-\fB\-\-ramdisk\-image\fR=\fIpath\fR
+\fB\-\-ramdisk\-image\fR=<\fIpath\fR>
 Path to ramdisk image for bluegene block.  BGL only.
 Default from \fIbluegene.conf\fR if not set.
 
@@ -1037,7 +1094,7 @@ will terminate if resources are not immediately available.
 When initiating remote processes
 .B srun
 will propagate the current working directory, unless
-\fB\-\-chdir\fR=\fIpath\fR is specified, in which case \fIpath\fR will
+\fB\-\-chdir\fR=<\fIpath\fR> is specified, in which case \fIpath\fR will
 become the working directory for the remote processes.
 .PP
 The \fB\-n\fR, \fB\-c\fR, and \fB\-N\fR options control how CPUs and
@@ -1158,7 +1215,7 @@ job128\-00.out, job128\-01.out, ...
 .RS -10
 .PP
 
-.SH "ENVIRONMENT VARIABLES"
+.SH "INPUT ENVIRONMENT VARIABLES"
 .PP
 Some srun options may be set via environment variables. 
 These environment variables, along with their corresponding options, 
@@ -1203,22 +1260,28 @@ may be required.
 The location of the SLURM configuration file.
 .TP
 \fBSLURM_ACCOUNT\fR
-Same as \fB\-U, \-\-account\fR=\fIaccount\fR
+Same as \fB\-U, \-\-account\fR
 .TP
-\fBSLURM_CHECKPOINT\fR
-Same as \fB\-\-checkpoint\fR=\fItime\fR
+\fBSLURM_ACCTG_FREQ\fR
+Same as \fB\-\-acctg\-freq\fR
 .TP
-\fBSLURM_CPU_BIND\fR
-Same as \fB\-\-cpu_bind\fR=\fItype\fR
+\fBSLURM_CHECKPOINT\fR
+Same as \fB\-\-checkpoint\fR
 .TP
-\fBSLURM_CPUS_PER_TASK\fR
-Same as \fB\-c, \-\-ncpus\-per\-task\fR=\fIn\fR
+\fBSLURM_CHECKPOINT_DIR\fR
+Same as \fB\-\-checkpoint\-dir\fR
 .TP
 \fBSLURM_CONN_TYPE\fR
-Same as \fB\-\-conn\-type\fR=(\fImesh|nav|torus\fR)
+Same as \fB\-\-conn\-type\fR
 .TP
 \fBSLURM_CORE_FORMAT\fR
-Same as \fB\-\-core\fR=\fIformat\fR
+Same as \fB\-\-core\fR
+.TP
+\fBSLURM_CPU_BIND\fR
+Same as \fB\-\-cpu_bind\fR
+.TP
+\fBSLURM_CPUS_PER_TASK\fR
+Same as \fB\-c, \-\-cpus\-per\-task\fR
 .TP
 \fBSLURM_DEBUG\fR
 Same as \fB\-v, \-\-verbose\fR
@@ -1227,7 +1290,7 @@ Same as \fB\-v, \-\-verbose\fR
 Same as \fB\-d, \-\-slurmd\-debug\fR
 .TP
 \fBSLURM_DEPENDENCY\fR
-\fB\-P, \-\-dependency\fR=\fIjobid\fR
+Same as \fB\-P, \-\-dependency\fR
 .TP
 \fBSLURM_DISABLE_STATUS\fR
 Same as \fB\-X, \-\-disable\-status\fR
@@ -1236,48 +1299,48 @@ Same as \fB\-X, \-\-disable\-status\fR
 Same as \fB\-m plane\fR
 .TP
 \fBSLURM_DISTRIBUTION\fR
-Same as \fB\-m, \-\-distribution\fR=(\fIblock|cyclic|arbitrary\fR)
+Same as \fB\-m, \-\-distribution\fR
 .TP
 \fBSLURM_EPILOG\fR
-Same as \fB\-\-epilog\fR=\fIexecutable\fR
+Same as \fB\-\-epilog\fR
 .TP
 \fBSLURM_EXCLUSIVE\fR
 Same as \fB\-\-exclusive\fR
 .TP
 \fBSLURM_GEOMETRY\fR
-Same as \fB\-g, \-\-geometry\fR=\fIX,Y,Z\fR
+Same as \fB\-g, \-\-geometry\fR
 .TP
 \fBSLURM_JOB_NAME\fR
-Same as \fB\-J, \-\-job\-name\fR=\fIjobname\fR except within an existing 
+Same as \fB\-J, \-\-job\-name\fR except within an existing
 allocation, in which case it is ignored to avoid using the batch job's name
-as the name of each job step. 
+as the name of each job step.
 .TP
 \fBSLURM_LABELIO\fR
 Same as \fB\-l, \-\-label\fR
 .TP
 \fBSLURM_MEM_BIND\fR
-Same as \fB\-\-mem_bind\fR=\fItype\fR
+Same as \fB\-\-mem_bind\fR
 .TP
 \fBSLURM_NETWORK\fR
-Same as \fB\-\-network\fR=\fItype\fR
+Same as \fB\-\-network\fR
 .TP
 \fBSLURM_NNODES\fR
-Same as \fB\-N, \-\-nodes\fR=(\fIn|min\-max\fR)
+Same as \fB\-N, \-\-nodes\fR
 .TP
-\fBSLURN_NTASKS_PER_CORE\fR
+\fBSLURM_NTASKS_PER_CORE\fR
 Same as \fB\-\-ntasks\-per\-core\fR
 .TP
-\fBSLURN_NTASKS_PER_NODE\fR
-Same as \fB\-\-ntasks\-per\-node\fRa
+\fBSLURM_NTASKS_PER_NODE\fR
+Same as \fB\-\-ntasks\-per\-node\fR
 .TP
-\fBSLURN_NTASKS_PER_SOCKET\fR
-Same as \fB\-\-ntasks\-per\-socket\fRa
+\fBSLURM_NTASKS_PER_SOCKET\fR
+Same as \fB\-\-ntasks\-per\-socket\fR
 .TP
 \fBSLURM_NO_ROTATE\fR
-Same as \fB\-\-no\-rotate\fR
+Same as \fB\-R, \-\-no\-rotate\fR
 .TP
 \fBSLURM_NPROCS\fR
-Same as \fB\-n, \-\-ntasks\fR=\fIn\fR
+Same as \fB\-n, \-\-ntasks\fR
 .TP
 \fBSLURM_OPEN_MODE\fR
 Same as \fB\-\-open\-mode\fR
@@ -1286,54 +1349,62 @@ Same as \fB\-\-open\-mode\fR
 Same as \fB\-O, \-\-overcommit\fR
 .TP
 \fBSLURM_PARTITION\fR
-Same as \fB\-p, \-\-partition\fR=\fIpartition\fR
+Same as \fB\-p, \-\-partition\fR
 .TP
 \fBSLURM_PROLOG\fR
-Same as \fB\-\-prolog\fR=\fIexecutable\fR
+Same as \fB\-\-prolog\fR
 .TP
 \fBSLURM_REMOTE_CWD\fR
-Same as \fB\-D, \-\-chdir=\fR=\fIdir\fR
+Same as \fB\-D, \-\-chdir\fR
 .TP
-\fBSLURM_SRUN_COMM_IFHN\fR
-Same as \fB\-\-ctrl\-comm\-ifhn\fR=\fIaddr\fR
+\fBSLURM_RESTART_DIR\fR
+Same as \fB\-\-restart\-dir\fR
 .TP
 \fBSLURM_STDERRMODE\fR
-Same as \fB\-e, \-\-error\fR=\fImode\fR
+Same as \fB\-e, \-\-error\fR
 .TP
 \fBSLURM_STDINMODE\fR
-Same as \fB\-i, \-\-input\fR=\fImode\fR
+Same as \fB\-i, \-\-input\fR
 .TP
 \fBSLURM_STDOUTMODE\fR
-Same as \fB\-o, \-\-output\fR=\fImode\fR
+Same as \fB\-o, \-\-output\fR
 .TP
 \fBSLURM_TASK_EPILOG\fR
-Same as \fB\-\-task\-epilog\fR=\fIexecutable\fR
-.TP
-\fBSLURM_TASK_MEM\fR
-Same as \fB\-\-task\-mem\fR
+Same as \fB\-\-task\-epilog\fR
 .TP
 \fBSLURM_TASK_PROLOG\fR
-Same as \fB\-\-task\-prolog\fR=\fIexecutable\fR
+Same as \fB\-\-task\-prolog\fR
 .TP
 \fBSLURM_THREADS\fR
 Same as \fB\-T, \-\-threads\fR
 .TP
 \fBSLURM_TIMELIMIT\fR
-Same as \fB\-t, \-\-time\fR=\fIminutes\fR
+Same as \fB\-t, \-\-time\fR
 .TP
 \fBSLURM_UNBUFFEREDIO\fR
 Same as \fB\-u, \-\-unbuffered\fR
 .TP
 \fBSLURM_WAIT\fR
-Same as \fB\-W, \-\-wait\fR=\fIseconds\fR
+Same as \fB\-W, \-\-wait\fR
 .TP
 \fBSLURM_WORKING_DIR\fR
-\fB\-D, \-\-chdir\fR=\fIpath\fR
+Same as \fB\-D, \-\-chdir\fR
+
+.SH "OUTPUT ENVIRONMENT VARIABLES"
 .PP
-Additionally, srun will set some environment variables in the environment 
+srun will set some environment variables in the environment 
 of the executing tasks on the remote compute nodes. 
 These environment variables are:
 
+.TP 22
+\fBBASIL_RESERVATION_ID\fR
+The reservation ID on Cray systems running ALPS/BASIL only.
+
+.TP
+\fBSLURM_CHECKPOINT_IMAGE_DIR\fR
+Directory into which checkpoint images should be written
+if specified on the execute line.
+
 .TP
 \fBSLURM_CPU_BIND_VERBOSE\fR
 \-\-cpu_bind verbosity (quiet,verbose).
@@ -1361,7 +1432,7 @@ Zero origin and comma separated.
 \fBSLURM_JOB_DEPENDENCY\fR
 Set to value of the \-\-dependency option.
 .TP
-\fBSLURM_JOBID\fR
+\fBSLURM_JOB_ID\fR (and \fBSLURM_JOBID\fR for backwards compatibility)
 Job id of the executing job
 
 .TP
@@ -1409,12 +1480,12 @@ The step ID of the current job
 The process ID of the task being started.
 .TP
 \fBSLURM_TASKS_PER_NODE\fR
-Number of tasks to be initiated on each node. Values are 
+Number of tasks to be initiated on each node. Values are
 comma separated and in the same order as SLURM_NODELIST.
-If two or more consecutive nodes are to have the same task 
-count, that count is followed by "(x#)" where "#" is the 
+If two or more consecutive nodes are to have the same task
+count, that count is followed by "(x#)" where "#" is the
 repetition count. For example, "SLURM_TASKS_PER_NODE=2(x3),1"
-indicates that the first three nodes will each execute three 
+indicates that the first three nodes will each execute two
 tasks and the fourth node will execute one task.
 .TP
 \fBSLURM_UMASK\fR
@@ -1466,7 +1537,7 @@ These tasks initiated outside of SLURM's monitoring
 or control. SLURM's epilog should be configured to purge
 these tasks when the job's allocation is relinquished.
 
-See \fIhttps://computing.llnl.gov/linux/slurm/quickstart.html#mpi\fR 
+See \fIhttps://computing.llnl.gov/linux/slurm/mpi_guide.html\fR 
 for more information on use of these various MPI implementations 
 with SLURM.
 
@@ -1602,7 +1673,7 @@ the script to be executed follow.
 
 > cat test.sh
 #!/bin/sh
-MACHINEFILE="nodes.$SLURM_JOBID"
+MACHINEFILE="nodes.$SLURM_JOB_ID"
 
 # Generate Machinefile for mpich such that hosts are in the same
 #  order as if run via srun
@@ -1670,9 +1741,9 @@ wait
 
 .SH "COPYING"
 Copyright (C) 2006\-2007 The Regents of the University of California.
-Copyright (C) 2008 Lawrence Livermore National Security.
+Copyright (C) 2008\-2009 Lawrence Livermore National Security.
 Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
-LLNL\-CODE\-402394.
+CODE\-OCEC\-09\-009. All rights reserved.
 .LP
 This file is part of SLURM, a resource management program.
 For details, see <https://computing.llnl.gov/linux/slurm/>.
diff --git a/doc/man/man1/srun_cr.1 b/doc/man/man1/srun_cr.1
new file mode 100644
index 0000000000000000000000000000000000000000..6e67a44d8cb784ac323eef9f2ee55b780f671cb6
--- /dev/null
+++ b/doc/man/man1/srun_cr.1
@@ -0,0 +1,73 @@
+.TH SRUN_CR "1" "March 2009" "srun_cr 2.0" "Slurm components"
+
+.SH "NAME"
+srun_cr \- run parallel jobs with checkpoint/restart support
+
+.SH SYNOPSIS
+\fBsrun_cr\fR [\fIOPTIONS\fR...]
+
+.SH DESCRIPTION
+The design of \fBsrun_cr\fR is inspired by \fBmpiexec_cr\fR from MVAPICH2 and
+\fBcr_restart\fR from BLCR.
+It is a wrapper around the \fBsrun\fR command to enable batch job 
+checkpoint/restart support when used with SLURM's \fBcheckpoint/blcr\fR plugin.
+
+.SH "OPTIONS"
+
+The \fBsrun_cr\fR execute line options are identical to those of the \fBsrun\fR
+command.
+See "man srun" for details.
+
+.SH "DETAILS"
+After initialization, \fBsrun_cr\fR registers a thread context callback
+function.
+Then it forks a process and executes "cr_run \-\-omit srun" with its arguments.
+\fBcr_run\fR is employed to exclude the \fBsrun\fR process from being dumped 
+upon checkpoint.
+All catchable signals except SIGCHLD sent to \fBsrun_cr\fR will be forwarded 
+to the child \fBsrun\fR process.
+SIGCHLD will be captured to mimic the exit status of \fBsrun\fR when it exits.
+Then \fBsrun_cr\fR loops, waiting for termination of the tasks launched
+by \fBsrun\fR.
+
+The step launch logic of SLURM is augmented to check if \fBsrun\fR is running
+under \fBsrun_cr\fR.
+If true, the environment variable \fBSLURM_SRUN_CR_SOCKET\fR should be present,
+the value of which is the address of a Unix domain socket created and listened
+to by \fBsrun_cr\fR.
+After launching the tasks, \fBsrun\fR tries to connect to the socket and sends
+the job ID, step ID and the nodes allocated to the step to \fBsrun_cr\fR.
+
+Upon checkpoint, \fBsrun_cr\fR checks to see if the tasks have been launched.
+If so, \fBsrun_cr\fR first forwards the checkpoint request to the tasks by
+calling the SLURM API \fBslurm_checkpoint_tasks()\fR before dumping its process
+context.
+
+Upon restart, \fBsrun_cr\fR checks to see if the tasks have been previously
+launched and checkpointed.
+If true, the environment variable \fBSLURM_RESTART_DIR\fR is set to the
+directory holding the checkpoint image files of the tasks.
+Then \fBsrun\fR is forked and executed again. 
+The environment variable will be used by the \fBsrun\fR command to restart 
+execution of the tasks from the previous checkpoint.
+
+.SH "COPYING"
+Copyright (C) 2009 National University of Defense Technology, China.
+Produced at National University of Defense Technology, China (cf, DISCLAIMER).
+CODE\-OCEC\-09\-009. All rights reserved.
+.LP
+This file is part of SLURM, a resource management program.
+For details, see <https://computing.llnl.gov/linux/slurm/>.
+.LP
+SLURM is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 2 of the License, or (at your option)
+any later version.
+.LP
+SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+details.
+
+.SH "SEE ALSO"
+\fBsrun\fR(1)
diff --git a/doc/man/man1/sshare.1 b/doc/man/man1/sshare.1
new file mode 100644
index 0000000000000000000000000000000000000000..2f8a890ba03bb115200fe7cd3a184aed7ebca95d
--- /dev/null
+++ b/doc/man/man1/sshare.1
@@ -0,0 +1,95 @@
+.TH SSHARE "1" "November 2008" "sshare 2.0" "SLURM Commands"
+
+.SH "NAME"
+sshare \- Tool for listing the shares of associations to a cluster.
+
+.SH "SYNOPSIS"
+\fBsshare\fR [\fIOPTIONS\fR...]
+
+.SH "DESCRIPTION"
+\fBsshare\fR is used to view SLURM share information.  This command is
+only viable when running with the priority/multifactor plugin.
+The sshare information is derived from a database with the interface
+provided by \fBslurmdbd\fR (SLURM Database daemon), which is read in
+from the slurmctld and used to process the shares available to a given
+association.  sshare displays the Account, User, Raw Shares, Normalized
+Shares, Raw Usage, Normalized Usage, Effective Usage, and Fair\-share
+factor for each association.
+
+
+.SH "OPTIONS"
+
+.TP
+\fB\-A\fR, \fB\-\-accounts=\fR
+Display information for specific accounts (comma separated list).
+
+.TP
+\fB\-a\fR, \fB\-\-all\fR
+Display information for all users.
+
+.TP
+\fB\-h\fR, \fB\-\-noheader\fR
+No header will be added to the beginning of the output.
+
+.TP
+\fB\-l\fR, \fB\-\-long\fR
+Long listing - includes the normalized usage information.
+
+.TP
+\fB\-p\fR, \fB\-\-parsable\fR
+Output will be '|' delimited with a '|' at the end.
+
+.TP
+\fB\-P\fR, \fB\-\-parsable2\fR
+Output will be '|' delimited without a '|' at the end.
+
+.TP
+\fB\-u\fR, \fB\-\-users=\fR
+Display information for specific users (comma separated list).
+
+.TP
+\fB\-v\fR, \fB\-\-verbose\fR
+Display more information about the specified options.
+
+.TP
+\fB\-V\fR, \fB\-\-version\fR
+Display the version number of sshare.
+
+.TP
+\fB\-\-help\fR, \fB\-\-usage\fR
+Display a description of sshare options and commands.
+
+
+.SH "EXAMPLES"
+.eo
+.br
+> sshare -A <Account>
+.br
+.br
+> sshare --parsable --users=<User>
+.br
+
+.ec
+
+.SH "COPYING"
+Copyright (C) 2008 Lawrence Livermore National Security.
+Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+CODE\-OCEC\-09\-009. All rights reserved.
+.LP
+This file is part of SLURM, a resource management program.
+For details, see <https://computing.llnl.gov/linux/slurm/>.
+.LP
+SLURM is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 2 of the License, or (at your option)
+any later version.
+.LP
+SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+details.
+
+.SH "SEE ALSO"
+\fBslurm.conf\fR(5)
+\fBslurmdbd\fR(8)
diff --git a/doc/man/man1/sstat.1 b/doc/man/man1/sstat.1
index d8ca661fc7dc1b7e4ac522bb5cf6a0172a3e9f3f..cca62bdbf5e30145db3c2b7ca92c0e3c549827de 100644
--- a/doc/man/man1/sstat.1
+++ b/doc/man/man1/sstat.1
@@ -1,25 +1,24 @@
-.TH SSTAT "1" "May 2008" "sacctmgr 1.3" "Slurm components"
+.TH SSTAT "1" "January 2009" "sstat 2.0" "Slurm components"
 
 .SH "NAME"
-sstat \- Used to query running job and see various usage information
-of job/step running.
+sstat \- Display various status information
+of a running job/step.
 
 .SH "SYNOPSIS"
-.BR "sstat "
-\fBsstat\fR [\fIOPTIONS\fR...] 
+.BR "sstat " 
+[\fIOPTIONS\fR...] 
 
 .SH "DESCRIPTION"
 .PP
-Status information for jobs invoked with SLURM.
+Status information for running jobs invoked with SLURM.
 .PP
 The 
 .BR "sstat "
-command displays job accounting data stored in the job accounting log 
-file in a variety of forms for your analysis.
+command displays job status information for your analysis.
 The 
 .BR "sstat "
-command displays information on jobs, job steps, status, and exitcodes by 
-default.
+command displays information pertaining to CPU, Task, Node, Resident
+Set Size (RSS) and Virtual Memory (VM).
 You can tailor the output with the use of the 
 \f3\-\-fields=\fP 
 option to specify the fields to be shown.
@@ -31,219 +30,155 @@ command displays job status data for any job running on the system.
 For the non\-root user, the 
 .BR "sstat "
 command limits the display of job status data to jobs that were 
-launched with their own user identifier (UID) by default.
-
-.SS "Options"
-.TP "10"
-
-.TP "3"
-\(bu
-\f3jobid\fP 
-.IP 
-and 
-\f3\-\-fields=\fP 
-options.
-.IP 
+launched with their own user identifier (UID).
 
 .TP 
-\f3\-F \fP\f2field_list\fP \f3,\fP  \f3\-\-fields\fP\f3=\fP\f2field_list\fP
-Displays the job status data specified by the 
-\f2field_list\fP 
-operand, which is a comma\-separated list of fields.
-Space characters are not allowed in the 
-\f2field_list\fP\c
-\&. 
-.IP 
-See the 
-\f3\-\-help\-fields\fP 
-option for a list of the available fields.
-See the section titled "Job Status Fields" for a description of 
-each field.
-.IP 
-The job accounting data is displayed in the order specified by the 
-\f2field_list\fP 
-operand.
-Thus, the following two commands display the same data but in different order:
-.RS 
-.PP
-.nf 
-.ft 3
-# sstat \-\-fields=jobid,state
-Jobid    State
-\-\-\-\-\-\-\-\-\-\- \-\-\-\-\-\-\-\-\-\-
-3          COMPLETED
-3.0        COMPLETED
-
-.ft 1
-.fi 
-.RE 
-.RS 
-.PP
-.nf 
-.ft 3
-# sacct \-\-fields=status,jobid
-State     Jobid
-\-\-\-\-\-\-\-\-\-\- \-\-\-\-\-\-\-\-\-\-
-COMPLETED  3
-COMPLETED  3.0
-
-.ft 1
-.fi 
-.RE 
-.IP 
-The default value for the 
-\f2field_list\fP 
-operand is 
-\f3"jobid,vsize,rss,pages,cputime,ntasks,state"\fP\c
-\&.
-.IP 
-This option has no effect when the 
-\f3\-\-dump\fP 
-option is also specified.
+\f3\-a \fP\f3,\fP \f3\-\-allsteps\fP
+Print all steps for the given job(s) when no step is specified.
 
+.TP 
+\f3\-e \fP\f3,\fP \f3\-\-helpformat\fP
+Print a list of fields that can be specified with the '\-\-format' option.
 
 .TP 
 \f3\-h \fP\f3,\fP \f3\-\-help\fP
 Displays a general help message.
+
 .TP 
-\f3\-\-help\-fields\fP
-Displays a list of fields that can be specified with the 
-\f3\-\-fields\fP 
-option.
-.RS 
-.PP
-.nf 
-.ft 3
-Fields available:
-cputime     jobid       ntasks      pages
-rss         state       vsize
-
-.ft 1
-.fi 
-.RE 
-.IP 
-The section titled "Job Accounting Fields" describes these fields.
+\f3\-j \fP\f3,\fP \f3\-\-jobs\fP
+Format is <job(.step)>. Stat this job step or comma\-separated list of
+job steps. This option is required.  The step portion will default to
+step 0 if not specified, unless the \-\-allsteps flag is set, in which
+case all running steps will be displayed.
 
 .TP 
-\f3\-j \fP\f2job(.step)\fP \f3,\fP  \f3\-\-jobs\fP\f3=\fP\f2job(.step)\fP
-Displays information about the specified job(.step) or list of job(.step)s.
-.IP 
-The 
-\f2job(.step)\fP 
-parameter is a comma\-separated list of jobs.
-Space characters are not permitted in this list.
-.IP 
-The default is to display information on all jobs.
+\f3\-n \fP\f3,\fP \f3\-\-noheader\fP
+No header will be added to the beginning of output. The default is to print a header.
 
 .TP 
-\f3\-\-noheader\fP
-Prevents the display of the heading over the output.
-The default action is to display a header.
-.IP 
-This option has no effect when used with the 
-\f3\-\-dump\fP 
-option.
-
-\f3\-S \fP\f3,\fP \f3\-\-stat\fP
-.IP
-Queries the status of a job as the job is running displaying
-the following data:
-.RS 
-.TP "3"
-\(bu
-\f3jobid\fP 
-.TP "3"
-\(bu
-\f3vsize\fP 
-.TP "3"
-\(bu
-\f3rss\fP 
-.TP "3"
-\(bu
-\f3pages\fP 
-.TP "3"
-\(bu
-\f3cputime\fP 
-.TP "3"
-\(bu
-\f3ntasks\fP 
-.TP "3"
-\(bu
-\f3status\fP 
-.RE 
-.IP
-You must also include the \-\-jobs=job(.step) option if no (.step) is 
-given you will recieve the job.0 step.
+\f3\-o \fP\f3,\fP \f3\-\-format\fP\f3,\fP \f3\-\-fields\fP
+Comma\-separated list of fields (use '\-\-helpformat' for a list of
+available fields).
+
+.TP 
+\f3\-p \fP\f3,\fP \f3\-\-parsable\fP
+Output will be '|' delimited with a '|' at the end.
+
+.TP 
+\f3\-P \fP\f3,\fP \f3\-\-parsable2\fP
+Output will be '|' delimited without a '|' at the end.
 
 .TP 
 \f3\-\-usage\fP
-Displays a help message.
+Display brief usage message.
 
 .TP 
-\f3\-v \fP\f3,\fP \f3\-\-verbose\fP
-Reports the state of certain variables during processing.
-This option is primarily used for debugging.
+\f3\-v\fP\f3,\fP \f3\-\-verbose\fP
+Primarily for debugging purposes, report the state of various variables during processing.
+
+.TP 
+\f3\-V \fP\f3,\fP \f3\-\-version\fP
+Print version.
+
 
 .SS "Job Status Fields"
-The following describes each job accounting field:
+The following are the field options:
 .RS 
 .TP
-\f3cputime\fP
-Minimum CPU time of any process followed by its task id along with
-the average of all processes running in the step.
+\f3AveCPU\fP
+Average CPU time of all tasks in the job step.
 .TP 
-\f3jobid\fP 
-The number of the job or job step.
-It is in the form: 
-\f2job.jobstep\fP\c
-\&.
+\f3AvePages\fP
+Average number of page faults of all tasks in the job step.
 
 .TP
-\f3ntasks\fP 
-Total number of tasks in job.
+\f3AveRSS\fP
+Average resident set size of all tasks in the job step.
 
 .TP
-\f3pages\fP
-Maximum page faults of any process followed by its task id along with
-the average of all processes running in the step.
+\f3AveVMSize\fP
+Average virtual memory size of all tasks in the job step.
 
 .TP 
-\f3rss\fP 
-Maximum resident set size of any process followed by its task id along with
-the average of all processes running in the step.
+\f3JobID\fP
+The number of the job or job step in the form \f2job.jobstep\fP.
+
+.TP
+\f3MaxPages\fP
+Maximum number of page faults of any task in the job step.
+
+.TP
+\f3MaxPagesNode\fP
+The node on which the maximum number of page faults occurred.
+
+.TP
+\f3MaxPagesTask\fP
+The task ID where the maximum number of page faults occurred.
+
+.TP
+\f3MaxRSS\fP
+Maximum resident set size of any task in the job step.
+
+.TP
+\f3MaxRSSNode\fP
+The node on which the maximum resident set size occurred.
+
+.TP
+\f3MaxRSSTask\fP
+The task ID where the maximum resident set size occurred.
+
+.TP
+\f3MaxVMSize\fP
+Maximum virtual memory size of any task in the job step.
+
+.TP
+\f3MaxVMSizeNode\fP
+The node on which the maximum virtual memory size occurred.
+
+.TP
+\f3MaxVMSizeTask\fP
+The task ID where the maximum virtual memory size occurred.
+
+.TP
+\f3MinCPU\fP
+Minimum CPU time of any task in the job step.
+
+.TP
+\f3MinCPUNode\fP
+The node on which the minimum CPU time occurred.
+
+.TP
+\f3MinCPUTask\fP
+The task ID where the minimum CPU time occurred.
+
+.TP
+\f3NTasks\fP
+Total number of tasks in the job step.
 
 .TP
-\f3state\fP
-Displays the job state.
-.IP 
-Output can be 
-\f3RUNNING\fP\c
-\&, 
-\f3SUSPENDED\fP\c
-\&,
-\f3COMPLETED\fP\c
-\&, 
-\f3CANCELLED\fP\c
-\&, 
-\f3FAILED\fP\c
-\&, 
-\f3TIMEOUT\fP\c
-\&, or 
-\f3NODE_FAIL\fP\c
-\&.
+\f3SystemCPU\fP
+Amount of system CPU time used by the job step.
 
 .TP
-\f3vsize\fP
-Maximum Virtual Memory size of any process followed by its task id along with
-the average of all processes running in the step.
+\f3TotalCPU\fP
+Total CPU time (system plus user) used by the job step.
 
 .SH "EXAMPLES"
 
+.TP
+\f3sstat \-\-format=AveCPU,AvePages,AveRSS,AveVMSize,JobID \-j 11\fP
+25:02.000  0K         1.37M      5.93M      11.0
+
+.TP
+\f3sstat \-p \-\-format=AveCPU,AvePages,AveRSS,AveVMSize,JobID \-j 11\fP
+25:02.000|0K|1.37M|5.93M|11.0|
+
 .SH "COPYING"
-Copyright (C) 2008 Lawrence Livermore National Security.
+Copyright (C) 2009 Lawrence Livermore National Security.
 Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
-LLNL\-CODE\-402394.
+CODE\-OCEC\-09\-009. All rights reserved.
 .LP
 This file is part of SLURM, a resource management program.
 For details, see <https://computing.llnl.gov/linux/slurm/>.
diff --git a/doc/man/man1/strigger.1 b/doc/man/man1/strigger.1
index f7f298ad8034a2a4078e0ab4df157909f36cd34e..f3119511e5144d2e63f40f41a680b3c358cfa98c 100644
--- a/doc/man/man1/strigger.1
+++ b/doc/man/man1/strigger.1
@@ -1,4 +1,4 @@
-.TH STRIGGER "1" "May 2008" "strigger 1.3" "Slurm components"
+.TH STRIGGER "1" "May 2008" "strigger 2.0" "Slurm components"
 
 .SH "NAME"
 strigger \- Used to set, get or clear Slurm trigger information.
@@ -272,7 +272,7 @@ Execute /home/joe/job_fini upon completion of job 1237.
 .SH "COPYING"
 Copyright (C) 2007 The Regents of the University of California.
 Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
-LLNL\-CODE\-402394.
+CODE\-OCEC\-09\-009. All rights reserved.
 .LP
 This file is part of SLURM, a resource management program.
 For details, see <https://computing.llnl.gov/linux/slurm/>.
diff --git a/doc/man/man1/sview.1 b/doc/man/man1/sview.1
index a6ca8cf1fb879665d05e56741e0c4c97aa477b9d..5d9fad1ead45d2270afd47e240857a9be9c1de84 100644
--- a/doc/man/man1/sview.1
+++ b/doc/man/man1/sview.1
@@ -1,4 +1,4 @@
-.TH "sview" "1" "SLURM 1.2" "October 2006" "SLURM Commands"
+.TH "sview" "1" "SLURM 2.0" "October 2006" "SLURM Commands"
 .SH "NAME"
 .LP 
 sview \- graphical user interface to view and modify SLURM state.
@@ -49,7 +49,7 @@ the sview command.
 .SH "COPYING"
 Copyright (C) 2006 The Regents of the University of California.
 Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
-LLNL\-CODE\-402394.
+CODE\-OCEC\-09\-009. All rights reserved.
 .LP
 This file is part of SLURM, a resource management program.
 For details, see <https://computing.llnl.gov/linux/slurm/>.
diff --git a/doc/man/man3/slurm_allocate_resources.3 b/doc/man/man3/slurm_allocate_resources.3
index d157071c98fd34076b2b811300e8d3abef9925b4..6f18756e27031c39fb0d91c0764957af9ad0f242 100644
--- a/doc/man/man3/slurm_allocate_resources.3
+++ b/doc/man/man3/slurm_allocate_resources.3
@@ -418,7 +418,7 @@ which must be linked to your process for use
 .SH "COPYING"
 Copyright (C) 2002\-2006 The Regents of the University of California.
 Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
-LLNL\-CODE\-402394.
+CODE\-OCEC\-09\-009. All rights reserved.
 .LP
 This file is part of SLURM, a resource management program.
 For details, see <https://computing.llnl.gov/linux/slurm/>.
diff --git a/doc/man/man3/slurm_checkpoint_error.3 b/doc/man/man3/slurm_checkpoint_error.3
index f79cb65ece85f656d12f8399a1863cd2ddd7764c..a99912742cddf2a1ea1f87c92547a28bf48e1672 100644
--- a/doc/man/man3/slurm_checkpoint_error.3
+++ b/doc/man/man3/slurm_checkpoint_error.3
@@ -1,4 +1,4 @@
-.TH "Slurm API" "3" "December 2005" "Morris Jette" "Slurm checkpoint functions"
+.TH "Slurm API" "3" "March 2009" "Morris Jette" "Slurm checkpoint functions"
 
 .SH "NAME"
 slurm_checkpoint_able, slurm_checkpoint_complete, slurm_checkpoint_create,
@@ -40,7 +40,9 @@ int \fBslurm_checkpoint_create\fR (
 .br
 	uint32_t \fIstep_id\fP,
 .br
-	uint16_t \fImax_wait\fP
+	uint16_t \fImax_wait\fP,
+.br
+	char *\fIimage_dir\fP
 .br
 );
 .LP
@@ -77,7 +79,28 @@ int \fBslurm_checkpoint_restart\fR (
 .br
 	uint32_t \fIjob_id\fP,
 .br
-	uint32_t \fIstep_id\fP
+	uint32_t \fIstep_id\fP,
+.br
+	uint16_t \fIstick\fP,
+.br
+	char *\fIimage_dir\fP
+.br
+);
+.LP
+.LP
+int \fBslurm_checkpoint_tasks\fR (
+.br
+	uint32_t \fIjob_id\fP,
+.br
+	uint32_t \fIstep_id\fP,
+.br
+	time_t \fIbegin_time\fP,
+.br
+	char *\fIimage_dir\fP,
+.br
+	uint16_t \fImax_wait\fP,
+.br
+	char *\fInodelist\fP
 .br
 );
 .LP
@@ -87,13 +110,18 @@ int \fBslurm_checkpoint_vacate\fR (
 .br
 	uint32_t \fIstep_id\fP,
 .br
-	uint16_t \fImax_wait\fP
+	uint16_t \fImax_wait\fP,
+.br
+	char *\fIimage_dir\fP
 .br
 );
 
 .SH "ARGUMENTS"
 .LP 
 .TP
+\fIbegin_time\fP
+When to begin the operation.
+.TP
 \fIerror_code\fP
 Error code for checkpoint operation. Only the highest value is preserved.
 .TP
@@ -101,18 +129,30 @@ Error code for checkpoint operation. Only the highest value is preserved.
 Error message for checkpoint operation. Only the \fIerror_msg\fP value for the highest 
 \fIerror_code\fP is preserved.
 .TP
+\fIimage_dir\fP
+Directory specification for where the checkpoint file should be read from or 
+written to. The default value is specified by the \fIJobCheckpointDir\fP
+SLURM configuration parameter.
+.TP
 \fIjob_id\fP
 SLURM job ID to perform the operation upon.
 .TP
 \fImax_wait\fP
 Maximum time to allow for the operation to complete in seconds.
 .TP
+\fInodelist\fP
+Nodes to which the request will be sent.
+.TP
 \fIstart_time\fP
 Time at which last checkpoint operation began (if one is in progress), otherwise zero.
 .TP
 \fIstep_id\fP
 SLURM job step ID to perform the operation upon. 
 May be NO_VAL if the operation is to be performed on all steps of the specified job.
+Specify SLURM_BATCH_SCRIPT to checkpoint a batch job.
+.TP
+\fIstick\fP
+If non\-zero then restart the job on the same nodes that it was checkpointed from.
 
 .SH "DESCRIPTION"
 .LP
@@ -217,9 +257,10 @@ which must be linked to your process for use
 (e.g. "cc \-lslurm myprog.c").
 
 .SH "COPYING"
-Copyright (C) 2004 The Regents of the University of California.
+Copyright (C) 2004\-2007 The Regents of the University of California.
+Copyright (C) 2008\-2009 Lawrence Livermore National Security.
 Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
-LLNL\-CODE\-402394.
+CODE\-OCEC\-09\-009. All rights reserved.
 .LP
 This file is part of SLURM, a resource management program.
 For details, see <https://computing.llnl.gov/linux/slurm/>.
@@ -236,4 +277,4 @@ details.
 
 .SH "SEE ALSO"
 .LP 
-\fBsrun\fR(1), \fBsqueue\fR(1), \fBfree\fR(3) 
+\fBsrun\fR(1), \fBsqueue\fR(1), \fBfree\fR(3), \fBslurm.conf\fR(5)
diff --git a/doc/man/man3/slurm_checkpoint_tasks.3 b/doc/man/man3/slurm_checkpoint_tasks.3
new file mode 100644
index 0000000000000000000000000000000000000000..32120a6bbb2293314b111f8b985f85a5cb4f7347
--- /dev/null
+++ b/doc/man/man3/slurm_checkpoint_tasks.3
@@ -0,0 +1 @@
+.so man3/slurm_checkpoint_error.3
diff --git a/doc/man/man3/slurm_complete_job.3 b/doc/man/man3/slurm_complete_job.3
index 5c3a1221b74a3f1ae18a1a94eb6c05f0ef65ed9f..882d926c0f822087af3072404945e6c20e786212 100644
--- a/doc/man/man3/slurm_complete_job.3
+++ b/doc/man/man3/slurm_complete_job.3
@@ -1,6 +1,6 @@
-.TH "Slurm API" "3" "September 2003" "Morris Jette" "Slurm job completion calls"
+.TH "Slurm API" "3" "March 2009" "Morris Jette" "Slurm job completion calls"
 .SH "NAME"
-slurm_complete_job, slurm_complete_job_step \- Slurm job completion calls
+slurm_complete_job \- Slurm job completion call
 .SH "SYNTAX"
 .LP 
 #include <slurm/slurm.h>
@@ -9,21 +9,7 @@ int \fBslurm_complete_job\fR (
 .br 
 	uint32_t \fIjob_id\fP, 
 .br
-	uint32_t \fIjob_return_code\fP,
-.br
-	uint32_t \fIsystem_return_code\fP
-.br 
-);
-.LP
-int \fBslurm_complete_job_step\fR (
-.br 
-	uint32_t \fIjob_id\fP,
-.br 
-	uint32_t \fIjob_step_id\fP,
-.br
-	uint32_t \fIjob_return_code\fP,
-.br
-	uint32_t \fIsystem_return_code\fP
+	uint32_t \fIjob_return_code\fP
 .br 
 );
 .SH "ARGUMENTS"
@@ -31,26 +17,20 @@ int \fBslurm_complete_job_step\fR (
 .TP 
 \fIjob_id\fP
 Slurm job id number.
-.TP
-\fIjob_step_id\fp
-Slurm job step id number.
 .TP 
 \fIjob_return_code\fP
-Highest exit code of any task of the user's application. A non\-zero value is considered as an indication of job failure.
-.TP 
-\fIsystem_return_code\fP
-Highest exit code of any system daemon executing the user's application. A non\-zero value is considered as an indication of failure, such as error setting the job's session ID, error creating a log file, etc.
+Exit code of the program executed.
+
 .SH "DESCRIPTION"
 .LP 
 \fBslurm_complete_job\fR Note the termination of a job. This function may only be 
 successfully executed by the job's owner or user root.
-.LP 
-\fBslurm_complete_job_step\fR Note the termination of a job step. This function 
-may only be successfully executed by the job's owner or user root.
+
 .SH "RETURN VALUE"
 .LP
 On success, zero is returned. On error, \-1 is returned, and Slurm error code 
 is set appropriately.
+
 .SH "ERRORS"
 .LP
 \fBSLURM_PROTOCOL_VERSION_ERROR\fR Protocol version has changed, re\-link your code.
@@ -73,9 +53,10 @@ which must be linked to your process for use
 (e.g. "cc \-lslurm myprog.c").
 
 .SH "COPYING"
-Copyright (C) 2002 The Regents of the University of California.
+Copyright (C) 2002\-2007 The Regents of the University of California.
+Copyright (C) 2008\-2009 Lawrence Livermore National Security.
 Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
-LLNL\-CODE\-402394.
+CODE\-OCEC\-09\-009. All rights reserved.
 .LP
 This file is part of SLURM, a resource management program.
 For details, see <https://computing.llnl.gov/linux/slurm/>.
diff --git a/doc/man/man3/slurm_complete_job_step.3 b/doc/man/man3/slurm_complete_job_step.3
deleted file mode 100644
index 117b5dc59184103baefb8f2cf206977dcd59c3e2..0000000000000000000000000000000000000000
--- a/doc/man/man3/slurm_complete_job_step.3
+++ /dev/null
@@ -1 +0,0 @@
-.so man3/slurm_complete_job.3
diff --git a/doc/man/man3/slurm_create_partition.3 b/doc/man/man3/slurm_create_partition.3
new file mode 100644
index 0000000000000000000000000000000000000000..8c2ed98140da9b138eaf0ab3b329016669dac41f
--- /dev/null
+++ b/doc/man/man3/slurm_create_partition.3
@@ -0,0 +1 @@
+.so man3/slurm_reconfigure.3
diff --git a/doc/man/man3/slurm_create_reservation.3 b/doc/man/man3/slurm_create_reservation.3
new file mode 100644
index 0000000000000000000000000000000000000000..8c2ed98140da9b138eaf0ab3b329016669dac41f
--- /dev/null
+++ b/doc/man/man3/slurm_create_reservation.3
@@ -0,0 +1 @@
+.so man3/slurm_reconfigure.3
diff --git a/doc/man/man3/slurm_delete_reservation.3 b/doc/man/man3/slurm_delete_reservation.3
new file mode 100644
index 0000000000000000000000000000000000000000..8c2ed98140da9b138eaf0ab3b329016669dac41f
--- /dev/null
+++ b/doc/man/man3/slurm_delete_reservation.3
@@ -0,0 +1 @@
+.so man3/slurm_reconfigure.3
diff --git a/doc/man/man3/slurm_free_ctl_conf.3 b/doc/man/man3/slurm_free_ctl_conf.3
index 04faaed7a5a8e4e361bfd18a18d1efe7e34e9100..aa9091b80dac74072e6899385899f23da7848e0b 100644
--- a/doc/man/man3/slurm_free_ctl_conf.3
+++ b/doc/man/man3/slurm_free_ctl_conf.3
@@ -148,7 +148,7 @@ which must be linked to your process for use
 .SH "COPYING"
 Copyright (C) 2002\-2007 The Regents of the University of California.
 Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
-LLNL\-CODE\-402394.
+CODE\-OCEC\-09\-009. All rights reserved.
 .LP
 This file is part of SLURM, a resource management program.
 For details, see <https://computing.llnl.gov/linux/slurm/>.
diff --git a/doc/man/man3/slurm_free_job_info_msg.3 b/doc/man/man3/slurm_free_job_info_msg.3
index ff193a185046d5812f926a9fe544d07cabb16c19..e8ec50e9e97012b2d3d45420420c70346e106b9a 100644
--- a/doc/man/man3/slurm_free_job_info_msg.3
+++ b/doc/man/man3/slurm_free_job_info_msg.3
@@ -110,7 +110,7 @@ REM_TIME = ISLURM_GET_REM_TIME(JOBID)
 REM_TIME = ISLURM_GET_REM_TIME2()
 .LP
 ISLURM_GET_REM_TIME2() is equivalent to ISLURM_GET_REM_TIME() except 
-that the JOBID is taken from the SLURM_JOBID environment variable, 
+that the JOBID is taken from the SLURM_JOB_ID environment variable, 
 which is set by SLURM for tasks which it launches.
 Both functions return the number of seconds remaining before the job 
 reaches the end of its allocated time.
@@ -143,7 +143,7 @@ number into the node information records and the data is terminated with a
 value of \-1. See slurm.h for full details on the data structure's contents. 
 .TP 
 \fIjob_id\fP
-Specifies a slurm job id. If zero, use the SLURM_JOBID environment variable
+Specifies a slurm job id. If zero, use the SLURM_JOB_ID environment variable
 to get the jobid.
 .TP 
 \fIjob_id_ptr\fP
@@ -356,7 +356,7 @@ expressions into a collection of individual node names.
 Copyright (C) 2002\-2006 The Regents of the University of California.
 Copyright (C) 2008 Lawrence Livermore National Security.
 Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
-LLNL\-CODE\-402394.
+CODE\-OCEC\-09\-009. All rights reserved.
 .LP
 This file is part of SLURM, a resource management program.
 For details, see <https://computing.llnl.gov/linux/slurm/>.
diff --git a/doc/man/man3/slurm_free_job_step_info_response_msg.3 b/doc/man/man3/slurm_free_job_step_info_response_msg.3
index cc76f32dfe89708a581389b25241f92af19d95a3..a393e79bf04d1e14746841feeca564ec9aaedbea 100644
--- a/doc/man/man3/slurm_free_job_step_info_response_msg.3
+++ b/doc/man/man3/slurm_free_job_step_info_response_msg.3
@@ -204,7 +204,7 @@ expressions into a collection of individual node names.
 .SH "COPYING"
 Copyright (C) 2002\-2006 The Regents of the University of California.
 Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
-LLNL\-CODE\-402394.
+CODE\-OCEC\-09\-009. All rights reserved.
 .LP
 This file is part of SLURM, a resource management program.
 For details, see <https://computing.llnl.gov/linux/slurm/>.
diff --git a/doc/man/man3/slurm_free_node_info.3 b/doc/man/man3/slurm_free_node_info.3
index 3ce4424e44fcb1c8e7c71829bb030f64c62908b0..f1a868f8a6feba294a09e15c7b46c9f7c60a9581 100644
--- a/doc/man/man3/slurm_free_node_info.3
+++ b/doc/man/man3/slurm_free_node_info.3
@@ -241,7 +241,7 @@ data, these index values will be invalid.
 .SH "COPYING"
 Copyright (C) 2002\-2006 The Regents of the University of California.
 Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
-LLNL\-CODE\-402394.
+CODE\-OCEC\-09\-009. All rights reserved.
 .LP
 This file is part of SLURM, a resource management program.
 For details, see <https://computing.llnl.gov/linux/slurm/>.
diff --git a/doc/man/man3/slurm_free_partition_info.3 b/doc/man/man3/slurm_free_partition_info.3
index 37191e03bcd4f7da81195a053fdfd2b95ebe1044..617f7fa311234c2400a41781cabe1f50ba4d41cd 100644
--- a/doc/man/man3/slurm_free_partition_info.3
+++ b/doc/man/man3/slurm_free_partition_info.3
@@ -1,15 +1,15 @@
 .TH "Slurm API" "3" "September 2006" "Morris Jette" "Slurm partition information reporting functions"
 .SH "NAME"
-slurm_free_partition_info, slurm_load_partitions, 
+slurm_free_partition_info_msg, slurm_load_partitions, 
 slurm_print_partition_info, slurm_print_partition_info_msg
-\- Slurm partitioninformation reporting functions
+\- Slurm partition information reporting functions
 .SH "SYNTAX"
 .LP
 #include <stdio.h>
 .br
 #include <slurm/slurm.h>
 .LP
-void \fBslurm_free_partition_info\fR (
+void \fBslurm_free_partition_info_msg\fR (
 .br 
 	partition_info_msg_t *\fIpartition_info_msg_ptr\fP
 .br 
@@ -27,7 +27,7 @@ int \fBslurm_load_partitions\fR (
 .LP 
 void \fBslurm_print_partition_info\fR (
 .br
-	FILE *\fIout_file\fp,
+	FILE *\fIout_file\fP,
 .br
 	partition_info_t *\fIpartition_ptr\fP,
 .br
@@ -37,7 +37,7 @@ void \fBslurm_print_partition_info\fR (
 .LP 
 void \fBslurm_print_partition_info_msg\fR (
 .br
-	FILE *\fIout_file\fp,
+	FILE *\fIout_file\fP,
 .br
 	partition_info_msg_t *\fIpartition_info_msg_ptr\fP,
 .br
@@ -81,7 +81,7 @@ not returned.  Otherwise all the configuration. job, node, or partition records
 are returned.
 .SH "DESCRIPTION"
 .LP 
-\fBslurm_free_partition_info\fR Release the storage generated by the 
+\fBslurm_free_partition_info_msg\fR Release the storage generated by the 
 \fBslurm_load_partitions\fR function.
 .LP 
 \fBslurm_load_partitions\fR Returns a partition_info_msg_t that contains an 
@@ -108,6 +108,8 @@ SLURM controller.
 .LP 
 #include <stdio.h>
 .br
+#include <stdlib.h>
+.br
 #include <slurm/slurm.h>
 .br
 #include <slurm/slurm_errno.h>
@@ -126,7 +128,7 @@ int main (int argc, char *argv[])
 .br
 	if (slurm_load_partitions((time_t)NULL,
 .br
-	                          &part_buffer_ptr, SHOW_ALL)) {
+	                          &part_info_ptr, SHOW_ALL)) {
 .br
 		slurm_perror ("slurm_load_partitions error");
 .br
@@ -138,15 +140,15 @@ int main (int argc, char *argv[])
 .br
 	slurm_print_partition_info_msg (stdout, 
 .br
-	                                part_buffer_ptr);
+	                                part_info_ptr, 0);
 .LP
 	/* A harder way.. */
 .br
-	for (i = 0; i < part_buffer_ptr\->record_count; i++) {
+	for (i = 0; i < part_info_ptr\->record_count; i++) {
 .br
 		part_ptr = &part_info_ptr\->partition_array[i];
 .br
-		slurm_print_partition_info(stdout, part_ptr);
+		slurm_print_partition_info(stdout, part_ptr, 0);
 .br
 	}
 .LP
@@ -154,11 +156,11 @@ int main (int argc, char *argv[])
 .br
 	printf("Partitions updated at %lx, records=%d\\n",
 .br
-	       part_buffer_ptr\->last_update, 
+	       part_info_ptr\->last_update, 
 .br
-	       part_buffer_ptr\->record_count);
+	       part_info_ptr\->record_count);
 .br
-	for (i = 0; i < part_buffer_ptr\->record_count; i++) {
+	for (i = 0; i < part_info_ptr\->record_count; i++) {
 .br
 		printf ("PartitionName=%s Nodes=%s\\n", 
 .br
@@ -168,7 +170,7 @@ int main (int argc, char *argv[])
 .br
 	}
 .LP
-	slurm_free_partition_info (part_buffer_ptr);
+	slurm_free_partition_info_msg (part_info_ptr);
 .br
 	exit (0);
 .br 
@@ -189,7 +191,7 @@ expressions into a collection of individual node names.
 .SH "COPYING"
 Copyright (C) 2002\-2006 The Regents of the University of California.
 Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
-LLNL\-CODE\-402394.
+CODE\-OCEC\-09\-009. All rights reserved.
 .LP
 This file is part of SLURM, a resource management program.
 For details, see <https://computing.llnl.gov/linux/slurm/>.
diff --git a/doc/man/man3/slurm_free_reservation_info_msg.3 b/doc/man/man3/slurm_free_reservation_info_msg.3
new file mode 100644
index 0000000000000000000000000000000000000000..3c9d6c9bc250988070f2a7b4fad19b2abd63732b
--- /dev/null
+++ b/doc/man/man3/slurm_free_reservation_info_msg.3
@@ -0,0 +1,2 @@
+.so man3/slurm_load_reservations.3
+
diff --git a/doc/man/man3/slurm_get_errno.3 b/doc/man/man3/slurm_get_errno.3
index 51b93cd690f6b8195f804bcbf0aa5313fc65e361..6b03c37abd56dc4d226408b2d074a0af34d0b60e 100644
--- a/doc/man/man3/slurm_get_errno.3
+++ b/doc/man/man3/slurm_get_errno.3
@@ -74,7 +74,7 @@ which must be linked to your process for use
 .SH "COPYING"
 Copyright (C) 2002 The Regents of the University of California.
 Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
-LLNL\-CODE\-402394.
+CODE\-OCEC\-09\-009. All rights reserved.
 .LP
 This file is part of SLURM, a resource management program.
 For details, see <https://computing.llnl.gov/linux/slurm/>.
diff --git a/doc/man/man3/slurm_hostlist_create.3 b/doc/man/man3/slurm_hostlist_create.3
index d08f47feb1e86c75506a4219ac864cf900c18307..5cbb3a64f6aa4a5744dcb33c2e485fadc14a2556 100644
--- a/doc/man/man3/slurm_hostlist_create.3
+++ b/doc/man/man3/slurm_hostlist_create.3
@@ -108,7 +108,7 @@ which must be linked to your process for use
 .SH "COPYING"
 Copyright (C) 2002\-2006 The Regents of the University of California.
 Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
-LLNL\-CODE\-402394.
+CODE\-OCEC\-09\-009. All rights reserved.
 .LP
 This file is part of SLURM, a resource management program.
 For details, see <https://computing.llnl.gov/linux/slurm/>.
diff --git a/doc/man/man3/slurm_init_resv_desc_msg.3 b/doc/man/man3/slurm_init_resv_desc_msg.3
new file mode 100644
index 0000000000000000000000000000000000000000..8c2ed98140da9b138eaf0ab3b329016669dac41f
--- /dev/null
+++ b/doc/man/man3/slurm_init_resv_desc_msg.3
@@ -0,0 +1 @@
+.so man3/slurm_reconfigure.3
diff --git a/doc/man/man3/slurm_init_update_node_msg.3 b/doc/man/man3/slurm_init_update_node_msg.3
new file mode 100644
index 0000000000000000000000000000000000000000..8c2ed98140da9b138eaf0ab3b329016669dac41f
--- /dev/null
+++ b/doc/man/man3/slurm_init_update_node_msg.3
@@ -0,0 +1 @@
+.so man3/slurm_reconfigure.3
diff --git a/doc/man/man3/slurm_job_step_create.3 b/doc/man/man3/slurm_job_step_create.3
index 67d848ba0194d2022544229e6329627f1cede663..af9356b5129ffae7a2979dfa872c262440f40572 100644
--- a/doc/man/man3/slurm_job_step_create.3
+++ b/doc/man/man3/slurm_job_step_create.3
@@ -75,7 +75,7 @@ which must be linked to your process for use
 .SH "COPYING"
 Copyright (C) 2002-2007 The Regents of the University of California.
 Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
-LLNL\-CODE\-402394.
+CODE\-OCEC\-09\-009. All rights reserved.
 .LP
 This file is part of SLURM, a resource management program.
 For details, see <https://computing.llnl.gov/linux/slurm/>.
diff --git a/doc/man/man3/slurm_kill_job.3 b/doc/man/man3/slurm_kill_job.3
index e07d6f7db2ee3253329907624e2eb8f4fc804234..c1695350105c37f709bc0a203b026f8c946bff76 100644
--- a/doc/man/man3/slurm_kill_job.3
+++ b/doc/man/man3/slurm_kill_job.3
@@ -122,7 +122,7 @@ which must be linked to your process for use
 .SH "COPYING"
 Copyright (C) 2002 The Regents of the University of California.
 Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
-LLNL\-CODE\-402394.
+CODE\-OCEC\-09\-009. All rights reserved.
 .LP
 This file is part of SLURM, a resource management program.
 For details, see <https://computing.llnl.gov/linux/slurm/>.
diff --git a/doc/man/man3/slurm_load_reservations.3 b/doc/man/man3/slurm_load_reservations.3
new file mode 100644
index 0000000000000000000000000000000000000000..8aebc9e443d9051da8e2a50d8793e4a7bdc60d63
--- /dev/null
+++ b/doc/man/man3/slurm_load_reservations.3
@@ -0,0 +1,216 @@
+.TH "Slurm API" "3" "January 2009" "David Bremer" "Slurm reservation information reporting functions"
+.SH "NAME"
+slurm_load_reservations, slurm_free_reservation_info_msg, 
+slurm_print_reservation_info, slurm_sprint_reservation_info,
+slurm_print_reservation_info_msg
+\- Slurm reservation information reporting functions
+.SH "SYNTAX"
+.LP
+#include <stdio.h>
+.br
+#include <slurm/slurm.h>
+.LP 
+int \fBslurm_load_reservations\fR (
+.br 
+	time_t \fIupdate_time\fR, 
+.br 
+	reserve_info_msg_t **\fIreservation_info_msg_pptr\fP
+.br 
+ );
+.LP
+void \fBslurm_free_reservation_info_msg\fR (
+.br 
+	reserve_info_msg_t *\fIreservation_info_msg_ptr\fP
+.br 
+);
+.LP 
+void \fBslurm_print_reservation_info\fR (
+.br
+	FILE *\fIout_file\fP, 
+.br
+	reserve_info_t *\fIreservation_ptr\fP,
+.br
+	int \fIone_liner\fP
+.br 
+);
+.LP 
+char * \fBslurm_sprint_reservation_info\fR (
+.br
+	reserve_info_t *\fIreservation_ptr\fP,
+.br
+	int \fIone_liner\fP
+.br 
+);
+.LP 
+void \fBslurm_print_reservation_info_msg\fR (
+.br
+	FILE *\fIout_file\fP,
+.br
+	reserve_info_msg_t *\fIreservation_info_msg_ptr\fP,
+.br
+	int \fIone_liner\fP
+.br 
+);
+.SH "ARGUMENTS"
+.LP 
+.TP 
+\fIone_liner\fP
+Print one record per line if non\-zero.
+.TP 
+\fIout_file\fP
+Specifies the file to print data to.
+.TP
+\fIreservation_info_msg_pptr\fP
+Specifies the double pointer to the structure to be created and filled 
+with the time of the last reservation update, a record count, and detailed 
+information about each reservation.  Detailed reservation information is 
+written to fixed sized records and includes:  reservation name, time limits, 
+access restrictions, etc.  See slurm.h for full details on the data 
+structure's contents. 
+.TP 
+\fIreservation_info_msg_ptr\fP
+Specifies the pointer to the structure created by \fBslurm_load_reservations\fP. 
+.TP
+\fIupdate_time\fP
+For all of the following informational calls, if update_time is equal to or greater 
+than the last time changes were made to that information, new information is 
+not returned.  Otherwise all the configuration, job, node, or reservation records 
+are returned.
+.SH "DESCRIPTION"
+.LP 
+\fBslurm_load_reservations\fR Returns a reserve_info_msg_t that contains an 
+update time, record count, and array of reservation_table records for all reservations.
+.LP 
+\fBslurm_free_reservation_info_msg\fR Release the storage generated by the 
+\fBslurm_load_reservations\fR function.
+.LP 
+\fBslurm_print_reservation_info\fR  Prints the contents of the data structure 
+describing one of the reservation records from the data loaded by the 
+\fBslurm_load_reservations\fR function.
+.LP 
+\fBslurm_sprint_reservation_info\fR  Prints the same information as 
+\fBslurm_print_reservation_info\fR, but prints to a string that must be freed 
+by the caller, rather than printing to a file.
+.LP 
+\fBslurm_print_reservation_info_msg\fR Prints the contents of the data 
+structure describing all reservation records loaded by the 
+\fBslurm_load_reservations\fR function.
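+.LP
+For example, a minimal sketch using \fBslurm_sprint_reservation_info\fR 
+(assuming \fIres_ptr\fP points to a reservation record previously loaded 
+by \fBslurm_load_reservations\fR):
+.br
+	char *buf = slurm_sprint_reservation_info(res_ptr, 0);
+.br
+	if (buf) {
+.br
+		printf("%s", buf);
+.br
+		free(buf);  /* caller must free the returned string */
+.br
+	}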
+.SH "RETURN VALUE"
+.LP
+On success, zero is returned. On error, \-1 is returned, and the Slurm 
+error code is set appropriately.
+.SH "ERRORS"
+.LP
+\fBSLURM_NO_CHANGE_IN_DATA\fR Data has not changed since \fBupdate_time\fR.
+.LP
+\fBSLURM_PROTOCOL_VERSION_ERROR\fR Protocol version has changed, re\-link 
+your code.
+.LP
+\fBSLURM_PROTOCOL_SOCKET_IMPL_TIMEOUT\fR Timeout in communicating with 
+SLURM controller.
+.SH "EXAMPLE"
+.LP 
+#include <stdio.h>
+.br
+#include <stdlib.h>
+.br
+#include <slurm/slurm.h>
+.br
+#include <slurm/slurm_errno.h>
+.LP 
+int main (int argc, char *argv[])
+.br 
+{
+.br
+	int i;
+.br
+	reserve_info_msg_t *res_info_ptr = NULL;
+.br
+	reserve_info_t *res_ptr;
+.LP
+	/* get and dump all reservation information */
+.br
+	if (slurm_load_reservations((time_t)NULL,
+.br
+	                            &res_info_ptr)) {
+.br
+		slurm_perror ("slurm_load_reservations error");
+.br
+		exit (1);
+.br
+	}
+.LP
+	/* The easy way to print... */
+.br
+	slurm_print_reservation_info_msg(stdout, 
+.br
+	                                 res_info_ptr, 0);
+.LP
+	/* A harder way.. */
+.br
+	for (i = 0; i < res_info_ptr\->record_count; i++) {
+.br
+		res_ptr = &res_info_ptr\->reservation_array[i];
+.br
+		slurm_print_reservation_info(stdout, res_ptr, 0);
+.br
+	}
+.LP
+	/* The hardest way. */
+.br
+	printf("Reservations updated at %lx, records=%d\\n",
+.br
+	       res_info_ptr\->last_update, 
+.br
+	       res_info_ptr\->record_count);
+.br
+	for (i = 0; i < res_info_ptr\->record_count; i++) {
+.br
+		printf ("ReservationName=%s Nodes=%s\\n", 
+.br
+			res_info_ptr\->reservation_array[i].name, 
+.br
+			res_info_ptr\->reservation_array[i].node_list );
+.br
+	}
+.LP
+	slurm_free_reservation_info_msg (res_info_ptr);
+.br
+	return 0;
+.br 
+}
+
+.SH "NOTES"
+These functions are included in the libslurm library, 
+which must be linked to your process for use
+(e.g. "cc \-lslurm myprog.c").
+.LP
+The \fBslurm_hostlist_\fR functions can be used to convert SLURM node list
+expressions into a collection of individual node names.
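+.LP
+For example, a minimal sketch (assuming \fInode_list\fP holds an 
+expression such as "lx[10\-12]"):
+.br
+	hostlist_t hl = slurm_hostlist_create(node_list);
+.br
+	char *host;
+.br
+	while ((host = slurm_hostlist_shift(hl))) {
+.br
+		printf("%s\\n", host);
+.br
+		free(host);  /* each shifted name is owned by the caller */
+.br
+	}
+.br
+	slurm_hostlist_destroy(hl);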
+
+.SH "COPYING"
+Copyright (C) 2002\-2006 The Regents of the University of California.
+Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+CODE\-OCEC\-09\-009. All rights reserved.
+.LP
+This file is part of SLURM, a resource management program.
+For details, see <https://computing.llnl.gov/linux/slurm/>.
+.LP
+SLURM is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 2 of the License, or (at your option)
+any later version.
+.LP
+SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+details.
+
+.SH "SEE ALSO"
+.LP 
+\fBscontrol\fR(1), \fBsinfo\fR(1), \fBsqueue\fR(1), 
+\fBslurm_hostlist_create\fR(3), \fBslurm_hostlist_shift\fR(3), 
+\fBslurm_hostlist_destroy\fR(3),
+\fBslurm_get_errno\fR(3), \fBslurm_load_node\fR(3), 
+\fBslurm_perror\fR(3), \fBslurm_strerror\fR(3)
+
diff --git a/doc/man/man3/slurm_print_reservation_info.3 b/doc/man/man3/slurm_print_reservation_info.3
new file mode 100644
index 0000000000000000000000000000000000000000..3c9d6c9bc250988070f2a7b4fad19b2abd63732b
--- /dev/null
+++ b/doc/man/man3/slurm_print_reservation_info.3
@@ -0,0 +1,2 @@
+.so man3/slurm_load_reservations.3
+
diff --git a/doc/man/man3/slurm_print_reservation_info_msg.3 b/doc/man/man3/slurm_print_reservation_info_msg.3
new file mode 100644
index 0000000000000000000000000000000000000000..3c9d6c9bc250988070f2a7b4fad19b2abd63732b
--- /dev/null
+++ b/doc/man/man3/slurm_print_reservation_info_msg.3
@@ -0,0 +1,2 @@
+.so man3/slurm_load_reservations.3
+
diff --git a/doc/man/man3/slurm_reconfigure.3 b/doc/man/man3/slurm_reconfigure.3
index 60c06fd62536184d61e1fc60c16d5a3ff0d788c6..a04c25569db67b26a34c678379a91189adc18989 100644
--- a/doc/man/man3/slurm_reconfigure.3
+++ b/doc/man/man3/slurm_reconfigure.3
@@ -1,26 +1,61 @@
-.TH "Slurm API" "3" "May 2007" "Morris Jette" "Slurm administrative calls"
+.TH "Slurm API" "3" "May 2009" "Morris Jette" "Slurm administrative calls"
 .SH "NAME"
-slurm_delete_partition, slurm_init_part_desc_msg,
-slurm_reconfigure, slurm_shutdown, slurm_update_job, 
-slurm_update_node, slurm_update_partition
+slurm_create_partition, slurm_create_reservation, 
+slurm_delete_partition, slurm_delete_reservation, 
+slurm_init_part_desc_msg, slurm_init_resv_desc_msg,
+slurm_reconfigure, slurm_shutdown, slurm_takeover, slurm_update_job, 
+slurm_init_update_node_msg, slurm_update_node, slurm_update_partition,
+slurm_update_reservation
 \- Slurm administrative functions 
 .SH "SYNTAX"
 .LP 
 #include <slurm/slurm.h>
 .LP
+int \fBslurm_create_partition\fR ( 
+.br 
+	update_part_msg_t *\fIupdate_part_msg_ptr\fP 
+.br 
+);
+.LP
+int \fBslurm_create_reservation\fR ( 
+.br 
+	reserve_request_msg_t *\fIupdate_resv_msg_ptr\fP 
+.br 
+);
+.LP
+int \fBslurm_delete_partition\fR ( 
+.br 
+	delete_part_msg_t *\fIdelete_part_msg_ptr\fP 
+.br 
+);
+.LP
+int \fBslurm_delete_reservation\fR ( 
+.br 
+	reservation_name_msg_t *\fIdelete_resv_msg_ptr\fP 
+.br 
+);
+.LP
 void \fBslurm_init_part_desc_msg\fR (
 .br
 	update_part_msg_t *\fIupdate_part_msg_ptr\fP 
 .br 
 );
+.LP
+void \fBslurm_init_resv_desc_msg\fR (
+.br
+	reserve_request_msg_t *\fIupdate_resv_msg_ptr\fP 
+.br 
+);
 .LP 
 int \fBslurm_reconfigure\fR ( );
 .LP 
 int \fBslurm_shutdown\fR ( 
 .br
-	uint16_t \fIoptions\fP
+	uint16_t \fIshutdown_options\fP
 .br
 );
+.LP 
+int \fBslurm_takeover\fR ( );
 .LP
 int \fBslurm_update_job\fR (
 .br 
@@ -28,15 +63,15 @@ int \fBslurm_update_job\fR (
 .br 
 );
 .LP
-int \fBslurm_update_node\fR ( 
-.br 
+void \fBslurm_init_update_node_msg\fR(
+.br
 	update_node_msg_t *\fIupdate_node_msg_ptr\fP 
 .br 
 );
 .LP
-int \fBslurm_delete_partition\fR ( 
+int \fBslurm_update_node\fR ( 
 .br 
-	delete_part_msg_t *\fIdelete_part_msg_ptr\fP 
+	update_node_msg_t *\fIupdate_node_msg_ptr\fP 
 .br 
 );
 .LP
@@ -45,10 +80,16 @@ int \fBslurm_update_partition\fR (
 	update_part_msg_t *\fIupdate_part_msg_ptr\fP 
 .br 
 );
+.LP
+int \fBslurm_update_reservation\fR ( 
+.br 
+	reserve_request_msg_t *\fIupdate_resv_msg_ptr\fP 
+.br 
+);
 .SH "ARGUMENTS"
 .LP 
 .TP 
-\fIoptions\fP
+\fIshutdown_options\fP
 0: all slurm daemons are shutdown
 .br
 1: slurmctld generates a core file
@@ -56,37 +97,76 @@ int \fBslurm_update_partition\fR (
 2: only the slurmctld is shutdown (no core file)
 .TP 
 \fIdelete_part_msg_ptr\fP
-Specifies the pointer to a partition delete request specification. See slurm.h for 
-full details on the data structure's contents. 
+Specifies the pointer to a partition delete request specification. 
+See slurm.h for full details on the data structure's contents. 
+.TP 
+\fIdelete_resv_msg_ptr\fP
+Specifies the pointer to a reservation delete request specification. 
+See slurm.h for full details on the data structure's contents. 
 .TP
 \fIupdate_job_msg_ptr\fP
-Specifies the pointer to a job update request specification. See slurm.h for full 
-details on the data structure's contents. 
+Specifies the pointer to a job update request specification. See slurm.h 
+for full details on the data structure's contents. 
 .TP 
 \fIupdate_node_msg_ptr\fP
-Specifies the pointer to a node update request specification. See slurm.h for full 
-details on the data structure's contents. 
+Specifies the pointer to a node update request specification. See slurm.h 
+for full details on the data structure's contents. 
 .TP 
 \fIupdate_part_msg_ptr\fP
-Specifies the pointer to a partition update request specification. See slurm.h for 
-full details on the data structure's contents. 
+Specifies the pointer to a partition create or update request specification. 
+See slurm.h for full details on the data structure's contents. 
+.TP 
+\fIupdate_resv_msg_ptr\fP
+Specifies the pointer to a reservation create or update request specification. 
+See slurm.h for full details on the data structure's contents. 
 .SH "DESCRIPTION"
 .LP 
-\fBslurm_delete_partition\fR Request that the specified partition be deleted. 
-All jobs associated with the identified partition will be terminated and purged.
+\fBslurm_create_partition\fR Request that a new partition be created. 
+Initialize the data structure using the \fBslurm_init_part_desc_msg\fR 
+function prior to setting values of the parameters to be changed.
+Note: \fBslurm_init_part_desc_msg\fR is not equivalent to setting the data 
+structure values to zero.  A partition name must be set for the call to
+succeed.
+This function may only be successfully executed by user root.
+.LP 
+\fBslurm_create_reservation\fR Request that a new reservation be created. 
+Initialize the data structure using the \fBslurm_init_resv_desc_msg\fR 
+function prior to setting values of the parameters to be changed.
+Note: \fBslurm_init_resv_desc_msg\fR is not equivalent to setting the data 
+structure values to zero.  The reservation's time limits, user or 
+account restrictions, and node names or a node count must be specified for
+the call to succeed.
 This function may only be successfully executed by user root.
 .LP 
-\fBslurm_init_part_desc_msg\fR Initialize the contents of a partition descriptor 
-with default values. Note: \fBslurm_init_part_desc_msg\fR is not equivalent to 
-setting the data structure values to zero. Execute this function before executing 
-\fBslurm_update_part\fR.
+\fBslurm_delete_partition\fR Request that the specified partition be deleted. 
+All jobs associated with the identified partition will be terminated and 
+purged.  This function may only be successfully executed by user root.
+.LP 
+\fBslurm_delete_reservation\fR Request that the specified reservation be 
+deleted. This function may only be successfully executed by user root.
+.LP 
+\fBslurm_init_part_desc_msg\fR Initialize the contents of a partition 
+descriptor with default values. Note: \fBslurm_init_part_desc_msg\fR is 
+not equivalent to setting the data structure values to zero. Execute 
+this function before executing \fBslurm_create_partition\fR or 
+\fBslurm_update_partition\fR.
+.LP 
+\fBslurm_init_resv_desc_msg\fR Initialize the contents of a reservation 
+descriptor with default values. Note: \fBslurm_init_resv_desc_msg\fR is 
+not equivalent to setting the data structure values to zero. Execute this 
+function before executing \fBslurm_create_reservation\fR or 
+\fBslurm_update_reservation\fR.
 .LP 
-\fBslurm_reconfigure\fR Request that the Slurm controller re\-read its configuration 
-file. The new configuration parameters take effect immediately. This function may 
-only be successfully executed by user root.
+\fBslurm_reconfigure\fR Request that the Slurm controller re\-read its 
+configuration file. The new configuration parameters take effect 
+immediately. This function may only be successfully executed by user root.
 .LP 
-\fBslurm_shutdown\fR Request that the Slurm controller terminate. This function may 
-only be successfully executed by user root.
+\fBslurm_shutdown\fR Request that the Slurm controller terminate. This 
+function may only be successfully executed by user root.
+.LP 
+\fBslurm_takeover\fR Request that the Slurm primary controller shut down 
+immediately and the backup controller take over.
+This function may only be successfully executed by user root.
 .LP 
 \fBslurm_update_job\fR Request that the configuration of a job be updated. Note 
 that most, but not all parameters of a job may be changed by this function. 
@@ -99,10 +179,15 @@ Slurm uses the priority one to represent jobs that can not be scheduled until
 additional nodes are returned to service (i.e. not DOWN, DRAINED, or FAILED). 
 This permits lower priority jobs to utilize those resources which are available.
 .LP 
+\fBslurm_init_update_node_msg\fR Initialize the contents of an update node 
+descriptor with default values. Note: \fBslurm_init_update_node_msg\fR is 
+not equivalent to setting the data structure values to zero. Execute 
+this function before executing \fBslurm_update_node\fR.
+.LP 
 \fBslurm_update_node\fR Request that the state of one or more nodes be updated. 
 Note that the state of a node (e.g. DRAINING, IDLE, etc.) may be changed, but 
 its hardware configuration may not be changed by this function. If the hardware 
-configuration of a node changes, update the Slurm configuration file and execute 
+configuration of a node changes, update the Slurm configuration file and execute
 the \fBslurm_reconfigure\fR function. This function may only be successfully 
 executed by user root. If used by some autonomous program, the state value 
 most likely to be used is \fBNODE_STATE_DRAIN\fR or \fBNODE_STATE_FAILING\fR. 
@@ -115,18 +200,29 @@ is not responding (and has not responded for an interval at least as long
 as the \fBSlurmdTimeout\fR configuration parameter). The node will leave the 
 \fBNODE_STATE_DOWN\fR state as soon as  the slurmd daemon communicates.
 .LP 
-\fBslurm_update_partition\fR Request that the configuration of a partition be updated. 
-Note that most, but not all parameters of a partition may be changed by this 
-function. Initialize the data structure using the \fBslurm_init_part_desc_msg\fR 
-function prior to setting values of the parameters to be changed. Note: 
-\fBslurm_init_part_desc_msg\fR is not equivalent to setting the data structure 
-values to zero. If the partition name specified by this function does not 
-already exist, a new partition is created with the supplied parameters. This 
+\fBslurm_update_partition\fR Request that the configuration of a 
+partition be updated.  Note that most, but not all parameters of a 
+partition may be changed by this function. Initialize the data 
+structure using the \fBslurm_init_part_desc_msg\fR function prior 
+to setting values of the parameters to be changed. Note: 
+\fBslurm_init_part_desc_msg\fR is not equivalent to setting the 
+data structure values to zero. This function may only be 
+successfully executed by user root.
+.LP 
+\fBslurm_update_reservation\fR Request that the configuration of a 
+reservation be updated.  Initialize the data structure using the 
+\fBslurm_init_resv_desc_msg\fR function prior to setting values of 
+the parameters to be changed. Note:  \fBslurm_init_resv_desc_msg\fR 
+is not equivalent to setting the data structure values to zero. This 
 function may only be successfully executed by user root.
 .SH "RETURN VALUE"
 .LP
 On success, zero is returned. On error, \-1 is returned, and the Slurm error 
 code is set appropriately.
+.LP
+Exception:  A successful slurm_create_reservation call returns a string 
+containing the name of the reservation; this memory must be freed by the 
+caller.  A failed call returns NULL and sets the Slurm error code.
 .SH "ERRORS"
 .LP
 \fBSLURM_PROTOCOL_VERSION_ERROR\fR Protocol version has changed, re\-link your 
@@ -141,7 +237,8 @@ node state transition is not valid.
 .LP
 \fBESLURM_INVALID_AUTHTYPE_CHANGE\fR The \fBAuthType\fR parameter can
 not be changed using the \fBslurm_reconfigure\fR function, but all SLURM
-daemons and commands must be restarted. See \fBslurm.conf\fR(5) for more information.
+daemons and commands must be restarted. See \fBslurm.conf\fR(5) for more 
+information.
 .LP
 \fBESLURM_INVALID_SCHEDTYPE_CHANGE\fR The \fBSchedulerType\fR parameter can 
 not be changed using the \fBslurm_reconfigure\fR function, but the 
@@ -158,6 +255,20 @@ the requested action (e.g. trying to delete or modify another user's job).
 .LP
 \fBSLURM_PROTOCOL_SOCKET_IMPL_TIMEOUT\fR Timeout in communicating with 
 SLURM controller.
+.LP
+\fBESLURM_RESERVATION_ACCESS\fR  Requestor is not authorized to access the 
+reservation.
+.LP
+\fBESLURM_RESERVATION_INVALID\fR  Invalid reservation parameter given, 
+e.g. an incorrect reservation name.
+.LP
+\fBESLURM_INVALID_TIME_VALUE\fR  Invalid time value.
+.LP
+\fBESLURM_RESERVATION_BUSY\fR  Reservation is busy, e.g. trying to delete a 
+reservation while in use.
+.LP
+\fBESLURM_RESERVATION_NOT_USABLE\fR  Reservation not usable, e.g. trying to 
+use an expired reservation.
 .SH "EXAMPLE"
 .LP 
 #include <stdio.h>
@@ -170,13 +281,17 @@ int main (int argc, char *argv[])
 .br 
 {
 .br 
-	job_desc_msg_t update_job_msg;
+	job_desc_msg_t          update_job_msg;
+.br
+	update_node_msg_t       update_node_msg;
+.br
+	partition_desc_msg_t    update_part_msg;
 .br
-	update_node_msg_t update_node_msg;
+	delete_part_msg_t       delete_part_msg;
 .br
-	partition_desc_msg_t update_part_msg ;
+	reserve_request_msg_t   resv_msg;
 .br
-	delete_part_msg_t delete_part_msg ;
+	char                   *resv_name = NULL;
 .LP 
 	if (slurm_reconfigure ( )) {
 .br
@@ -205,6 +320,16 @@ int main (int argc, char *argv[])
 	update_part_msg.name = "test.partition";
 .br
 	update_part_msg.state_up = 0;  /* partition down */
+.br 
+	if (slurm_create_partition (&update_part_msg)) { 
+.br
+		slurm_perror ("slurm_create_partition error");
+.br 
+		exit (1);
+.br
+	}
+.LP 
+	update_part_msg.state_up = 1;  /* partition up */
 .br 
 	if (slurm_update_partition (&update_part_msg)) { 
 .br
@@ -224,6 +349,8 @@ int main (int argc, char *argv[])
 .br
 	}
 .LP 
+	slurm_init_update_node_msg (&update_node_msg);
+.br
 	update_node_msg.node_names = "lx[10\-12]";
 .br
 	update_node_msg.node_state = NODE_STATE_DRAINING ;
@@ -235,6 +362,28 @@ int main (int argc, char *argv[])
 		exit (1);
 .br 
 	}
+.LP 
+	slurm_init_resv_desc_msg ( &resv_msg );
+.br
+	resv_msg.start_time = time(NULL) + 60*60;  /* One hour from now */ 
+.br
+	resv_msg.duration = 720;  /* 12 hours/720 minutes */
+.br
+	resv_msg.node_cnt = 10; 
+.br
+	resv_msg.accounts = "admin";
+.br 
+	resv_name = slurm_create_reservation (&resv_msg); 
+.br
+	if (!resv_name) {
+.br
+		slurm_perror ("slurm_create_reservation error");
+.br 
+		exit (1);
+.br
+	}
+.br
+	free(resv_name);
 .br 
 	exit (0);
 .br 
@@ -248,7 +397,7 @@ which must be linked to your process for use
 .SH "COPYING"
 Copyright (C) 2002\-2007 The Regents of the University of California.
 Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
-LLNL\-CODE\-402394.
+CODE\-OCEC\-09\-009. All rights reserved.
 .LP
 This file is part of SLURM, a resource management program.
 For details, see <https://computing.llnl.gov/linux/slurm/>.
diff --git a/doc/man/man3/slurm_resume.3 b/doc/man/man3/slurm_resume.3
index c31c9741b0382adeea71c87550bd1a912342225f..70942cb8cca84da2ccd1aaf80b9c1591f2d8b253 100644
--- a/doc/man/man3/slurm_resume.3
+++ b/doc/man/man3/slurm_resume.3
@@ -75,7 +75,7 @@ which must be linked to your process for use
 .SH "COPYING"
 Copyright (C) 2005\-2006 The Regents of the University of California.
 Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
-LLNL\-CODE\-402394.
+CODE\-OCEC\-09\-009. All rights reserved.
 .LP
 This file is part of SLURM, a resource management program.
 For details, see <https://computing.llnl.gov/linux/slurm/>.
diff --git a/doc/man/man3/slurm_slurmd_status.3 b/doc/man/man3/slurm_slurmd_status.3
index 60ef74bcbc548d73fb8b626bb1af055596e7f507..1f4ff24faf35bfd52dcfb071cbb5d4af91b8a215 100644
--- a/doc/man/man3/slurm_slurmd_status.3
+++ b/doc/man/man3/slurm_slurmd_status.3
@@ -52,7 +52,7 @@ message as loaded using slurm_load_slurmd_status.
 Copyright (C) 2006-2007 The Regents of the University of California.
 Copyright (C) 2008 Lawrence Livermore National Security.
 Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
-LLNL\-CODE\-402394.
+CODE\-OCEC\-09\-009. All rights reserved.
 .LP
 This file is part of SLURM, a resource management program.
 For details, see <https://computing.llnl.gov/linux/slurm/>.
diff --git a/doc/man/man3/slurm_sprint_reservation_info.3 b/doc/man/man3/slurm_sprint_reservation_info.3
new file mode 100644
index 0000000000000000000000000000000000000000..3c9d6c9bc250988070f2a7b4fad19b2abd63732b
--- /dev/null
+++ b/doc/man/man3/slurm_sprint_reservation_info.3
@@ -0,0 +1,2 @@
+.so man3/slurm_load_reservations.3
+
diff --git a/doc/man/man3/slurm_step_ctx_create.3 b/doc/man/man3/slurm_step_ctx_create.3
index 54a96c2998ea00192f9227977735ff66ec9c09fb..13b35e7f6931f5a76a920eb069630fac6e386a49 100644
--- a/doc/man/man3/slurm_step_ctx_create.3
+++ b/doc/man/man3/slurm_step_ctx_create.3
@@ -236,7 +236,7 @@ which must be linked to your process for use
 .SH "COPYING"
 Copyright (C) 2004-2007 The Regents of the University of California.
 Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
-LLNL\-CODE\-402394.
+CODE\-OCEC\-09\-009. All rights reserved.
 .LP
 This file is part of SLURM, a resource management program.
 For details, see <https://computing.llnl.gov/linux/slurm/>.
diff --git a/doc/man/man3/slurm_step_launch.3 b/doc/man/man3/slurm_step_launch.3
index 6e6c10c2094754fc3a4198c0b04eb1c0c59c6528..fb2c4a7393b3396df31187333e39021ecc20465b 100644
--- a/doc/man/man3/slurm_step_launch.3
+++ b/doc/man/man3/slurm_step_launch.3
@@ -20,8 +20,6 @@ void \fBslurm_step_launch_params_t_init\fR (
 int \fBslurm_step_launch\fR (
 .br
 	slurm_step_ctx \fIctx\fP,
-.br
-	char * \fIlauncher_host\fP,
 .br
 	const slurm_step_launch_params_t *\fIlaunch_req\fP,
 .br
@@ -68,12 +66,6 @@ function calls, and destroyed by \fBslurm_step_ctx_destroy\fR.
 \fIlaunch_req\fP
 Pointer to a structure allocated by the user containing specifications of 
 the job step to be launched.
-.TP
-\fIlauncher_host\fP
-Host name or address to be used to identify the destination of PMI communications
-for MPICH2. We intend to embed this information within \fIlaunch_req\fP in the
-next major release of SLURM, when changes to the protocol can be more easily
-addressed.
 
 .SH "DESCRIPTION"
 .LP
@@ -238,7 +230,7 @@ which must be linked to your process for use
 Copyright (C) 2006-2007 The Regents of the University of California.
 Copyright (C) 2008 Lawrence Livermore National Security.
 Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
-LLNL\-CODE\-402394.
+CODE\-OCEC\-09\-009. All rights reserved.
 .LP
 This file is part of SLURM, a resource management program.
 For details, see <https://computing.llnl.gov/linux/slurm/>.
diff --git a/doc/man/man3/slurm_takeover.3 b/doc/man/man3/slurm_takeover.3
new file mode 100644
index 0000000000000000000000000000000000000000..8c2ed98140da9b138eaf0ab3b329016669dac41f
--- /dev/null
+++ b/doc/man/man3/slurm_takeover.3
@@ -0,0 +1 @@
+.so man3/slurm_reconfigure.3
diff --git a/doc/man/man3/slurm_trigger.3 b/doc/man/man3/slurm_trigger.3
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/doc/man/man3/slurm_update_reservation.3 b/doc/man/man3/slurm_update_reservation.3
new file mode 100644
index 0000000000000000000000000000000000000000..8c2ed98140da9b138eaf0ab3b329016669dac41f
--- /dev/null
+++ b/doc/man/man3/slurm_update_reservation.3
@@ -0,0 +1 @@
+.so man3/slurm_reconfigure.3
diff --git a/doc/man/man5/bluegene.conf.5 b/doc/man/man5/bluegene.conf.5
index eb24261238bc925cacfc90800207b25e41cf1a58..484360c5112e4c18050680505100fecab7625a0a 100644
--- a/doc/man/man5/bluegene.conf.5
+++ b/doc/man/man5/bluegene.conf.5
@@ -1,4 +1,4 @@
-.TH "bluegene.conf" "5" "April 2008" "bluegene.conf 1.2" "Slurm configuration file"
+.TH "bluegene.conf" "5" "April 2008" "bluegene.conf 2.0" "Slurm configuration file"
 .SH "NAME"
 bluegene.conf \- Slurm configuration file for BlueGene systems 
 .SH "DESCRIPTION"
@@ -27,7 +27,7 @@ Alternative BlrtsImage.  This is an optional field only used for
 mulitple images on a system and should be followed by a Groups= with
 the user groups allowed to use this image (i.e. Groups=da,jette) if 
 Groups= is not stated then this image will be able to be used by all
-groups. You can but as many alternative images as you want in the conf file.
+groups. You can put as many alternative images as you want in the conf file.
 
 .TP
 \fBAltLinuxImage\fR
@@ -35,7 +35,7 @@ Alternative LinuxImage.  This is an optional field only used for
 mulitple images on a system and should be followed by a Groups= with
 the user groups allowed to use this image (i.e. Groups=da,jette) if 
 Groups= is not stated then this image will be able to be used by all
-groups. You can but as many alternative images as you want in the conf file.
+groups. You can put as many alternative images as you want in the conf file.
 
 .TP
 \fBAltRamDiskImage\fR
@@ -43,7 +43,7 @@ Alternative RamDiskImage.  This is an optional field only used for
 mulitple images on a system and should be followed by a Groups= with
 the user groups allowed to use this image (i.e. Groups=da,jette) if 
 Groups= is not stated then this image will be able to be used by all
-groups. You can but as many alternative images as you want in the conf file.
+groups. You can put as many alternative images as you want in the conf file.
 
 .TP
 \fBBlrtsImage\fR
@@ -67,7 +67,7 @@ Alternative CnloadImage.  This is an optional field only used for
 mulitple images on a system and should be followed by a Groups= with
 the user groups allowed to use this image (i.e. Groups=da,jette) if 
 Groups= is not stated then this image will be able to be used by all
-groups. You can but as many alternative images as you want in the conf file.
+groups. You can put as many alternative images as you want in the conf file.
 
 .TP
 \fBAltIoloadImage\fR
@@ -75,7 +75,7 @@ Alternative IoloadImage.  This is an optional field only used for
 mulitple images on a system and should be followed by a Groups= with
 the user groups allowed to use this image (i.e. Groups=da,jette) if 
 Groups= is not stated then this image will be able to be used by all
-groups. You can but as many alternative images as you want in the conf file.
+groups. You can put as many alternative images as you want in the conf file.
 
 .TP
 \fBCnloadImage\fR
@@ -94,7 +94,7 @@ Alternative MloaderImage.  This is an optional field only used for
 mulitple images on a system and should be followed by a Groups= with
 the user groups allowed to use this image (i.e. Groups=da,jette) if 
 Groups= is not stated then this image will be able to be used by all
-groups. You can but as many alternative images as you want in the conf file.
+groups. You can put as many alternative images as you want in the conf file.
 
 .TP
 \fBBasePartitionNodeCount\fR
@@ -266,7 +266,7 @@ BPs=[333x333] Type=SMALL NodeCards=4 Quarters=3 # 1/16 * 4 + 1/4 * 3
 .SH "COPYING"
 Copyright (C) 2006 The Regents of the University of California.
 Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
-LLNL\-CODE\-402394.
+CODE\-OCEC\-09\-009. All rights reserved.
 .LP
 This file is part of SLURM, a resource management program.
 For details, see <https://computing.llnl.gov/linux/slurm/>.
diff --git a/doc/man/man5/slurm.conf.5 b/doc/man/man5/slurm.conf.5
index eb8618009d826317827c0c2ce04f3b48d9234f05..d82014881cff57c9adadb01136b1be84d76d4d08 100644
--- a/doc/man/man5/slurm.conf.5
+++ b/doc/man/man5/slurm.conf.5
@@ -1,4 +1,4 @@
-.TH "slurm.conf" "5" "September 2008" "slurm.conf 1.3" "Slurm configuration file"
+.TH "slurm.conf" "5" "May 2009" "slurm.conf 2.0" "Slurm configuration file"
 
 .SH "NAME"
 slurm.conf \- Slurm configuration file 
@@ -38,95 +38,96 @@ The overall configuration parameters available include:
 
 .TP
 \fBAccountingStorageEnforce\fR
-If set to a non-zero value and the user, partition, account association is not 
-defined for a job in the accounting database then prevent the job from being 
-executed. This needs to be set to '2' if you the association limits will also 
-be enforced.  If set to anything else limits of associations will not be 
-enforced.
-The default value is zero.
+This controls the level of association\-based enforcement to impose when
+new jobs are submitted.  Valid options are any combination of
+"associations", "limits", and "wckeys", or "all" to enable all of them.
+If "limits" is set, "associations" is implied.  If "wckeys" is set, both
+"limits" and "associations" are implied, along with TrackWckey being set.
+With associations enforced, no new job is allowed to run unless a
+corresponding association exists in the system.  With limits enforced,
+users can be restricted by association, e.g. in how many nodes they may
+use or how long their jobs may run.  With wckeys enforced, jobs will not
+be scheduled unless a valid workload characterization key is specified.
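+.br
+For example, to require that an association exist for each new job and 
+that association limits be enforced, a sketch of one possible setting is:
+.br
+AccountingStorageEnforce=associations,limits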
+
+.TP
+\fBAccountingStorageBackupHost\fR
+The name of the backup machine hosting the accounting storage database.
+Only used for accounting_storage/slurmdbd plugin, ignored otherwise.
 
 .TP
 \fBAccountingStorageHost\fR
-Define the name of the host where the database is running we are going
-to store the accounting data.
+The name of the machine hosting the accounting storage database.
 Only used for database type storage plugins, ignored otherwise.
 Also see \fBDefaultStorageHost\fR.
 
 .TP
 \fBAccountingStorageLoc\fR
-Specifies the location of the file or database where accounting 
-records are written.
+The fully qualified file name where accounting records are written
+when the \fBAccountingStorageType\fR is "accounting_storage/filetxt"
+or else the name of the database where accounting records are stored when the
+\fBAccountingStorageType\fR is a database.
 Also see \fBDefaultStorageLoc\fR.
 
 .TP
 \fBAccountingStoragePass\fR
-Define the password used to gain access to the database to store the
-accounting data.
-Only used for database type storage plugins, ignored otherwise.
-In the case of Slurm DBD (Data Base Daemon) with Munge authentication this can be 
-configured to use a Munge daemon specifically configured to provide authentication 
-between clusters while the default Munge daemon provides authentication within a cluster. 
-In that case, \fBAccountingStoragePass\fR should specify the named port to be used
-for communications with the alternate Munge daemon (e.g. 
-"/var/run/munge/global.socket.2"). The default value is NULL.
-Also see \fBDefaultStoragePass\fR.
+The password used to gain access to the database to store the
+accounting data.  Only used for database type storage plugins, ignored
+otherwise.  In the case of SLURM DBD (Database Daemon) with Munge
+authentication this can be configured to use a Munge daemon
+specifically configured to provide authentication between clusters
+while the default Munge daemon provides authentication within a
+cluster.  In that case, \fBAccountingStoragePass\fR should specify the
+named port to be used for communications with the alternate Munge
+daemon (e.g.  "/var/run/munge/global.socket.2"). The default value is
+NULL.  Also see \fBDefaultStoragePass\fR.
 
 .TP
 \fBAccountingStoragePort\fR
-Define the port the database server is listening on where we are going
-to store the accounting data.
+The listening port of the accounting storage database server.
 Only used for database type storage plugins, ignored otherwise.
 Also see \fBDefaultStoragePort\fR.
 
 .TP
 \fBAccountingStorageType\fR
-Define the accounting storage mechanism type.
-Acceptable values at present include 
-"accounting_storage/filetxt", "accounting_storage/gold",
-"accounting_storage/mysql", "accounting_storage/none", 
-"accounting_storage/pgsql", and "accounting_storage/slurmdbd".
-The value "accounting_storage/filetxt" indicates that accounting records
-will be written to a the file specified by the 
-\fBAccountingStorageLoc\fR parameter.
-The value "accounting_storage/gold" indicates that account records
-will be written to Gold
-(http://www.clusterresources.com/pages/products/gold-allocation-manager.php),
-which maintains its own database.
-The value "accounting_storage/mysql" indicates that accounting records
-should be written to a MySQL database specified by the 
-\fBAccountingStorageLoc\fR parameter.
-The default value is "accounting_storage/none", which means that
-account records are not maintained. 
-The value "accounting_storage/pgsql" indicates that accounting records
-should be written to a PostgreSQL database specified by the 
-\fBAccountingStorageLoc\fR parameter.  This plugin is not complete and 
-should not be used if wanting to use associations.  It will however work with
-basic accounting of jobs and job steps.  If interested in 
-completing please email slurm-dev@lists.llnl.gov.
-The value "accounting_storage/slurmdbd" indicates that accounting records
-will be written to SlurmDDB, which manages an underlying MySQL or 
-PostgreSQL database. See "man slurmdbd" for more information.
-Also see \fBDefaultStorageType\fR.
+The accounting storage mechanism type.  Acceptable values at
+present include "accounting_storage/filetxt",
+"accounting_storage/mysql", "accounting_storage/none",
+"accounting_storage/pgsql", and "accounting_storage/slurmdbd".  The
+"accounting_storage/filetxt" value indicates that accounting records
+will be written to the file specified by the
+\fBAccountingStorageLoc\fR parameter.  The "accounting_storage/mysql"
+value indicates that accounting records will be written to a MySQL
+database specified by the \fBAccountingStorageLoc\fR parameter.  The
+"accounting_storage/pgsql" value indicates that accounting records
+will be written to a PostgreSQL database specified by the
+\fBAccountingStorageLoc\fR parameter.  The
+"accounting_storage/slurmdbd" value indicates that accounting records
+will be written to the SLURM DBD, which manages an underlying MySQL or
+PostgreSQL database. See "man slurmdbd" for more information.  The
+default value is "accounting_storage/none" and indicates that account
+records are not maintained.  Note: the PostgreSQL plugin is not
+complete and should not be used if you want to use associations.  It
+will however work with basic accounting of jobs and job steps.  If
+interested in completing it, please email slurm-dev@lists.llnl.gov.  Also
+see \fBDefaultStorageType\fR.
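+.br
+For example, a database\-backed sketch ("dbhost" is a placeholder host 
+name) might use:
+.br
+AccountingStorageType=accounting_storage/slurmdbd
+.br
+AccountingStorageHost=dbhost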
 
 .TP
 \fBAccountingStorageUser\fR
-Define the name of the user we are going to connect to the database
-with to store the accounting data.
+The user account for accessing the accounting storage database.
 Only used for database type storage plugins, ignored otherwise.
 Also see \fBDefaultStorageUser\fR.
 
 .TP
 \fBAuthType\fR
-Define the authentication method for communications between SLURM 
+The authentication method for communications between SLURM 
 components. 
 Acceptable values at present include "auth/none", "auth/authd", 
 and "auth/munge".
-The default value is "auth/none", which means the UID included in 
-communication messages is not verified. 
+The default value is "auth/munge".
+"auth/none" includes the UID in each communication, but it is not verified. 
 This may be fine for testing purposes, but 
 \fBdo not use "auth/none" if you desire any security\fR.
 "auth/authd" indicates that Brett Chun's authd is to be used (see
-"http://www.theether.org/authd/" for more information, Note that
+"http://www.theether.org/authd/" for more information. Note that
 authd is no longer actively supported).
 "auth/munge" indicates that LLNL's MUNGE is to be used
 (this is the best supported authentication mechanism for SLURM, 
@@ -137,7 +138,7 @@ preserved).
 
 .TP
 \fBBackupAddr\fR
-Name that \fBBackupController\fR should be referred to in 
+The name that \fBBackupController\fR should be referred to in
 establishing a communications path. This name will 
 be used as an argument to the gethostbyname() function for 
 identification. For example, "elx0000" might be used to designate 
@@ -152,20 +153,20 @@ executed in the event that \fBControlMachine\fR fails. This node
 may also be used as a compute server if so desired. It will come into service 
 as a controller only upon the failure of ControlMachine and will revert 
 to a "standby" mode when the ControlMachine becomes available once again. 
-This should be a node name without the full domain name (e.g. "lx0002"). 
+This should be a node name without the full domain name.   I.e., the hostname
+returned by the \fIgethostname()\fR function cut at the first dot (e.g. use 
+"tux001" rather than "tux001.my.com"). 
 While not essential, it is recommended that you specify a backup controller.
 See  the \fBRELOCATING CONTROLLERS\fR section if you change this.
 
 .TP
 \fBBatchStartTimeout\fR
-The maximum time (in seconds) that a batch job is permitted for 
-launching before being considered missing and releasing the 
-allocation. The default value is 10 (seconds). Larger values may 
-be required if more time is required to execute the \fBProlog\fR, 
-loading user environment variables (for Moab spawned jobs), or the 
-slurmd daemon gets paged from memory.
-NOTE: The value will not be reported by "scontrol show config" command 
-until SLURM version 1.4.
+The maximum time (in seconds) that a batch job is permitted for
+launching before being considered missing and releasing the
+allocation. The default value is 10 (seconds). Larger values may be
+required if more time is required to execute the \fBProlog\fR, load
+user environment variables (for Moab spawned jobs), or if the slurmd
+daemon gets paged from memory.
 
 .TP
 \fBCacheGroups\fR
@@ -176,7 +177,7 @@ The default value is 0 to disable caching group data.
 
 .TP
 \fBCheckpointType\fR
-Define the system\-initiated checkpoint method to be used for user jobs. 
+The system\-initiated checkpoint method to be used for user jobs. 
 The slurmctld daemon must be restarted for a change in \fBCheckpointType\fR 
 to take effect. 
 Acceptable values at present include
@@ -188,9 +189,25 @@ The default value is "checkpoint/none".
 
 .TP
 \fBClusterName\fR
-The name by which this SLURM managed cluster is known for accounting 
-purposes. This is needed distinguish between accounting data from 
-multiple clusters being recorded in a single database.
+The name by which this SLURM managed cluster is known in the
+accounting database.  This is needed to distinguish accounting records
+when multiple clusters report to the same database.
+
+.TP
+\fBCompleteWait\fR
+The time, in seconds, given for a job to remain in COMPLETING state
+before any additional jobs are scheduled. 
+If set to zero, pending jobs will be started as soon as possible.
+Since a COMPLETING job's resources are released for use by other 
+jobs as soon as the \fBEpilog\fR completes on each individual node, 
+this can result in very fragmented resource allocations. 
+To provide jobs with the minimum response time, a value of zero is 
+recommended (no waiting).
+To minimize fragmentation of resources, a value equal to \fBKillWait\fR 
+plus two is recommended. 
+In that case, setting \fBKillWait\fR to a small value may be beneficial.
+The default value of \fBCompleteWait\fR is zero seconds.
+The value may not exceed 65533.
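+.br
+For example, with \fBKillWait=30\fR, a fragmentation\-minimizing setting 
+would be \fBCompleteWait=32\fR (\fBKillWait\fR plus two).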
 
 .TP
 \fBControlAddr\fR
@@ -204,20 +221,40 @@ By default the \fBControlAddr\fR will be identical in value to
 
 .TP
 \fBControlMachine\fR
-The name of the machine where SLURM control functions are executed
-as returned by the \fIgethostname()\fR function the cut at the first dot
-or the \fIhostname \-s\fR command (e.g. use "tux001" rather than "tux001.my.com"). 
-This value must be specified.
-See  the \fBRELOCATING CONTROLLERS\fR section if you change this.
+The short hostname of the machine where SLURM control functions are
+executed.  I.e., the hostname returned by the \fIgethostname()\fR
+function cut at the first dot (e.g. use "tux001" rather than
+"tux001.my.com").  This value must be specified.  See the
+\fBRELOCATING CONTROLLERS\fR section if you change this.
 
 .TP
 \fBCryptoType\fR
-Define the cryptographic signature tool to be used in the creation of 
+The cryptographic signature tool to be used in the creation of 
 job step credentials.
 The slurmctld daemon must be restarted for a change in \fBCryptoType\fR
 to take effect.
 Acceptable values at present include "crypto/munge" and "crypto/openssl".
-The default value is "crypto/openssl".
+The default value is "crypto/munge".
+
+.TP
+\fBDebugFlags\fR
+Defines specific subsystems which should provide more detailed event logging.
+Multiple subsystems can be specified with comma separators, as shown in 
+the example following this list. 
+Valid subsystems available today (with more to come) include:
+.RS
+.TP 15
+\fBCPU_Bind\fR
+CPU binding details for jobs and steps
+.TP
+\fBSteps\fR
+Slurmctld resource allocation for job steps
+.TP
+\fBTriggers\fR
+Slurmctld triggers
+.TP
+\fBWiki\fR
+Sched/wiki and wiki2 communications
+.RE
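+.LP
+For example, to request extra detail from the CPU binding and job step 
+subsystems (a sketch):
+.br
+DebugFlags=CPU_Bind,Steps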
 
 .TP
 \fBDefMemPerCPU\fR
@@ -228,6 +265,9 @@ are alocated to jobs (\fBSelectType=select/cons_res\fR).
 The default value is 0 (unlimited).
 Also see \fBDefMemPerNode\fR and \fBMaxMemPerCPU\fR.
 \fBDefMemPerCPU\fR and \fBDefMemPerNode\fR are mutually exclusive.
+NOTE: Enforcement of memory limits currently requires enabling of 
+accounting, which samples memory use on a periodic basis (data need
+not be stored, just collected).
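+.br
+For example, \fBDefMemPerCPU=1024\fR would grant each allocated CPU a 
+default of 1024 megabytes (assuming megabyte units, as with SLURM's other 
+memory parameters).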
 
 .TP
 \fBDefMemPerNode\fR
@@ -239,58 +279,59 @@ resources are shared (\fBShared=yes\fR or \fBShared=force\fR).
 The default value is 0 (unlimited).
 Also see \fBDefMemPerCPU\fR and \fBMaxMemPerNode\fR.
 \fBDefMemPerCPU\fR and \fBDefMemPerNode\fR are mutually exclusive.
+NOTE: Enforcement of memory limits currently requires enabling of 
+accounting, which samples memory use on a periodic basis (data need
+not be stored, just collected).
 
 .TP
 \fBDefaultStorageHost\fR
-Define the name of the host where the database is running and used to
-to store the accounting and job completion data.
-Only used for database type storage plugins, ignored otherwise.
-Also see \fBAccountingStorageHost\fR and \fBJobCompHost\fR.
+The default name of the machine hosting the accounting storage and
+job completion databases.
+Only used for database type storage plugins and when the
+\fBAccountingStorageHost\fR and \fBJobCompHost\fR have not been
+defined.
 
 .TP
 \fBDefaultStorageLoc\fR
-Specifies the location of the file or database where accounting 
-and job completion records are written.
+The fully qualified file name where accounting records and/or job
+completion records are written when the \fBDefaultStorageType\fR is
+"filetxt" or the name of the database where accounting records and/or job
+completion records are stored when the \fBDefaultStorageType\fR is a
+database.
 Also see \fBAccountingStorageLoc\fR and \fBJobCompLoc\fR.
 
 .TP
 \fBDefaultStoragePass\fR
-Define the password used to gain access to the database to store the
+The password used to gain access to the database to store the
 accounting and job completion data.
 Only used for database type storage plugins, ignored otherwise.
 Also see \fBAccountingStoragePass\fR and \fBJobCompPass\fR.
 
 .TP
 \fBDefaultStoragePort\fR
-Define the port the database server is listening on where we are going
-to store the accounting and job completion data.
+The listening port of the accounting storage and/or job completion
+database server.
 Only used for database type storage plugins, ignored otherwise.
 Also see \fBAccountingStoragePort\fR and \fBJobCompPort\fR.
 
 .TP
 \fBDefaultStorageType\fR
-Define the accounting and job completion storage mechanism type.
-Acceptable values at present include 
-"filetxt", "gold", "mysql", "none", "pgsql", and "slurmdbd".
-The value "filetxt" indicates that records will be written to a the file.
-The value "gold" indicates that records will be written to Gold
-(http://www.clusterresources.com/pages/products/gold-allocation-manager.php),
-.na
-which maintains its own database.
-.ad
-The value "mysql" indicates that accounting records will be written to 
-a mysql database.
-The default value is "none", which means that records are not maintained. 
-The value "pgsql" indicates that records will be written to a postresql 
-database.
-The value "slurmdbd" indicates that records will be written to SlurmDbd,
-which maintains its own database. See "man slurmdbd for more information".
-Also see \fBAccountingStorageType\fR  and \fBJobCompType\fR.
+The accounting and job completion storage mechanism type.  Acceptable
+values at present include "filetxt", "mysql", "none", "pgsql", and
+"slurmdbd".  The value "filetxt" indicates that records will be
+written to a file.  The value "mysql" indicates that accounting
+records will be written to a MySQL database.  The default value is
+"none", which means that records are not maintained.  The value
+"pgsql" indicates that records will be written to a PostgreSQL
+database.  The value "slurmdbd" indicates that records will be written
+to the SLURM DBD, which maintains its own database. See "man slurmdbd"
+for more information.
+Also see \fBAccountingStorageType\fR and \fBJobCompType\fR.
 
 .TP
 \fBDefaultStorageUser\fR
-Define the name of the user we are going to connect to the database
-with to store the accounting and job completion data.
+The user account for accessing the accounting storage and/or job
+completion database.
 Only used for database type storage plugins, ignored otherwise.
 Also see \fBAccountingStorageUser\fR and \fBJobCompUser\fR.
 
@@ -307,10 +348,13 @@ will be rejected at submission time. If set to "NO" then the job will be
 accepted and remain queued until the partition limits are altered.
 The default value is "NO".
 
+.TP
 \fBEpilog\fR
 Fully qualified pathname of a script to execute as user root on every 
 node when a user's job completes (e.g. "/usr/local/slurm/epilog"). This may 
-be used to purge files, disable user login, etc. By default there is no epilog.
+be used to purge files, disable user login, etc. 
+By default there is no epilog.
+See \fBProlog and Epilog Scripts\fR for more information.
 
 .TP
 \fBEpilogMsgTime\fR
@@ -323,28 +367,43 @@ The default value is 2000 microseconds.
 For a 1000 node job, this spreads the epilog completion messages out over
 two seconds.
 
+.TP
+\fBEpilogSlurmctld\fR
+Fully qualified pathname of a program for the slurmctld to execute 
+upon termination of a job allocation (e.g.
+"/usr/local/slurm/epilog_controller"). 
+The program executes as SlurmUser, which gives it permission to drain 
+nodes and requeue the job if a failure occurs or cancel the job if appropriate.
+The program can be used to reboot nodes or perform other work to prepare 
+resources for use. 
+See \fBProlog and Epilog Scripts\fR for more information.
+
 .TP
 \fBFastSchedule\fR
-Controls how a nodes configuration specifications in slurm.conf are used.
+Controls how a node's configuration specifications in slurm.conf are used.
 If the number of node configuration entries in the configuration file
 is significantly lower than the number of nodes, setting FastSchedule to
 1 will permit much faster scheduling decisions to be made.
 (The scheduler can just check the values in a few configuration records
-instead of possibly thousands of node records. If a job can't be initiated
-immediately, the scheduler may execute these tests repeatedly.)
+instead of possibly thousands of node records.)
 Note that on systems with hyper\-threading, the processor count
-reported by the node will be twice the actually processor count.
+reported by the node will be twice the actual processor count.
 Consider which value you want to be used for scheduling purposes.
 .RS
 .TP 5
 \fB1\fR (default)
 Consider the configuration of each node to be that specified in the
-configuration file and any node with less
-than the configured resources will be set DOWN.
+configuration file and any node with less than the configured resources 
+will be set DOWN.
 .TP
 \fB0\fR
-Base scheduling decisions upon the actual configuration of 
-each individual node. 
+Base scheduling decisions upon the actual configuration of each individual 
+node except that the node's processor count in SLURM's configuration must
+match the actual hardware configuration if \fBSchedulerType=sched/gang\fR 
+or \fBSelectType=select/cons_res\fR are configured (both of those plugins
+maintain resource allocation information using bitmaps for the cores in the
+system and must remain static, while the node's memory and disk space can
+be established later).
 .TP
 \fB2\fR
 Consider the configuration of each node to be that specified in the 
@@ -412,7 +471,7 @@ May not exceed 65533.
 
 .TP
 \fBJobAcctGatherType\fR
-Define the job accounting mechanism type.
+The job accounting mechanism type.
 Acceptable values at present include "jobacct_gather/aix" (for AIX operating
 system), "jobacct_gather/linux" (for Linux operating system) and "jobacct_gather/none"
 (no accounting data collected).
@@ -422,7 +481,7 @@ must be configured.
 
 .TP
 \fBJobAcctGatherFrequency\fR
-Define the job accounting sampling interval.
+The job accounting sampling interval.
 For jobacct_gather/none this parameter is ignored.
 For  jobacct_gather/aix and jobacct_gather/linux the parameter is a number is 
 seconds between sampling job state.
@@ -430,36 +489,41 @@ The default value is 30 seconds.
 A value of zero disables the periodic job sampling and provides accounting 
 information only on job termination (reducing SLURM interference with the job).
 
+.TP
+\fBJobCheckpointDir\fR
+Set the default directory used to store job checkpoint files.
+The default value is "/var/slurm/checkpoint".
+
 .TP
 \fBJobCompHost\fR
-Define the name of the host where the database is running and used
-to store the job completion data.
+The name of the machine hosting the job completion database.
 Only used for database type storage plugins, ignored otherwise.
 Also see \fBDefaultStorageHost\fR.
 
 .TP
 \fBJobCompLoc\fR
-The interpretation of this value depends upon the logging mechanism 
-specified by the \fBJobCompType\fR parameter either a filename or a 
-database name. 
+The fully qualified file name where job completion records are written
+when the \fBJobCompType\fR is "jobcomp/filetxt" or the database where
+job completion records are stored when the \fBJobCompType\fR is a
+database.
 Also see \fBDefaultStorageLoc\fR.
 
 .TP
 \fBJobCompPass\fR
-Define the password used to gain access to the database to store the job completion data.
+The password used to gain access to the database to store the job
+completion data.
 Only used for database type storage plugins, ignored otherwise.
 Also see \fBDefaultStoragePass\fR.
 
 .TP
 \fBJobCompPort\fR
-Define the port the database server is listening on where we are going
-to store the job completion data.
+The listening port of the job completion database server.
 Only used for database type storage plugins, ignored otherwise.
 Also see \fBDefaultStoragePort\fR.
 
 .TP
 \fBJobCompType\fR
-Define the job completion logging mechanism type.
+The job completion logging mechanism type.
 Acceptable values at present include "jobcomp/none", "jobcomp/filetxt", 
 "jobcomp/mysql", "jobcomp/pgsql", and "jobcomp/script"".
 The default value is "jobcomp/none", which means that upon job completion 
@@ -471,28 +535,27 @@ written to a text file specified by the \fBJobCompLoc\fR parameter.
 The value "jobcomp/mysql" indicates that a record of the job should be 
 written to a mysql database specified by the \fBJobCompLoc\fR parameter.
 The value "jobcomp/pgsql" indicates that a record of the job should be 
-written to a postgresql database specified by the \fBJobCompLoc\fR parameter.
+written to a PostgreSQL database specified by the \fBJobCompLoc\fR parameter.
 The value "jobcomp/script" indicates that a script specified by the 
 \fBJobCompLoc\fR parameter is to be executed with environment variables 
 indicating the job information.
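+For example, the following illustrative settings (the log pathname is 
+hypothetical) would write a record of each completed job to a local 
+text file:
+.nf
+
+JobCompType=jobcomp/filetxt
+JobCompLoc=/var/log/slurm/job_completions
+
+.fi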
 
 .TP
 \fBJobCompUser\fR
-Define the name of the user we are going to connect to the database
-with to store the job completion data.
+The user account for accessing the job completion database.
 Only used for database type storage plugins, ignored otherwise.
 Also see \fBDefaultStorageUser\fR.
 
 .TP
 \fBJobCredentialPrivateKey\fR
 Fully qualified pathname of a file containing a private key used for 
-authentication by Slurm daemons.
+authentication by SLURM daemons.
 This parameter is ignored if \fBCryptType=munge\fR.
 
 .TP
 \fBJobCredentialPublicCertificate\fR
 Fully qualified pathname of a file containing a public key used for 
-authentication by Slurm daemons.
+authentication by SLURM daemons.
 This parameter is ignored if \fBCryptType=munge\fR.
 
 .TP
@@ -514,14 +577,21 @@ Use the \fBsbatch\fR \fI\-\-no\-requeue\fR or \fI\-\-requeue\fR
 option to change the default behavior for individual jobs.
 The default value is 1.
 
+.TP
+\fBKillOnBadExit\fR
+If set to 1, the job will be terminated immediately when one of its 
+processes crashes or aborts. With the default value of 0, if one of 
+the processes crashes or aborts, the other processes will continue 
+to run.
+
 .TP
 \fBKillWait\fR
 The interval, in seconds, given to a job's processes between the 
 SIGTERM and SIGKILL signals upon reaching its time limit. 
-If the job fails to terminate gracefully 
-in the interval specified, it will be forcibly terminated. 
+If the job fails to terminate gracefully in the interval specified, 
+it will be forcibly terminated. 
 The default value is 30 seconds.
-May not exceed 65533.
+The value may not exceed 65533.
 
 .TP
 \fBLicenses\fR
@@ -561,6 +631,9 @@ are alocated to jobs (\fBSelectType=select/cons_res\fR).
 The default value is 0 (unlimited).
 Also see \fBDefMemPerCPU\fR and \fBMaxMemPerNode\fR.
 \fBMaxMemPerCPU\fR and \fBMaxMemPerNode\fR are mutually exclusive.
+NOTE: Enforcement of memory limits currently requires enabling of 
+accounting, which samples memory use on a periodic basis (data need
+not be stored, just collected).
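+As an illustrative sketch, memory limits might be enforced with a 
+combination of settings such as:
+.nf
+
+SelectType=select/cons_res
+SelectTypeParameters=CR_Core_Memory
+MaxMemPerCPU=2048
+JobAcctGatherType=jobacct_gather/linux
+JobAcctGatherFrequency=30
+
+.fi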
 
 .TP
 \fBMaxMemPerNode\fR
@@ -572,6 +645,9 @@ resources are shared (\fBShared=yes\fR or \fBShared=force\fR).
 The default value is 0 (unlimited).
 Also see \fBDefMemPerNode\fR and \fBMaxMemPerCPU\fR.
 \fBMaxMemPerCPU\fR and \fBMaxMemPerNode\fR are mutually exclusive.
+NOTE: Enforcement of memory limits currently requires enabling of 
+accounting, which samples memory use on a periodic basis (data need
+not be stored, just collected).
 
 .TP
 \fBMessageTimeout\fR
@@ -599,6 +675,25 @@ Currently supported versions include:
 \fBnone\fR (default, which works for many other versions of MPI including 
 LAM MPI and Open MPI).
 
+.TP
+\fBMpiParams\fR
+MPI parameters. 
+Currently used only to identify the range of communication ports to 
+be used by OpenMPI. The input format is "ports=12000\-12999".
+
+.TP
+\fBOverTimeLimit\fR
+Number of minutes by which a job can exceed its time limit before 
+being cancelled. 
+The configured job time limit is treated as a \fIsoft\fR limit.
+Adding \fBOverTimeLimit\fR to the \fIsoft\fR limit provides a \fIhard\fR
+limit, at which point the job is cancelled.
+This is particularly useful for backfill scheduling, which bases its 
+decisions upon each job's soft time limit.
+The default value is zero.
+May not exceed 65533 minutes.
+A value of "UNLIMITED" is also supported.
+
 .TP
 \fBPluginDir\fR
 Identifies the places in which to look for SLURM plugins. 
@@ -616,6 +711,113 @@ part of a user's job step.  Default location is "plugstack.conf"
 in the same directory as the system slurm.conf. For more information
 on SPANK plugins, see the \fBspank\fR(8) manual.
 
+.TP
+\fBPriorityDecayHalfLife\fR
+This determines how long prior resource use is considered when computing
+how over\- or under\-serviced an association (user, bank account and 
+cluster) is in establishing job priority.  If set to 0 no decay will be 
+applied.  This is helpful if you want to enforce hard time limits per 
+association.  If set to 0, PriorityUsageResetPeriod must be set to some 
+interval.
+Applicable only if PriorityType=priority/multifactor.
+The unit is a time string (i.e. min, hr:min:00, days\-hr:min:00, 
+or days\-hr).  The default value is 7\-0 (7 days).
+
+.TP
+\fBPriorityFavorSmall\fR
+Specifies that small jobs should be given preferential scheduling priority.
+Supported values are "YES" and "NO". 
+Applicable only if PriorityType=priority/multifactor.
+The default value is "NO".
+
+.TP
+\fBPriorityMaxAge\fR
+Specifies the job age which will be given the maximum age factor in computing
+priority. For example, a value of 30 minutes would result in all jobs over
+30 minutes old getting the same age\-based priority.
+Applicable only if PriorityType=priority/multifactor.
+The unit is a time string (i.e. min, hr:min:00, days\-hr:min:00, 
+or days\-hr).  The default value is 7\-0 (7 days).
+
+.TP
+\fBPriorityUsageResetPeriod\fR
+At this interval the usage of associations will be reset to 0.  This is used 
+if you want to enforce hard limits on time usage per association.  If 
+PriorityDecayHalfLife is set to 0, no decay will happen and this is the 
+only way to reset the usage accumulated by running jobs.  By default this is 
+turned off and use of the PriorityDecayHalfLife option is advised instead, 
+to avoid leaving the cluster with nothing able to run; but if your scheme 
+allows associations only certain amounts of time on the system, this is 
+the way to do it.
+Applicable only if PriorityType=priority/multifactor.
+.RS
+.TP 12
+\fBNONE\fR
+Never clear historic usage. The default value.
+.TP
+\fBNOW\fR
+Clear the historic usage now. 
+Executed at startup and reconfiguration time.
+.TP
+\fBDAILY\fR
+Cleared every day at midnight.
+.TP
+\fBWEEKLY\fR
+Cleared every week on Sunday at time 00:00.
+.TP
+\fBMONTHLY\fR
+Cleared on the first day of each month at time 00:00.
+.TP
+\fBQUARTERLY\fR
+Cleared on the first day of each quarter at time 00:00.
+.TP
+\fBYEARLY\fR
+Cleared on the first day of each year at time 00:00.
+.RE
+
+.TP
+\fBPriorityType\fR
+This specifies the plugin to be used in establishing a job's scheduling
+priority. Supported values are "priority/basic" (jobs are prioritized
+by order of arrival, also suitable for sched/wiki and sched/wiki2) and
+"priority/multifactor" (jobs are prioritized based upon size, age, 
+fair\-share of allocation, etc). 
+The default value is "priority/basic".
+
+.TP
+\fBPriorityWeightAge\fR
+An integer value that sets the degree to which the queue wait time
+component contributes to the job's priority.
+Applicable only if PriorityType=priority/multifactor.
+The default value is 0.
+
+.TP
+\fBPriorityWeightFairshare\fR
+An integer value that sets the degree to which the fair\-share
+component contributes to the job's priority.
+Applicable only if PriorityType=priority/multifactor.
+The default value is 0.
+
+.TP
+\fBPriorityWeightJobSize\fR
+An integer value that sets the degree to which the job size
+component contributes to the job's priority.
+Applicable only if PriorityType=priority/multifactor.
+The default value is 0.
+
+.TP
+\fBPriorityWeightPartition\fR
+An integer value that sets the degree to which the node partition
+component contributes to the job's priority.
+Applicable only if PriorityType=priority/multifactor.
+The default value is 0.
+
+.TP
+\fBPriorityWeightQOS\fR
+An integer value that sets the degree to which the Quality Of Service
+component contributes to the job's priority.
+Applicable only if PriorityType=priority/multifactor.
+The default value is 0.
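+.LP
+As an illustrative sketch (weight values are arbitrary examples), a site 
+weighting fair\-share most heavily might combine the priority options 
+above as follows:
+.nf
+
+PriorityType=priority/multifactor
+PriorityDecayHalfLife=7\-0
+PriorityWeightFairshare=10000
+PriorityWeightAge=1000
+PriorityWeightJobSize=100
+
+.fi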
+
 .TP
 \fBPrivateData\fR
 This controls what type of information is hidden from regular users.
@@ -641,6 +843,12 @@ prevents users from viewing node state information.
 \fBpartitions\fR 
 prevents users from viewing partition state information.
 .TP
+\fBreservations\fR 
+prevents regular users from viewing reservations.
+.TP
+\fBusage\fR 
+(NON-SLURMDBD ACCOUNTING ONLY) prevents users from viewing 
+usage of any other user.  This applies to sreport.
+.TP
 \fBusers\fR 
 (NON-SLURMDBD ACCOUNTING ONLY) prevents users from viewing 
 information of any user other than themselves, this also makes it so users can 
@@ -648,9 +856,6 @@ only see associations they deal with.
 Coordinators can see associations of all users they are coordinator of, 
 but can only see themselves when listing users.
-.TP
-\fBusage\fR 
-(NON-SLURMDBD ACCOUNTING ONLY) prevents users from viewing 
-usage of any other user.  This applys to sreport.
 .RE
 
 
@@ -690,19 +895,24 @@ default for all other systems
 
 .TP
 \fBProlog\fR
-Fully qualified pathname of a script for the slurmd to execute whenever
-it is asked to run a job step from a new job allocation.  (e.g.
+Fully qualified pathname of a program for the slurmd to execute 
+whenever it is asked to run a job step from a new job allocation (e.g.
 "/usr/local/slurm/prolog").  The slurmd executes the script before starting
-the job step.  This may be used to purge files, enable user login, etc.
+the first job step.  This may be used to purge files, enable user login, etc.
 By default there is no prolog. Any configured script is expected to 
 complete execution quickly (in less time than \fBMessageTimeout\fR).
+See \fBProlog and Epilog Scripts\fR for more information.
 
-NOTE:  The Prolog script is ONLY run on any individual
-node when it first sees a job step from a new allocation; it does not
-run the Prolog immediately when an allocation is granted.  If no job steps
-from an allocation are run on a node, it will never run the Prolog for that
-allocation.  The Epilog, on the other hand, always runs on every node of an
-allocation when the allocation is released.
+.TP
+\fBPrologSlurmctld\fR
+Fully qualified pathname of a program for the slurmctld to execute 
+before granting a new job allocation (e.g.
+"/usr/local/slurm/prolog_controller"). 
+The program executes as SlurmUser, which gives it permission to drain 
+nodes, requeue the job if a failure occurs, or cancel the job if appropriate.
+The program can be used to reboot nodes or perform other work to prepare 
+resources for use. 
+See \fBProlog and Epilog Scripts\fR for more information.
 
 .TP
 \fBPropagatePrioProcess\fR
@@ -728,7 +938,7 @@ the default action is to propagate all limits.
 Only one of the parameters, either
 \fBPropagateResourceLimits\fR or \fBPropagateResourceLimitsExcept\fR,
 may be specified.
-The following limit names are supported by Slurm (although some 
+The following limit names are supported by SLURM (although some 
 options may not be supported on some systems):
 .RS
 .TP 10
@@ -782,17 +992,22 @@ See \fBPropagateResourceLimits\fR above for a list of valid limit names.
 \fBResumeProgram\fR
 SLURM supports a mechanism to reduce power consumption on nodes that 
 remain idle for an extended period of time. 
-This is typically accomplished by reducing voltage and frequency. 
+This is typically accomplished by reducing voltage and frequency or powering
+the node down. 
 \fBResumeProgram\fR is the program that will be executed when a node 
 in power save mode is assigned work to perform.
+For reasons of reliability, \fBResumeProgram\fR may execute more than once
+for a node when the \fBslurmctld\fR daemon crashes and is restarted.
+If \fBResumeProgram\fR is unable to restore a node to service, it should
+requeue any job associated with the node and set the node state to DRAIN.
 The program executes as \fBSlurmUser\fR.
 The argument to the program will be the names of nodes to
 be removed from power savings mode (using SLURM's hostlist
 expression format).
 By default no program is run.
-Related configuration options include \fBResumeRate\fR, \fBSuspendRate\fR,
-\fBSuspendTime\fR, \fBSuspendProgram\fR, \fBSuspendExcNodes\fR, and
-\fBSuspendExcParts\fR.
+Related configuration options include \fBResumeTimeout\fR, \fBResumeRate\fR, 
+\fBSuspendRate\fR, \fBSuspendTime\fR, \fBSuspendTimeout\fR, \fBSuspendProgram\fR, 
+\fBSuspendExcNodes\fR, and \fBSuspendExcParts\fR.
 More information is available at the SLURM web site
 (https://computing.llnl.gov/linux/slurm/power_save.html).
 
@@ -803,11 +1018,35 @@ operation by \fBResumeProgram\fR.
 The value is number of nodes per minute and it can be used to prevent 
 power surges if a large number of nodes in power save mode are 
 assigned work at the same time (e.g. a large job starts).
-A value of zero results in no limits being imposed.
-The default value is 60 nodes per minute.
-Related configuration options include \fBResumeProgram\fR, \fBSuspendRate\fR,
-\fBSuspendTime\fR, \fBSuspendProgram\fR, \fBSuspendExcNodes\fR, and
-\fBSuspendExcParts\fR.
+A value of zero results in no limits being imposed. 
+The default value is 300 nodes per minute.
+Related configuration options include \fBResumeTimeout\fR, \fBResumeProgram\fR, 
+\fBSuspendRate\fR, \fBSuspendTime\fR, \fBSuspendTimeout\fR, \fBSuspendProgram\fR, 
+\fBSuspendExcNodes\fR, and \fBSuspendExcParts\fR.
+
+.TP
+\fBResumeTimeout\fR
+Maximum time permitted (in seconds) between when a node resume request 
+is issued and when the node is actually available for use. 
+Nodes which fail to respond in this time frame may be marked DOWN and
+the jobs scheduled on the node requeued.
+The default value is 60 seconds.
+Related configuration options include \fBResumeProgram\fR, \fBResumeRate\fR, 
+\fBSuspendRate\fR, \fBSuspendTime\fR, \fBSuspendTimeout\fR, \fBSuspendProgram\fR, 
+\fBSuspendExcNodes\fR and \fBSuspendExcParts\fR.
+More information is available at the SLURM web site
+(https://computing.llnl.gov/linux/slurm/power_save.html).
+
+.TP
+\fBResvOverRun\fR
+Describes how long a job already running in a reservation should be 
+permitted to execute after the end time of the reservation has been 
+reached. 
+The time period is specified in minutes and the default value is 0
+(kill the job immediately).
+The value may not exceed 65533 minutes, although a value of "UNLIMITED"
+is supported to permit a job to run indefinitely after its reservation
+is terminated.
 
 .TP
 \fBReturnToService\fR
@@ -855,6 +1094,15 @@ and
 
 would run \fBxterm\fR with the title set to the SLURM jobid.
 
+.TP
+\fBSchedulerParameters\fR
+The interpretation of this parameter varies by \fBSchedulerType\fR.
+In the case of \fBSchedulerType=sched/backfill\fR, there is one 
+optional argument of the form "interval=#", where "#" is the number of
+seconds between iterations. Higher values result in less overhead 
+and less responsiveness. The default value is 5 seconds on BlueGene 
+systems and 10 seconds otherwise.
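+For example, the following illustrative settings reduce backfill 
+overhead by iterating every 30 seconds:
+.nf
+
+SchedulerType=sched/backfill
+SchedulerParameters=interval=30
+
+.fi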
+
 .TP
 \fBSchedulerPort\fR
 The port number on which slurmctld should listen for connection requests.
@@ -873,7 +1121,7 @@ scheduling module "sched/backfill" (see \fBSchedulerType\fR).
 .TP
 \fBSchedulerTimeSlice\fR
 Number of seconds in each time slice when \fBSchedulerType=sched/gang\fR.
-The default value is 30.
+The default value is 30 seconds.
 
 .TP
 \fBSchedulerType\fR
@@ -966,16 +1214,32 @@ Setting a value for \fBDefMemPerCPU\fR is strongly recommended.
 .TP
 \fBCR_Core\fR
 Cores are consumable resources.
+On nodes with hyper\-threads, each thread is counted as a CPU to 
+satisfy a job's resource requirement, but multiple jobs are not 
+allocated threads on the same core.
 .TP
 \fBCR_Core_Memory\fR
 Cores and memory are consumable resources.
+On nodes with hyper\-threads, each thread is counted as a CPU to 
+satisfy a job's resource requirement, but multiple jobs are not 
+allocated threads on the same core.
 Setting a value for \fBDefMemPerCPU\fR is strongly recommended.
 .TP
 \fBCR_Socket\fR
-Sockets are consumable resources.
+Sockets are consumable resources. 
+On nodes with multiple cores, each core or thread is counted as a CPU 
+to satisfy a job's resource requirement, but multiple jobs are not 
+allocated resources on the same socket.
+Note that jobs requesting one CPU will only be given access to 
+that one CPU, but no other job will share the socket.
 .TP
 \fBCR_Socket_Memory\fR
-Memory and CPUs are consumable resources.
+Memory and sockets are consumable resources. 
+On nodes with multiple cores, each core or thread is counted as a CPU 
+to satisfy a job's resource requirement, but multiple jobs are not
+allocated resources on the same socket.
+Note that jobs requesting one CPU will only be given access to 
+that one CPU, but no other job will share the socket.
 Setting a value for \fBDefMemPerCPU\fR is strongly recommended.
 .TP
 \fBCR_Memory\fR
@@ -992,6 +1256,13 @@ This user must exist on all nodes of the cluster for authentication
 of communications between SLURM components.
 The default value is "root". 
 
+.TP
+\fBSlurmdUser\fR
+The name of the user that the \fBslurmd\fR daemon executes as. 
+This user must exist on all nodes of the cluster for authentication 
+of communications between SLURM components.
+The default value is "root". 
+
 .TP
 \fBSlurmctldDebug\fR
 The level of detail to provide \fBslurmctld\fR daemon's logs. 
@@ -1091,6 +1362,21 @@ completion of a job step.  The command line arguments for the executable will
 be the command and arguments of the job step.  This configuration parameter
 may be overridden by srun's \fB\-\-epilog\fR parameter.
 
+.TP
+\fBSrunIOTimeout\fR
+While \fBsrun\fR detects the termination of tasks under almost all 
+circumstances, there are abnormal daemon failures which may not be 
+detected immediately. 
+Such abnormal failures can be detected by \fBsrun\fR using a more active
+polling mechanism. 
+Note that polling does have an impact upon application performance.
+The interval of polling is specified by the \fBSrunIOTimeout\fR 
+parameter and its units are seconds. 
+\fBsrun\fR's \fB\-\-io\-timeout\fR option takes precedence over
+this configuration parameter.
+The default value is 0 (no polling).
+The value may not exceed 65533 seconds.
+
 .TP
 \fBSrunProlog\fR
 Fully qualified pathname of an executable to be run by srun prior to the
@@ -1118,9 +1404,9 @@ Specifies the nodes which are to not be placed in power save mode, even
 if the node remains idle for an extended period of time.
 Use SLURM's hostlist expression to identify nodes.
 By default no nodes are excluded.
-Related configuration options include \fBResumeProgram\fR, \fBResumeRate\fR,
-\fBSuspendProgram\fR, \fBSuspendRate\fR, \fBSuspendTime\fR and
-\fBSuspendExcParts\fR.
+Related configuration options include \fBResumeTimeout\fR, \fBResumeProgram\fR, 
+\fBResumeRate\fR, \fBSuspendProgram\fR, \fBSuspendRate\fR, \fBSuspendTime\fR, 
+\fBSuspendTimeout\fR, and \fBSuspendExcParts\fR.
 
 .TP
 \fBSuspendExcParts\fR
@@ -1128,23 +1414,25 @@ Specifies the partitions whose nodes are to not be placed in power save
 mode, even if the node remains idle for an extended period of time.
 Multiple partitions can be identified and separated by commas.
 By default no nodes are excluded.
-Related configuration options include \fBResumeProgram\fR, \fBResumeRate\fR,
-\fBSuspendProgram\fR, \fBSuspendRate\fR, \fBSuspendTime\fR and
-\fBSuspendExcNodes\fR.
+Related configuration options include \fBResumeTimeout\fR, \fBResumeProgram\fR, 
+\fBResumeRate\fR, \fBSuspendProgram\fR, \fBSuspendRate\fR, \fBSuspendTime\fR, 
+\fBSuspendTimeout\fR, and \fBSuspendExcNodes\fR.
 
 .TP
 \fBSuspendProgram\fR
 \fBSuspendProgram\fR is the program that will be executed when a node
 remains idle for an extended period of time.
 This program is expected to place the node into some power save mode.
+This can be used to reduce the frequency and voltage of a node or 
+completely power the node off.
 The program executes as \fBSlurmUser\fR.
 The argument to the program will be the names of nodes to
 be placed into power savings mode (using SLURM's hostlist
 expression format).
-By default no program is run.
-Related configuration options include \fBResumeProgram\fR, \fBResumeRate\fR,
-\fBSuspendRate\fR, \fBSuspendTime\fR, \fBSuspendExcNodes\fR, and
-\fBSuspendExcParts\fR.
+By default, no program is run.
+Related configuration options include \fBResumeTimeout\fR, \fBResumeProgram\fR, 
+\fBResumeRate\fR, \fBSuspendRate\fR, \fBSuspendTime\fR, \fBSuspendTimeout\fR, 
+\fBSuspendExcNodes\fR, and \fBSuspendExcParts\fR.
 
 .TP
 \fBSuspendRate\fR
@@ -1153,18 +1441,31 @@ The value is number of nodes per minute and it can be used to prevent
 a large drop in power power consumption (e.g. after a large job completes).
 A value of zero results in no limits being imposed.
 The default value is 60 nodes per minute.
-Related configuration options include \fBResumeProgram\fR, \fBResumeRate\fR,
-\fBSuspendProgram\fR, \fBSuspendTime\fR, \fBSuspendExcNodes\fR, and
-\fBSuspendExcParts\fR.
+Related configuration options include \fBResumeTimeout\fR, \fBResumeProgram\fR, 
+\fBResumeRate\fR, \fBSuspendProgram\fR, \fBSuspendTime\fR, \fBSuspendTimeout\fR, 
+\fBSuspendExcNodes\fR, and \fBSuspendExcParts\fR.
 
 .TP
 \fBSuspendTime\fR
 Nodes which remain idle for this number of seconds will be placed into 
-power save mode by \fBSuspendProgram\fR,
+power save mode by \fBSuspendProgram\fR. 
 A value of \-1 disables power save mode and is the default.
-Related configuration options include \fBResumeProgram\fR, \fBResumeRate\fR,
-\fBSuspendProgram\fR, \fBSuspendRate\fR, \fBSuspendExcNodes\fR, and
-\fBSuspendExcParts\fR.
+Related configuration options include \fBResumeTimeout\fR, \fBResumeProgram\fR, 
+\fBResumeRate\fR, \fBSuspendProgram\fR, \fBSuspendRate\fR, \fBSuspendTimeout\fR, 
+\fBSuspendExcNodes\fR, and \fBSuspendExcParts\fR.
+
+.TP
+\fBSuspendTimeout\fR
+Maximum time permitted (in seconds) between when a node suspend request 
+is issued and when the node shuts down.
+At that time the node must be ready for a resume request to be issued 
+as needed for new work. 
+The default value is 30 seconds.
+Related configuration options include \fBResumeProgram\fR, \fBResumeRate\fR, 
+\fBResumeTimeout\fR, \fBSuspendRate\fR, \fBSuspendTime\fR, \fBSuspendProgram\fR, 
+\fBSuspendExcNodes\fR and \fBSuspendExcParts\fR.
+More information is available at the SLURM web site
+(https://computing.llnl.gov/linux/slurm/power_save.html).
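+.LP
+As an illustrative sketch (the program pathnames are hypothetical), a 
+basic power save configuration might be:
+.nf
+
+SuspendTime=1800
+SuspendRate=60
+SuspendTimeout=30
+SuspendProgram=/usr/sbin/slurm_suspend
+ResumeRate=300
+ResumeTimeout=60
+ResumeProgram=/usr/sbin/slurm_resume
+
+.fi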
 
 .TP
 \fBSwitchType\fR
@@ -1206,15 +1507,46 @@ which is supported by SLURM.
 .TP
 \fBTaskPluginParam\fR
 Optional parameters for the task plugin.
+Multiple options may be specified in a comma separated list.
+If \fBNone\fR, \fBSockets\fR, \fBCores\fR, \fBThreads\fR,
+and/or \fBVerbose\fR are specified, they will override
+the \fB\-\-cpu_bind\fR option specified by the user
+in the \fBsrun\fR command.
+\fBNone\fR, \fBSockets\fR, \fBCores\fR and \fBThreads\fR are mutually 
+exclusive and, since they decrease scheduling flexibility, are not generally 
+recommended (select no more than one of them).
+\fBCpusets\fR and \fBSched\fR
+are mutually exclusive (select only one of them).
+An illustrative example follows the list below.
+
 .RS
 .TP 10
+\fBCores\fR
+Always bind to cores. 
+Overrides user options or automatic binding.
+.TP
 \fBCpusets\fR
-Use cpusets to perform task affinity functions
+Use cpusets to perform task affinity functions.
+By default, \fBSched\fR task binding is performed.
+.TP
+\fBNone\fR
+Perform no task binding. 
+Overrides user options or automatic binding.
 .TP
 \fBSched\fR
 Use \fIsched_setaffinity\fR or \fIplpa_sched_setaffinity\fR
 (if available) to bind tasks to processors.
-This is the default mode of operation is no parameters are specified.
+.TP
+\fBSockets\fR
+Always bind to sockets. 
+Overrides user options or automatic binding.
+.TP
+\fBThreads\fR
+Always bind to threads. 
+Overrides user options or automatic binding.
+.TP
+\fBVerbose\fR
+Verbosely report binding before tasks run. 
+Overrides user options.
 .RE
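+For example, illustrative settings to bind tasks with cpusets and 
+verbosely report the binding as tasks launch:
+.nf
+
+TaskPlugin=task/affinity
+TaskPluginParam=Cpusets,Verbose
+
+.fi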
 
 .TP
@@ -1258,6 +1590,21 @@ temporary storage. This parameter is used in establishing a node's \fBTmpDisk\fR
 space. 
 The default value is "/tmp".
 
+.TP
+\fBTopologyPlugin\fR
+Identifies the plugin to be used for determining the network topology
+and optimizing job allocations to minimize network contention. 
+Acceptable values include 
+"topology/3d_torus" (default for Cray XT, IBM BlueGene and Sun Constellation 
+systems, best\-fit logic over three\-dimensional topology),
+"topology/none" (default for other systems, best\-fit
+logic over one\-dimensional topology) and 
+"topology/tree" (determine the network topology based
+upon information contained in a topology.conf file).
+See \fBNETWORK TOPOLOGY\fR below for details.
+Additional plugins may be provided in the future which gather topology
+information directly from the network.
+
 .TP
 \fBTrackWCKey\fR
 Boolean yes or no.  Used to set display and track of the Workload  
@@ -1324,10 +1671,10 @@ Specifies how many seconds the srun command should by default wait after
 the first task terminates before terminating all remaining tasks. The 
 "\-\-wait" option on the srun command line overrides this value. 
 If set to 0, this feature is disabled.
-May not exceed 65533.
+May not exceed 65533 seconds.
 
 .LP
-The configuration of nodes (or machines) to be managed by Slurm is 
+The configuration of nodes (or machines) to be managed by SLURM is 
 also specified in \fB/etc/slurm.conf\fR. 
 Changes in node configuration (e.g. adding nodes, changing their
 processor count, etc.) require restarting the slurmctld daemon.
@@ -1474,7 +1821,7 @@ By default a node has no features.
 .TP
 \fBProcs\fR
 Number of logical processors on the node (e.g. "2").
-If Procs is omitted, it will be inferred from
+If \fBProcs\fR is omitted, it will be set equal to the product of
 \fBSockets\fR, \fBCoresPerSocket\fR, and \fBThreadsPerCore\fR.
 The default value is 1. 
 
@@ -1519,6 +1866,12 @@ Also see the \fBDownNodes\fR parameter below.
 .TP
 \fBThreadsPerCore\fR
 Number of logical threads in a single physical core (e.g. "2").
+Note that SLURM can allocate resources to jobs down to the
+resolution of a core. If your system is configured with more than
+one thread per core, execution of a different job on each thread 
+is not supported. 
+A job can execute one task per thread from within one job step or
+execute a distinct job step on each of the threads.
 The default value is 1.
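+For example, an illustrative node definition (hypothetical node names) 
+on which \fBProcs\fR equals the product of the other three values:
+.nf
+
+NodeName=tux[0\-15] Sockets=2 CoresPerSocket=4 ThreadsPerCore=2 Procs=16
+
+.fi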
 
 .TP
@@ -1558,6 +1911,7 @@ scheduling overhead), give each node a distinct \fBWeight\fR
 value and they will be added to the pool of nodes being 
 considered for scheduling individually.
 The default value is 1.
+
 .LP
 The "DownNodes=" configuration permits you to mark certain nodes as in a 
 DOWN, DRAIN, FAIL, or FAILING state without altering the permanent 
@@ -1586,10 +1940,18 @@ to any new jobs.
 "FAILING" indicates the node is expected to fail soon, has
 one or more jobs allocated to it, but will not be allocated
 to any new jobs.
+"FUTURE" indicates the node is defined for future use and need not 
+exist when the SLURM daemons are started. These nodes can be made available 
+for use simply by updating the node state using the scontrol command rather 
+than restarting the slurmctld daemon. After these nodes are made available, 
+change their \fBState\fR in the slurm.conf file. Until these nodes are made 
+available, they will not be seen using any SLURM commands or APIs nor will 
+any attempt be made to contact them. 
 "UNKNOWN" indicates the node's state is undefined (BUSY or IDLE), 
 but will be established when the \fBslurmd\fR daemon on that node 
 registers.
 The default value is "UNKNOWN".
+
 .LP
 The partition configuration permits you to establish different job 
 limits or access controls for various groups (or partitions) of nodes. 
@@ -1611,6 +1973,14 @@ Each line of partition configuration information should
 represent a different partition.
 The partition configuration file contains the following information:
 
+.TP
+\fBAllocNodes\fR
+Comma separated list of nodes from which users can execute jobs in the
+partition.
+Node names may be specified using the node range expression syntax
+described above.
+The default value is "ALL".
+
 .TP
 \fBAllowGroups\fR
 Comma separated list of group IDs which may execute jobs in the partition. 
@@ -1663,6 +2033,12 @@ Time resolution is one minute and second values are rounded up to
 the next minute.
 This limit does not apply to jobs executed by SlurmUser or user root.
 
+.TP
+\fBDefaultTime\fR
+Run time limit used for jobs that don't specify a value. If not set
+then MaxTime will be used.
+Format is the same as for MaxTime.
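+For example, an illustrative partition definition combining these options:
+.nf
+
+PartitionName=debug Nodes=dev[0\-8] Default=YES DefaultTime=30 MaxTime=120
+
+.fi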
+
 .TP
 \fBMinNodes\fR
 Minimum count of nodes (or base partitions for BlueGene systems) which 
@@ -1772,6 +2148,103 @@ Recommended only for systems running with gang scheduling
 State of partition or availability for use.  Possible values 
 are "UP" or "DOWN". The default value is "UP".
 
+.SH "Prolog and Epilog Scripts"
+There are a variety of prolog and epilog program options that 
+execute with various permissions and at various times. 
+The four options most likely to be used are: 
+\fBProlog\fR and \fBEpilog\fR (executed once on each compute node
+for each job) plus \fBPrologSlurmctld\fR and \fBEpilogSlurmctld\fR
+(executed once on the \fBControlMachine\fR for each job).
+ 
+NOTE:  The Prolog script is ONLY run on any individual
+node when it first sees a job step from a new allocation; it does not
+run the Prolog immediately when an allocation is granted.  If no job steps
+from an allocation are run on a node, it will never run the Prolog for that
+allocation.  The Epilog, on the other hand, always runs on every node of an
+allocation when the allocation is released.
+
+Information about the job is passed to the script using environment
+variables.
+Unless otherwise specified, these environment variables are available
+to all of the programs.
+.TP
+\fBBASIL_RESERVATION_ID\fR
+Basil reservation ID.
+Available on Cray XT systems only.
+.TP
+\fBMPIRUN_PARTITION\fR
+BlueGene partition name.
+Available on BlueGene systems only.
+.TP
+\fBSLURM_JOB_ACCOUNT\fR
+Account name used for the job. 
+Available in \fBPrologSlurmctld\fR and \fBEpilogSlurmctld\fR only.
+.TP
+\fBSLURM_JOB_CONSTRAINTS\fR
+Features required to run the job. 
+Available in \fBPrologSlurmctld\fR and \fBEpilogSlurmctld\fR only.
+.TP
+\fBSLURM_JOB_GID\fR
+Group ID of the job's owner.
+Available in \fBPrologSlurmctld\fR and \fBEpilogSlurmctld\fR only.
+.TP
+\fBSLURM_JOB_GROUP\fR
+Group name of the job's owner.
+Available in \fBPrologSlurmctld\fR and \fBEpilogSlurmctld\fR only.
+.TP
+\fBSLURM_JOB_ID\fR
+Job ID.
+.TP
+\fBSLURM_JOB_NAME\fR
+Name of the job.
+Available in \fBPrologSlurmctld\fR and \fBEpilogSlurmctld\fR only.
+.TP
+\fBSLURM_JOB_NODELIST\fR
+Nodes assigned to job. A SLURM hostlist expression.
+"scontrol show hostnames" can be used to convert this to a
+list of individual host names.
+Available in \fBPrologSlurmctld\fR and \fBEpilogSlurmctld\fR only.
+.TP
+\fBSLURM_JOB_PARTITION\fR
+Partition that job runs in.
+Available in \fBPrologSlurmctld\fR and \fBEpilogSlurmctld\fR only.
+.TP
+\fBSLURM_JOB_UID\fR
+User ID of the job's owner.
+.TP
+\fBSLURM_JOB_USER\fR
+User name of the job's owner.
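+.LP
+As a minimal illustrative sketch (the pathname and cleanup logic are 
+hypothetical), an \fBEpilog\fR script might use these variables as follows:
+.nf
+
+#!/bin/sh
+# Hypothetical cleanup: remove the job's scratch space using the
+# job ID supplied in the environment.
+rm \-rf /tmp/scratch.${SLURM_JOB_ID}
+
+.fi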
+
+.SH "NETWORK TOPOLOGY"
+SLURM is able to optimize job allocations to minimize network contention.
+Special SLURM logic is used to optimize allocations on systems with a 
+three\-dimensional interconnect (BlueGene, Sun Constellation, etc.)
+and information about configuring those systems is available at 
+<https://computing.llnl.gov/linux/slurm/>.
+For a hierarchical network, SLURM needs to have detailed information 
+about how nodes are configured on the network switches.
+.LP
+Given network topology information, SLURM allocates all of a job's 
+resources onto a single leaf of the network (if possible) using a best\-fit 
+algorithm.
+Otherwise it will allocate a job's resources onto multiple leaf switches
+so as to minimize the use of higher\-level switches. 
+The \fBTopologyPlugin\fR parameter controls which plugin is used to
+collect network topology information. 
+The only values presently supported are 
+"topology/3d_torus" (default for IBM BlueGene, Sun Constellation and 
+Cray XT systems, performs best\-fit logic over three\-dimensional topology),
+"topology/none" (default for other systems, 
+best\-fit logic over one\-dimensional topology), and
+"topology/tree" (determine the network topology based
+upon information contained in a topology.conf file,
+see "man topology.conf" for more information).
+Future plugins may gather topology information directly from the network.
+The topology information is optional. 
+If not provided, SLURM will perform a best\-fit algorithm assuming the
+nodes are in a one\-dimensional array as configured and the communications 
+cost is related to the node distance in this array.
+
 .SH "RELOCATING CONTROLLERS"
 If the cluster's computers used for the primary or backup controller 
 will be out of service for an extended period of time, it may be 
@@ -1902,9 +2375,9 @@ PartitionName=long Nodes=dev[9\-17] MaxTime=120 AllowGroups=admin
 
 .SH "COPYING"
 Copyright (C) 2002\-2007 The Regents of the University of California.
-Copyright (C) 2008 Lawrence Livermore National Security.
+Copyright (C) 2008\-2009 Lawrence Livermore National Security.
 Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
-LLNL\-CODE\-402394.
+CODE\-OCEC\-09\-009. All rights reserved.
 .LP
 This file is part of SLURM, a resource management program.
 For details, see <https://computing.llnl.gov/linux/slurm/>.
@@ -1918,12 +2391,14 @@ SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
 WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
 details.
+
 .SH "FILES"
 /etc/slurm.conf
+
 .SH "SEE ALSO"
 .LP
 \fBbluegene.conf\fR(5), \fBgethostbyname\fR(3), 
 \fBgetrlimit\fR(2), \fBgroup\fR(5), \fBhostname\fR(1), 
 \fBscontrol\fR(1), \fBslurmctld\fR(8), \fBslurmd\fR(8), 
-\fBslurmdbd\fR(8), \fBslurmdbd.conf\fR(5), \fBspank(8)\fR,
-\fBsyslog\fR(2), \fBwiki.conf\fR(5)
+\fBslurmdbd\fR(8), \fBslurmdbd.conf\fR(5), \fBsrun(1)\fR, 
+\fBspank(8)\fR, \fBsyslog\fR(2), \fBtopology.conf\fR(5), \fBwiki.conf\fR(5)
diff --git a/doc/man/man5/slurmdbd.conf.5 b/doc/man/man5/slurmdbd.conf.5
index c94a518de3452164411397195a50d781916094b3..ec35744becfe5eb1132cb4bf9cdd4732392afb24 100644
--- a/doc/man/man5/slurmdbd.conf.5
+++ b/doc/man/man5/slurmdbd.conf.5
@@ -1,4 +1,4 @@
-.TH "slurmdbd.conf" "5" "Feb 2009" "slurmdbd.conf 1.3" "Slurm configuration file"
+.TH "slurmdbd.conf" "5" "February 2009" "slurmdbd.conf 2.0" "Slurm configuration file"
 .SH "NAME"
 slurmdbd.conf \- Slurm Database Daemon (SlurmDBD) configuration file 
 
@@ -27,6 +27,10 @@ If ArchiveScript is not set the slurmdbd will generate a text file that can be
 read in anytime with sacctmgr load filename.  This directory is where the 
 file will be placed after the archive has run.  Default is /tmp.
 
+.TP
+\fBArchiveEvents\fR
+Boolean, yes to archive event data, no otherwise.  Default is no.
+
 .TP
 \fBArchiveJobs\fR
 Boolean, yes to archive job data, no otherwise.  Default is no.
@@ -38,23 +42,40 @@ records out of the database into an archive. The script is executed
 with no arguments.  The following environment variables are set.
 .RS
 .TP
-\fBSLURM_ARCHIVE_STEPS\fR 
-1 for archive steps 0 otherwise.
+\fBSLURM_ARCHIVE_EVENTS\fR 
+1 for archive events, 0 otherwise.
 .TP
-\fBSLURM_ARCHIVE_LAST_STEP\fR
-Time of last step start to archive.
+\fBSLURM_ARCHIVE_LAST_EVENT\fR
+Time of last event start to archive.
 .TP
 \fBSLURM_ARCHIVE_JOBS\fR
 1 for archive jobs, 0 otherwise.
 .TP
 \fBSLURM_ARCHIVE_LAST_JOB\fR
 Time of last job submit to archive.
+.TP
+\fBSLURM_ARCHIVE_STEPS\fR 
+1 for archive steps, 0 otherwise.
+.TP
+\fBSLURM_ARCHIVE_LAST_STEP\fR
+Time of last step start to archive.
+.TP
+\fBSLURM_ARCHIVE_SUSPEND\fR 
+1 for archive suspend data, 0 otherwise.
+.TP
+\fBSLURM_ARCHIVE_LAST_SUSPEND\fR
+Time of last suspend start to archive.
 .RE
 
 .TP
 \fBArchiveSteps\fR
 Boolean, yes to archive step data, no otherwise.  Default is no.
 
+.TP
+\fBArchiveSuspend\fR
+Boolean, yes to archive suspend data, no otherwise.  Default is no.
+
 .TP
 \fBAuthInfo\fR
 Additional information to be used for authentication of communications 
@@ -86,22 +107,20 @@ SlurmDbd must be terminated prior to changing the value of \fBAuthType\fR
 and later restarted.
 
 .TP
-\fBDbdAddr\fR
-Name that \fBDbdHost\fR should be referred to in 
-establishing a communications path to the Slurm Database Daemon. 
-This name will be used as an argument to the gethostbyname() 
-function for identification. For example, "elx0000" might be used 
-to designate the ethernet address for node "lx0000". 
-By default the \fBDbdAddr\fR will be identical in value to 
-\fBDbdHost\fR.
-This value must be equal to the \fBSlurmDbdAddr\fR parameter in 
-the slurm.conf file.
+\fBDbdBackupHost\fR
+The name of the machine where the backup Slurm Database Daemon is executed. 
+This host must have access to the same underlying database specified by the 
+'Storage' options mentioned below.
+This should be a node name without the full domain name.  I.e., the hostname
+returned by the \fIgethostname()\fR function cut at the first dot (e.g. use 
+"tux001" rather than "tux001.my.com").
 
 .TP
 \fBDbdHost\fR
 The name of the machine where the Slurm Database Daemon is executed. 
-This should be a node name without the full domain name (e.g. "lx0001"). 
-This value must be specified.
+This should be a node name without the full domain name.  I.e., the hostname
+returned by the \fIgethostname()\fR function cut at the first dot (e.g. use 
+"tux001" rather than "tux001.my.com").  This value must be specified.
 
 .TP
 \fBDbdPort\fR
@@ -123,13 +142,6 @@ The default value is 3.
 When adding a new cluster this will be used as the qos for the cluster 
 unless something is explicitly set by the admin with the create.
  
-.TP
-\fBJobPurge\fR
-Individual job records over this age are purged from the database.
-Aggregated information will be preserved indefinitely.
-The time is a numeric value and is a number of months.
-If zero (default), then job records are never purged.
-
 .TP
 \fBLogFile\fR
 Fully qualified pathname of a file into which the Slurm Database Daemon's 
@@ -173,39 +185,71 @@ prevents users from viewing job records belonging
 to other users unless they are coordinators of the association running the job
 when using sacct.
 .TP
+\fBreservations\fR 
+restricts getting reservation information to users with operator status 
+and above.
+.TP
+\fBusage\fR  
+prevents users from viewing usage of any other user.  
+This applies to sreport.
+.TP
 \fBusers\fR  
 prevents users from viewing information of any user 
 other than themselves, this also makes it so users can only see 
 associations they deal with.  
 Coordinators can see associations of all users they are coordinator of, 
 but can only see themselves when listing users.
-.TP
-\fBusage\fR  
-prevents users from viewing usage of any other user.  
-This applys to sreport.
 .RE
 
 .TP
-\fBSlurmUser\fR
-The name of the user that the \fBslurmctld\fR daemon executes as. 
-This user must exist on the machine executing the Slurm Database Daemon
-and have the same user ID as the hosts on which \fBslurmctld\fR execute.
-For security purposes, a user other than "root" is recommended.
-The default value is "root". 
+\fBPurgeEventMonths\fR
+Events happening on the cluster over this age are purged from the database.
+This includes node down times and the like.
+The time is a numeric value and is a number of months.
+If zero (default), then event records are never purged.
 
 .TP
-\fBStepPurge\fR
+\fBPurgeJobMonths\fR
+Individual job records over this age are purged from the database.
+Aggregated information will be preserved indefinitely.
+The time is a numeric value and is a number of months.
+If zero (default), then job records are never purged.
+
+.TP
+\fBPurgeStepMonths\fR
 Individual job step records over this age are purged from the database.
 Aggregated information will be preserved indefinitely.
 The time is a numeric value and is a number of months.
 If zero (default), then job step records are never purged.
 
+.TP
+\fBPurgeSuspendMonths\fR
+Records of individual suspend times for jobs over this age are purged from the 
+database.
+Aggregated information will be preserved indefinitely.
+The time is a numeric value and is a number of months.
+If zero (default), then suspend records are never purged.
+
+.TP
+\fBSlurmUser\fR
+The name of the user that the \fBslurmctld\fR daemon executes as. 
+This user must exist on the machine executing the Slurm Database Daemon
+and have the same user ID as the hosts on which \fBslurmctld\fR execute.
+For security purposes, a user other than "root" is recommended.
+The default value is "root". 
+
 .TP
 \fBStorageHost\fR
 Define the name of the host the database is running where we are going
 to store the data.
 Ideally this should be the host on which slurmdbd executes.
 
+.TP
+\fBStorageBackupHost\fR
+Define the name of the backup host where the database is running and 
+the data will be stored.
+Default is none.
+
 .TP
 \fBStorageLoc\fR
 Specify the name of the database as the location where accounting 
@@ -260,10 +304,14 @@ Characterization Key. Must be set to track wckey usage.
 .br
 #
 .br
+ArchiveEvents=yes
+.br
 ArchiveJobs=yes
 .br
 ArchiveSteps=no
 .br
+ArchiveSuspend=no
+.br
 #ArchiveScript=/usr/sbin/slurm.dbd.archive
 .br
 AuthInfo=/var/run/munge/munge.socket.2
@@ -274,9 +322,13 @@ DbdHost=db_host
 .br
 DebugLevel=4
 .br
-JobPurge=12
+PurgeEventMonths=1
+.br
+PurgeJobMonths=12
+.br
+PurgeStepMonths=1
 .br
-StepPurge=1
+PurgeSuspendMonths=1
 .br
 LogFile=/var/log/slurmdbd.log
 .br
@@ -293,7 +345,7 @@ StorageUser=database_mgr
 .SH "COPYING"
 Copyright (C) 2008 Lawrence Livermore National Security.
 Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
-LLNL\-CODE\-402394.
+CODE\-OCEC\-09\-009. All rights reserved.
 .LP
 This file is part of SLURM, a resource management program.
 For details, see <https://computing.llnl.gov/linux/slurm/>.
diff --git a/doc/man/man5/topology.conf.5 b/doc/man/man5/topology.conf.5
new file mode 100644
index 0000000000000000000000000000000000000000..e14eb927b6d82be4e1c1100987537ca226723009
--- /dev/null
+++ b/doc/man/man5/topology.conf.5
@@ -0,0 +1,88 @@
+.TH "topology.conf" "5" "March 2009" "topology.conf 2.0" "Slurm configuration file"
+
+.SH "NAME"
+topology.conf \- Slurm configuration file for defining the network topology
+
+.SH "DESCRIPTION"
+\fB/etc/topology.conf\fP is an ASCII file which describes the
+cluster's network topology for optimized job resource allocation. 
+The file location can be modified at system build time using the
+DEFAULT_SLURM_CONF parameter. The file will always be located in the 
+same directory as the \fBslurm.conf\fP file. 
+.LP
+Parameter names are case insensitive.
+Any text following a "#" in the configuration file is treated 
+as a comment through the end of that line. 
+The size of each line in the file is limited to 1024 characters.
+Changes to the configuration file take effect upon restart of 
+SLURM daemons, daemon receipt of the SIGHUP signal, or execution 
+of the command "scontrol reconfigure" unless otherwise noted.
+.LP
+The network topology configuration consists of one line defining a switch name and
+its children, either node names or switch names.
+SLURM's hostlist expression parser is used, so the node and switch
+names need not be consecutive (e.g. "Nodes=tux[0\-3,12,18\-20]"
+and "Swithces=s[0\-2,4\-8,12]" will parse fine).
+An optional link speed may also be specified.
+.LP
+The overall configuration parameters available include:
+
+.TP
+\fBSwitchName\fR
+The name of a switch. This name is internal to SLURM and arbitrary.
+Each switch should have a unique name.
+This field must be specified.
+.TP
+\fBSwitches\fR
+Child switches of the named switch.
+Either this option or the \fBNodes\fR option must be specified.
+.TP
+\fBNodes\fR
+Child Nodes of the named leaf switch.
+Either this option or the \fBSwitches\fR option must be specified.
+.TP
+\fBLinkSpeed\fR
+An optional value specifying the performance of this communication link.
+The units used are arbitrary and this information is currently not used.
+It may be used in the future to optimize resource allocations.
+
+.SH "EXAMPLE"
+.LP 
+.br
+##################################################################
+.br
+# SLURM's network topology configuration file for use with the
+.br
+# topology/tree plugin
+.br
+##################################################################
+.br
+SwitchName=s0 Nodes=dev[0\-5]
+.br
+SwitchName=s1 Nodes=dev[6\-11]
+.br
+SwitchName=s2 Nodes=dev[12\-17]
+.br
+SwitchName=s3 Switches=s[0\-2]
+
+.SH "COPYING"
+Copyright (C) 2009 Lawrence Livermore National Security.
+Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+CODE\-OCEC\-09\-009. All rights reserved.
+.LP
+This file is part of SLURM, a resource management program.
+For details, see <https://computing.llnl.gov/linux/slurm/>.
+.LP
+SLURM is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 2 of the License, or (at your option)
+any later version.
+.LP
+SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+details.
+
+.SH "SEE ALSO"
+.LP
+\fBslurm.conf\fR(5)
diff --git a/doc/man/man5/wiki.conf.5 b/doc/man/man5/wiki.conf.5
index ddc81f090f783ee3ed42debdcea28bb0fed4e8cd..c7004b5fed3d54121959bc858fe4b8625e0895e4 100644
--- a/doc/man/man5/wiki.conf.5
+++ b/doc/man/man5/wiki.conf.5
@@ -1,4 +1,4 @@
-.TH "wiki.conf" "5" "December 2007" "wiki.conf 1.2" "Slurm configuration file"
+.TH "wiki.conf" "5" "December 2007" "wiki.conf 2.0" "Slurm configuration file"
 .SH "NAME"
 wiki.conf \- Slurm configuration file for wiki and wiki2 scheduler plugins
 .SH "DESCRIPTION"
@@ -173,7 +173,7 @@ JobAggregationTime=15
 .SH "COPYING"
 Copyright (C) 2006-2007 The Regents of the University of California.
 Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
-LLNL\-CODE\-402394.
+CODE\-OCEC\-09\-009. All rights reserved.
 .LP
 This file is part of SLURM, a resource management program.
 For details, see <https://computing.llnl.gov/linux/slurm/>.
diff --git a/doc/man/man8/slurmctld.8 b/doc/man/man8/slurmctld.8
index 67f64b5b3a9d91d34727c495a83376b98d7c6757..563eee7444b81832fb65bf89a7eb4430c8eb9c9f 100644
--- a/doc/man/man8/slurmctld.8
+++ b/doc/man/man8/slurmctld.8
@@ -1,4 +1,4 @@
-.TH SLURMCTLD "8" "June 2006" "slurmctld 1.1" "Slurm components"
+.TH SLURMCTLD "8" "June 2006" "slurmctld 2.0" "Slurm components"
 .SH "NAME"
 slurmctld \- The central management daemon of Slurm.
 .SH "SYNOPSIS"
@@ -57,7 +57,7 @@ configuration file, \fBslurm.conf\fR.
 .SH "COPYING"
 Copyright (C) 2002\-2006 The Regents of the University of California.
 Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
-LLNL\-CODE\-402394.
+CODE\-OCEC\-09\-009. All rights reserved.
 .LP
 This file is part of SLURM, a resource management program.
 For details, see <https://computing.llnl.gov/linux/slurm/>.
diff --git a/doc/man/man8/slurmd.8 b/doc/man/man8/slurmd.8
index 8cb55211162e93ebcaf41c59c273b5c03b6c68b1..e81a7a57f0cf8b244efab2243c6c15b10a7190ac 100644
--- a/doc/man/man8/slurmd.8
+++ b/doc/man/man8/slurmd.8
@@ -1,8 +1,11 @@
-.TH SLURMD "8" "June 2006" "slurmd 1.1" "Slurm components"
+.TH SLURMD "8" "March 2009" "slurmd 2.0" "Slurm components"
+
 .SH "NAME"
 slurmd \- The compute node daemon for SLURM.
+
 .SH "SYNOPSIS"
 \fBslurmd\fR [\fIOPTIONS\fR...]
+
 .SH "DESCRIPTION"
 \fBslurmd\fR is the compute node daemon of Slurm. It monitors all tasks 
 running on the compute node, accepts work (tasks), launches tasks, and kills 
@@ -14,25 +17,29 @@ OPTIONS
 Clear system locks as needed. This may be required if \fBslurmd\fR terminated 
 abnormally.
 .TP
+\fB\-d <file>\fR
+Specify the fully qualified pathname to the \fBslurmstepd\fR program to be used
+for shepherding user job steps. This can be useful for testing purposes.
+.TP
 \fB\-D\fR
 Run slurmd in the foreground. Error and debug messages will be copied to stderr.
 .TP
-\fB\-M\fR
-Lock slurmd pages into system memory using mlockall(2) to disable
-paging of the slurmd process. This may help in cases where nodes are
-marked DOWN during periods of heavy swap activity. If the mlockall(2)
-system call is not available, an error will be printed to the log
-and slurmd will continue as normal.
+\fB\-f <file>\fR
+Read configuration from the specified file. See \fBNOTES\fR below.
 .TP
 \fB\-h\fR
 Help; print a brief summary of command options.
 .TP
-\fB\-f <file>\fR
-Read configuration from the specified file. See \fBNOTES\fR below.
-.TP
 \fB\-L <file>\fR
 Write log messages to the specified file.
 .TP
+\fB\-M\fR
+Lock slurmd pages into system memory using mlockall(2) to disable
+paging of the slurmd process. This may help in cases where nodes are
+marked DOWN during periods of heavy swap activity. If the mlockall(2)
+system call is not available, an error will be printed to the log
+and slurmd will continue as normal.
+.TP
 \fB\-v\fR
 Verbose operation. Multiple \-v's increase verbosity.
 .TP
@@ -58,9 +65,10 @@ other parameters used by other Slurm components, change the common
 configuration file, \fBslurm.conf\fR.
 
 .SH "COPYING"
-Copyright (C) 2002\-2006 The Regents of the University of California.
+Copyright (C) 2002\-2007 The Regents of the University of California.
+Copyright (C) 2008\-2009 Lawrence Livermore National Security.
 Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
-LLNL\-CODE\-402394.
+CODE\-OCEC\-09\-009. All rights reserved.
 .LP
 This file is part of SLURM, a resource management program.
 For details, see <https://computing.llnl.gov/linux/slurm/>.
@@ -78,5 +86,6 @@ details.
 .SH "FILES"
 .LP
 /etc/slurm.conf
+
 .SH "SEE ALSO"
 \fBslurm.conf\fR(5), \fBslurmctld\fR(8)
diff --git a/doc/man/man8/slurmdbd.8 b/doc/man/man8/slurmdbd.8
index 0b3b656c6baf89c8e6b3fed5a05f762195a01942..0a3f377f0115c7c08b758bb6624f6df9f642da71 100644
--- a/doc/man/man8/slurmdbd.8
+++ b/doc/man/man8/slurmdbd.8
@@ -1,4 +1,4 @@
-.TH slurmdbd "8" "February 2008" "slurmdbd 1.3" "Slurm components"
+.TH slurmdbd "8" "February 2008" "slurmdbd 2.0" "Slurm components"
 .SH "NAME"
 slurmdbd \- Slurm Database Daemon.
 
@@ -36,7 +36,7 @@ configuration file, \fBslurm.conf\fR.
 .SH "COPYING"
 Copyright (C) 2008 Lawrence Livermore National Security.
 Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
-LLNL\-CODE\-402394.
+CODE\-OCEC\-09\-009. All rights reserved.
 .LP
 This file is part of SLURM, a resource management program.
 For details, see <https://computing.llnl.gov/linux/slurm/>.
diff --git a/doc/man/man8/slurmstepd.8 b/doc/man/man8/slurmstepd.8
index 24748ec2b114d443f5f7f3cb47810e8b817bb1df..3a81beca98151d01994299a3196be182e923e571 100644
--- a/doc/man/man8/slurmstepd.8
+++ b/doc/man/man8/slurmstepd.8
@@ -1,4 +1,4 @@
-.TH SLURMSTEPD "8" "September 2006" "slurmstepd 1.1" "Slurm components"
+.TH SLURMSTEPD "8" "September 2006" "slurmstepd 2.0" "Slurm components"
 .SH "NAME"
 slurmstepd \- The job step manager for SLURM.
 .SH "SYNOPSIS"
@@ -13,7 +13,7 @@ for the job step along with its accounting and signal processing.
 .SH "COPYING"
 Copyright (C) 2006 The Regents of the University of California.
 Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
-LLNL\-CODE\-402394.
+CODE\-OCEC\-09\-009. All rights reserved.
 .LP
 This file is part of SLURM, a resource management program.
 For details, see <https://computing.llnl.gov/linux/slurm/>.
diff --git a/doc/man/man8/spank.8 b/doc/man/man8/spank.8
index 04756910218dd1eb1139580fe296d01e8c1fab4d..29495c78ad113f8880df3741e17accea0baac078 100644
--- a/doc/man/man8/spank.8
+++ b/doc/man/man8/spank.8
@@ -1,4 +1,5 @@
 .TH "SPANK" "8" "February 2009" "SPANK" "SLURM plug\-in architecture for Node and job (K)control"
+
 .SH "NAME"
 \fBSPANK\fR \- SLURM Plug\-in Architecture for Node and job (K)control 
 
@@ -19,16 +20,27 @@ behavior of SLURM job launch.
 .LP
 
 .SH "SPANK PLUGINS"
-\fBSPANK\fR plugins are loaded in two separate contexts during a 
-\fBSLURM\fR job.  In "local" context, the plugin is loaded by \fBsrun\fR
-(NOTE: the \fBsalloc\fR and \fBsbatch\fR commands do not support \fBSPANK\fR).
-In local context, options provided by 
-plugins are read by \fBSPANK\fR, and these options are presented to the user. 
-In "remote" context, the plugin is loaded on a compute node of the job,
-in other words, the plugin is loaded by \fBslurmd\fR. In local context, only
-the \fBinit\fR, \fBexit\fR, and \fBuser_local_init\fR functions are called.
+\fBSPANK\fR plugins are loaded in up to three separate contexts during a
+\fBSLURM\fR job. Briefly, the three contexts are:
+.TP 8
+\fBlocal\fR
+In \fBlocal\fR context, the plugin is loaded by \fBsrun\fR (i.e. the "local"
+part of a parallel job).
+.TP
+\fBremote\fR
+In \fBremote\fR context, the plugin is loaded by \fBslurmd\fR (i.e. the "remote"
+part of a parallel job).
+.TP
+\fBallocator\fR
+In \fBallocator\fR context, the plugin is loaded in one of the job allocation
+utilities \fBsbatch\fR or \fBsalloc\fR.
+.LP
+In local context, only the \fBinit\fR, \fBexit\fR, \fBinit_post_opt\fR, and
+\fBuser_local_init\fR functions are called. In allocator context, only the
+\fBinit\fR, \fBexit\fR, and \fBinit_post_opt\fR functions are called.
 Plugins may query the context in which they are running with the
-\fBspank_remote\fR function defined in \fB<slurm/spank.h>\fR.
+\fBspank_context\fR and \fBspank_remote\fR functions defined in
+\fB<slurm/spank.h>\fR.
 .LP
 \fBSPANK\fR plugins may be called from multiple points during the SLURM job
 launch. A plugin may define the following functions:
@@ -41,7 +53,7 @@ option processing.
 \fBslurm_spank_init_post_opt\fR
 Called at the same point as \fBslurm_spank_init\fR, but after all
 user options to the plugin have been processed. The reason that the
 \fBinit\fR and \fBinit_post_opt\fR callbacks are separated is so that
 plugins can process system-wide options specified in plugstack.conf in
 the \fBinit\fR callback, then process user options, and finally take some
 action in \fBslurm_spank_init_post_opt\fR if necessary.
@@ -56,6 +68,10 @@ tasks are launched.
 \fBslurm_spank_user_init\fR 
 Called after privileges are temporarily dropped. (remote context only)
 .TP
+\fBslurm_spank_task_init_privileged\fR
+Called for each task just after fork, but before all elevated privileges
+are dropped. (remote context only)
+.TP
 \fBslurm_spank_task_init\fR 
 Called for each task just before execve(2). (remote context only)
 .TP
@@ -155,15 +171,29 @@ and \fBunsetenv\fR(3) may be used in local context.
 .LP
 See \fBspank.h\fR for more information, and \fBEXAMPLES\fR below for an example
 for \fBspank_getenv\fR usage.
+.LP
+Many of the \fBSPANK\fR functions available to plugins return errors
+via the \fBspank_err_t\fR error type. On success, the return value is
+\fBESPANK_SUCCESS\fR; on failure, it is one of the error values defined
+in \fB<slurm/spank.h>\fR. The \fBSPANK\fR interface provides a simple
+function
+.nf
+
+  const char * \fBspank_strerror\fR (spank_err_t err);
+
+.fi
+which may be used to translate a \fBspank_err_t\fR value into its
+string representation.
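+For example (a minimal sketch; \fBMY_VAR\fR is a hypothetical environment
+variable):
+.nf
+
+   char val[1024];
+   spank_err_t err = spank_getenv (sp, "MY_VAR", val, sizeof (val));
+   if (err != ESPANK_SUCCESS)
+       slurm_error ("spank_getenv: %s", spank_strerror (err));
+
+.fi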
 
 .SH "SPANK OPTIONS"
 .LP
 SPANK plugins also have an interface through which they may define
 and implement extra job options. These options are made available to
-the user through SLURM commands such as \fBsrun\fR(1), and if the
-option is specified, its value is forwarded and registered with
-the plugin on the remote side. In this way, \fBSPANK\fR plugins
-may dynamically provide new options and functionality to SLURM.
+the user through SLURM commands such as \fBsrun\fR(1), \fBsalloc\fR(1),
+and \fBsbatch\fR(1). If the option is specified by the user, its value is
+forwarded and registered with the plugin in slurmd when the job is run.
+In this way, \fBSPANK\fR plugins may dynamically provide new options and
+functionality to SLURM.
 .LP
 Each option registered by a plugin to SLURM takes the form of
 a \fBstruct spank_option\fR which is declared in \fB<slurm/spank.h>\fR as
@@ -215,14 +245,7 @@ struct, \fIoptarg\fR is the supplied argument if applicable, and \fIremote\fR
 is 0 if the function is being called from the "local" host 
 (e.g. \fBsrun\fR) or 1 from the "remote" host (\fBslurmd\fR).
 .LP
-There are two methods by which the plugin can register these options
-with SLURM. The simplest method is for the plugin to define an array
-of \fBstruct spank_option\fR with the symbol name \fBspank_options\fR. 
-This final element in the options table must be filled with zeros. A
-\fBSPANK_OPTIONS_TABLE_END\fR macro is defined in \fB<slurm/spank.h>\fR
-for this purpose.
-.LP
-Plugin options may also be dynamically registered with SLURM using 
+Plugin options may be registered with SLURM using
 the \fBspank_option_register\fR function. This function is only valid
 when called from the plugin's \fBslurm_spank_init\fR handler, and 
 registers one option at a time. The prototype is
@@ -235,7 +258,27 @@ registers one option at a time. The prototype is
 This function will return \fBESPANK_SUCCESS\fR on successful registration
 of an option, or \fBESPANK_BAD_ARG\fR for errors including invalid spank_t
 handle, or when the function is not called from the \fBslurm_spank_init\fR
-function.
+function. Each option must be registered from every context in which
+it will be used. For instance, if an option is used only in local (srun)
+and remote (slurmd) contexts, then \fBspank_option_register\fR
+should be called only from within those contexts, for example:
+.nf
+
+   if (spank_context() != S_CTX_ALLOCATOR)
+      spank_option_register (sp, opt);
+
+.fi
+If, however, the option is used in all contexts, then
+\fBspank_option_register\fR needs to be called from every context.
+.LP
+In addition to \fBspank_option_register\fR, plugins may also export options
+to SLURM by defining a table of \fBstruct spank_option\fR with the
+symbol name \fBspank_options\fR. This method, however, is not supported
+for use with \fBsbatch\fR and \fBsalloc\fR (allocator context), thus
+the use of \fBspank_option_register\fR is preferred. When using the
+\fBspank_options\fR table, the final element in the array must be
+filled with zeros. A \fBSPANK_OPTIONS_TABLE_END\fR macro is provided
+in \fB<slurm/spank.h>\fR for this purpose.
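+.LP
+A minimal sketch of such a table follows (\fBmy_opt\fR and
+\fB_my_opt_process\fR are hypothetical names):
+.nf
+
+   struct spank_option spank_options[] =
+   {
+       { "my_opt", NULL, "A hypothetical option.", 0, 0,
+         (spank_opt_cb_f) _my_opt_process },
+       SPANK_OPTIONS_TABLE_END
+   };
+
+.fi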
 .LP
 When an option is provided by the user on the local side, \fBSLURM\fR will 
 immediately invoke the option's callback with \fIremote\fR=0. This
@@ -383,6 +426,11 @@ int slurm_spank_init (spank_t sp, int ac, char **av)
 {
     int i;
 
+    /* Don't do anything in sbatch/salloc
+     */
+    if (spank_context () == S_CTX_ALLOCATOR)
+        return (0);
+
     for (i = 0; i < ac; i++) {
         if (strncmp ("min_prio=", av[i], 9) == 0) {
             const char *optarg = av[i] + 9;
@@ -479,7 +527,7 @@ static int _renice_opt_process (int val, const char *optarg, int remote)
 .SH "COPYING"
 Copyright (C) 2006 The Regents of the University of California.
 Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
-LLNL\-CODE\-402394.
+CODE\-OCEC\-09\-009. All rights reserved.
 .LP
 This file is part of SLURM, a resource management program.
 For details, see <https://computing.llnl.gov/linux/slurm/>.
diff --git a/etc/bluegene.conf.example b/etc/bluegene.conf.example
index 8ce73bd51673068cfec5cd2bcc96fef74395980f..d35e068aaff7eac2ac2abd8d9619161ec65563ad 100644
--- a/etc/bluegene.conf.example
+++ b/etc/bluegene.conf.example
@@ -34,6 +34,8 @@
 #                    2: Log level 1 and basic debug messages
 #                    3: Log level 2 and more debug message
 #                    4: Log all messages
+# DenyPassthrough:   Prevents use of passthrough ports in the specified
+#                    dimensions: X, Y, and/or Z, or ALL for every dimension
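+#                    (e.g. "DenyPassthrough=X,Y" might deny passthroughs
+#                    in the X and Y dimensions)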
 # 
 # NOTE: The bgl_serial value is set at configuration time using the 
 #       "--with-bgl-serial=" option. Its default value is "BGL".
diff --git a/etc/slurm.conf.example b/etc/slurm.conf.example
index f0c5780d1934842525b338a28187d05e43d06430..425f4ff952f2e0454538fab744f2b2250e5eb9f2 100644
--- a/etc/slurm.conf.example
+++ b/etc/slurm.conf.example
@@ -8,12 +8,14 @@
 #
 # See the slurm.conf man page for more information.
 #
+ClusterName=linux
 ControlMachine=linux0
 #ControlAddr=
 #BackupController=
 #BackupAddr=
 #
 SlurmUser=slurm
+#SlurmdUser=root
 SlurmctldPort=6817
 SlurmdPort=6818
 AuthType=auth/munge
@@ -62,6 +64,14 @@ SchedulerType=sched/backfill
 #SchedulerRootFilter=
 SelectType=select/linear
 FastSchedule=1
+#PriorityType=priority/multifactor
+#PriorityDecayHalfLife=14-0
+#PriorityUsageResetPeriod=NONE
+#PriorityWeightFairshare=100000
+#PriorityWeightAge=1000
+#PriorityWeightPartition=10000
+#PriorityWeightJobSize=1000
+#PriorityMaxAge=1-0
 #
 # LOGGING
 SlurmctldDebug=3
@@ -70,14 +80,16 @@ SlurmdDebug=3
 #SlurmdLogFile=
 JobCompType=jobcomp/none
 #JobCompLoc=
-JobAcctType=jobacct/none
-#JobAcctLoc=
-#JobAcctFrequency=
-DatabaseType=database/flatfile
-#DatabaseHost=localhost
-#DatabasePort=1234
-#DatabaseUser=mysql
-#DatabasePass=mysql
+#
+# ACCOUNTING
+#JobAcctGatherType=jobacct_gather/linux
+#JobAcctGatherFrequency=30
+#
+#AccountingStorageType=accounting_storage/slurmdbd
+#AccountingStorageHost=
+#AccountingStorageLoc=
+#AccountingStoragePass=
+#AccountingStorageUser=
 #
 # COMPUTE NODES
 NodeName=linux[1-32] Procs=1 State=UNKNOWN
diff --git a/etc/slurm.epilog.clean b/etc/slurm.epilog.clean
index 4ae37ae465198be462561aec8bce51ecd5599914..60df570c6f75bffcbbbd3e9368f1212464db5d61 100644
--- a/etc/slurm.epilog.clean
+++ b/etc/slurm.epilog.clean
@@ -11,7 +11,7 @@
 if [ x$SLURM_UID == "x" ] ; then 
 	exit 0
 fi
-if [ x$SLURM_JOBID == "x" ] ; then 
+if [ x$SLURM_JOB_ID == "x" ] ; then 
         exit 0
 fi
 
@@ -25,7 +25,7 @@ fi
 job_list=`${SLURM_BIN}squeue --noheader --format=%i --user=$SLURM_UID --node=localhost`
 for job_id in $job_list
 do
-	if [ $job_id -ne $SLURM_JOBID ] ; then
+	if [ $job_id -ne $SLURM_JOB_ID ] ; then
 		exit 0
 	fi
 done
diff --git a/slurm.spec b/slurm.spec
index 7a4d191f77cc8fcb5a9fb4f084868a76f2197253..c0ba49b7b15b921d1bf66c0a5eba196957cdd751 100644
--- a/slurm.spec
+++ b/slurm.spec
@@ -1,4 +1,4 @@
-# $Id: slurm.spec 16983 2009-03-24 16:33:55Z da $
+# $Id: slurm.spec 17631 2009-05-28 21:18:15Z jette $
 #
 # Note that this package is not relocatable
 
@@ -9,6 +9,7 @@
 # --with authd       %_with_authd       1    build auth-authd RPM
 # --with auth_none   %_with_auth_none   1    build auth-none RPM
 # --with bluegene    %_with_bluegene    1    build bluegene RPM
+# --with cray_xt     %_with_cray_xt     1    build for Cray XT system
 # --with debug       %_with_debug       1    enable extra debugging within SLURM
 # --with elan        %_with_elan        1    build switch-elan RPM
 # --without munge    %_without_munge    1    don't build auth-munge RPM
@@ -33,11 +34,12 @@
 %define slurm_with() %{expand:%%{?slurm_with_%{1}:1}%%{!?slurm_with_%{1}:0}}
 
 #  Options that are off by default (enable with --with <opt>)
-%slurm_without_opt elan
+%slurm_without_opt auth_none
 %slurm_without_opt authd
 %slurm_without_opt bluegene
-%slurm_without_opt auth_none
+%slurm_without_opt cray_xt
 %slurm_without_opt debug
+%slurm_without_opt elan
 %slurm_without_opt sun_const
 
 # These options are only here to force there to be these on the build.  
@@ -73,14 +75,14 @@
 %endif
 
 Name:    slurm
-Version: 1.3.15
+Version: 2.0.2
 Release: 1%{?dist}
 
 Summary: Simple Linux Utility for Resource Management
 
 License: GPL 
 Group: System Environment/Base
-Source: slurm-1.3.15.tar.bz2
+Source: slurm-2.0.2.tar.bz2
 BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}
 URL: https://computing.llnl.gov/linux/slurm/
 
@@ -91,7 +93,9 @@ BuildRequires: python
 %endif
 
 %if %{?chaos}0
+BuildRequires: gtk2-devel >= 2.7.1
 BuildRequires: ncurses-devel
+BuildRequires: pkgconfig
 %endif
 
 %if %{slurm_with pam}
@@ -265,10 +269,11 @@ SLURM process tracking plugin for SGI job containers.
 #############################################################################
 
 %prep
-%setup -n slurm-1.3.15
+%setup -n slurm-2.0.2
 
 %build
 %configure --program-prefix=%{?_program_prefix:%{_program_prefix}} \
+	%{?slurm_with_cray_xt:--enable-cray-xt} \
 	%{?slurm_with_debug:--enable-debug} \
 	%{?slurm_with_sun_const:--enable-sun-const} \
 	%{?with_proctrack}	\
@@ -312,6 +317,7 @@ install -D -m644 etc/federation.conf.example ${RPM_BUILD_ROOT}%{_sysconfdir}/fed
 %endif
 
 %if %{slurm_with bluegene}
+rm ${RPM_BUILD_ROOT}%{_bindir}/srun
 install -D -m644 etc/bluegene.conf.example ${RPM_BUILD_ROOT}%{_sysconfdir}/bluegene.conf.example
 %endif
 
@@ -322,11 +328,20 @@ if [ -d /etc/init.d ]; then
 fi
 
 LIST=./plugins.files
-test -f $RPM_BUILD_ROOT/%{_libdir}/slurm/task_affinity.so &&
-   echo %{_libdir}/slurm/task_affinity.so >> $LIST
-test -f $RPM_BUILD_ROOT/%{_libdir}/slurm/crypto_openssl.so &&
-   echo %{_libdir}/slurm/crypto_openssl.so >> $LIST
-
+test -f $RPM_BUILD_ROOT/%{_libdir}/slurm/accounting_storage_mysql.so &&
+   echo %{_libdir}/slurm/accounting_storage_mysql.so >> $LIST
+test -f $RPM_BUILD_ROOT/%{_libdir}/slurm/accounting_storage_pgsql.so &&
+   echo %{_libdir}/slurm/accounting_storage_pgsql.so >> $LIST
+test -f $RPM_BUILD_ROOT/%{_libdir}/slurm/checkpoint_blcr.so          &&
+   echo %{_libdir}/slurm/checkpoint_blcr.so          >> $LIST
+test -f $RPM_BUILD_ROOT/%{_libdir}/slurm/crypto_openssl.so           &&
+   echo %{_libdir}/slurm/crypto_openssl.so           >> $LIST
+test -f $RPM_BUILD_ROOT/%{_libdir}/slurm/jobcomp_mysql.so            &&
+   echo %{_libdir}/slurm/jobcomp_mysql.so            >> $LIST
+test -f $RPM_BUILD_ROOT/%{_libdir}/slurm/jobcomp_pgsql.so            &&
+   echo %{_libdir}/slurm/jobcomp_pgsql.so            >> $LIST
+test -f $RPM_BUILD_ROOT/%{_libdir}/slurm/task_affinity.so            &&
+   echo %{_libdir}/slurm/task_affinity.so             >> $LIST
 
 #############################################################################
 
@@ -441,9 +456,7 @@ rm -rf $RPM_BUILD_ROOT
 %defattr(-,root,root)
 %dir %{_libdir}/slurm
 %{_libdir}/slurm/accounting_storage_filetxt.so
-%{_libdir}/slurm/accounting_storage_mysql.so
 %{_libdir}/slurm/accounting_storage_none.so
-%{_libdir}/slurm/accounting_storage_pgsql.so
 %{_libdir}/slurm/accounting_storage_slurmdbd.so
 %{_libdir}/slurm/checkpoint_none.so
 %{_libdir}/slurm/checkpoint_ompi.so
@@ -453,9 +466,9 @@ rm -rf $RPM_BUILD_ROOT
 %{_libdir}/slurm/jobacct_gather_none.so
 %{_libdir}/slurm/jobcomp_none.so
 %{_libdir}/slurm/jobcomp_filetxt.so
-%{_libdir}/slurm/jobcomp_mysql.so
-%{_libdir}/slurm/jobcomp_pgsql.so
 %{_libdir}/slurm/jobcomp_script.so
+%{_libdir}/slurm/priority_basic.so
+%{_libdir}/slurm/priority_multifactor.so
 %{_libdir}/slurm/proctrack_pgid.so
 %{_libdir}/slurm/proctrack_linuxproc.so
 %{_libdir}/slurm/sched_backfill.so
@@ -467,6 +480,9 @@ rm -rf $RPM_BUILD_ROOT
 %{_libdir}/slurm/select_cons_res.so
 %{_libdir}/slurm/select_linear.so
 %{_libdir}/slurm/switch_none.so
+%{_libdir}/slurm/topology_3d_torus.so
+%{_libdir}/slurm/topology_none.so
+%{_libdir}/slurm/topology_tree.so
 %{_libdir}/slurm/mpi_lam.so
 %{_libdir}/slurm/mpi_mpich1_p4.so
 %{_libdir}/slurm/mpi_mpich1_shmem.so
diff --git a/slurm/pmi.h b/slurm/pmi.h
index 9621077da9a6f1f38228c0a323b7be26ae7c3231..6740a8b68592e3ee3a186e6359d6ef637680f610 100644
--- a/slurm/pmi.h
+++ b/slurm/pmi.h
@@ -389,7 +389,7 @@ communicate through IPC mechanisms (e.g., shared memory) and other network
 mechanisms.
 
 @*/
-int PMI_Get_clique_ranks( char ranks[], int length);
+int PMI_Get_clique_ranks( int ranks[], int length);
 
 /*@
 PMI_Abort - abort the process group associated with this process
diff --git a/slurm/slurm.h.in b/slurm/slurm.h.in
index b63eb1e2332d2d32f2dfffd59e080298b0478fc9..95511f00f13891d7ba9f209dbbef00ebf6f07d39 100644
--- a/slurm/slurm.h.in
+++ b/slurm/slurm.h.in
@@ -2,14 +2,15 @@
  *  slurm.h - Definitions for all of the SLURM RPCs
  *****************************************************************************
  *  Copyright (C) 2002-2007 The Regents of the University of California.
- *  Copyright (C) 2008 Lawrence Livermore National Security.
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>, 
  *	Joey Ekstrom <ekstrom1@llnl.gov> et. al.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -133,10 +134,12 @@ BEGIN_C_DECLS
    typedef struct switch_jobinfo *switch_jobinfo_t;	/* opaque data type */
 #endif
 
-/* Define select_jobinfo_t below to avoid including extraneous slurm headers */
+/* Define select_jobinfo_t and select_job_res_t below 
+ * to avoid including extraneous slurm headers */
 #ifndef __select_jobinfo_t_defined
-#  define  __select_jobinfo_t_defined
-   typedef struct select_jobinfo *select_jobinfo_t;     /* opaque data type */
+#  define  __select_jobinfo_t_defined	/* Opaque data for select plugins */
+   typedef struct select_jobinfo *select_jobinfo_t;  /* for BlueGene */
+   typedef struct select_job_res *select_job_res_t;  /* for non-BlueGene */
 #endif
 
 /* Define jobacctinfo_t below to avoid including extraneous slurm headers */
@@ -221,7 +224,10 @@ enum job_state_reason {
 	WAIT_HELD,		/* job is held, priority==0 */
 	WAIT_TIME,		/* job waiting for specific begin time */
 	WAIT_LICENSES,		/* job is waiting for licenses */
-	WAIT_ASSOC_LIMIT,	/* user/bank job limit reached */
+	WAIT_ASSOC_JOB_LIMIT,	/* user/bank job limit reached */
+	WAIT_ASSOC_RESOURCE_LIMIT,/* user/bank resource limit reached */
+	WAIT_ASSOC_TIME_LIMIT,  /* user/bank time limit reached */
+	WAIT_RESERVATION,	/* reservation not available */
 	WAIT_TBD1,
 	WAIT_TBD2,
 	FAIL_DOWN_PARTITION,	/* partition for job is DOWN */
@@ -281,6 +287,7 @@ enum select_data_type {
 	SELECT_DATA_MLOADER_IMAGE,/* data-> char *mloaderimage */
 	SELECT_DATA_RAMDISK_IMAGE,/* data-> char *ramdiskimage */
 	SELECT_DATA_REBOOT,	/* data-> uint16_t reboot */
+	SELECT_DATA_RESV_ID,	/* data-> char *reservation_id */
 };
 
 enum select_print_mode {
@@ -299,6 +306,7 @@ enum select_print_mode {
 	SELECT_PRINT_MLOADER_IMAGE,/* Print just the MLOADER IMAGE */
 	SELECT_PRINT_RAMDISK_IMAGE,/* Print just the RAMDISK IMAGE */
 	SELECT_PRINT_REBOOT,	/* Print just the REBOOT */
+	SELECT_PRINT_RESV_ID,	/* Print just Cray/BASIL reservation ID */
 };
 
 enum select_node_cnt {
@@ -356,18 +364,26 @@ typedef enum task_dist_states {
 #define OPEN_MODE_TRUNCATE	2
 
 typedef enum cpu_bind_type {	/* cpu binding type from --cpu_bind=... */
-	/* the following auto-binding flags are mutually exclusive */
-	CPU_BIND_TO_THREADS= 0x01, /* =threads */ 
-	CPU_BIND_TO_CORES  = 0x02, /* =cores */
-	CPU_BIND_TO_SOCKETS= 0x04, /* =sockets */
 	/* verbose can be set with any other flag */
-	CPU_BIND_VERBOSE   = 0x08, /* =v, */
+	CPU_BIND_VERBOSE   = 0x01, /* =v, */
+	/* the following auto-binding flags are mutually exclusive */
+	CPU_BIND_TO_THREADS= 0x02, /* =threads */ 
+	CPU_BIND_TO_CORES  = 0x04, /* =cores */
+	CPU_BIND_TO_SOCKETS= 0x08, /* =sockets */
+	CPU_BIND_TO_LDOMS  = 0x10, /* locality domains */
 	/* the following manual binding flags are mutually exclusive */
-	/* CPU_BIND_NONE needs to be the first in this sub-list */
-	CPU_BIND_NONE	   = 0x10, /* =no */
-	CPU_BIND_RANK  	   = 0x20, /* =rank */
-	CPU_BIND_MAP	   = 0x40, /* =map_cpu:<list of CPU IDs> */
-	CPU_BIND_MASK	   = 0x80  /* =mask_cpu:<list of CPU masks> */
+	/* CPU_BIND_NONE needs to be the lowest value among manual bindings */
+	CPU_BIND_NONE	   = 0x20, /* =no */
+	CPU_BIND_RANK  	   = 0x40, /* =rank */
+	CPU_BIND_MAP	   = 0x80, /* =map_cpu:<list of CPU IDs> */
+	CPU_BIND_MASK	   = 0x100,/* =mask_cpu:<list of CPU masks> */
+	CPU_BIND_LDRANK    = 0x200,/* =locality domain rank */
+	CPU_BIND_LDMAP	   = 0x400,/* =map_ldom:<list of locality domains> */
+	CPU_BIND_LDMASK	   = 0x800,/* =mask_ldom:<list of ldom masks> */
+	
+	/* the following is used only as a flag for expressing 
+	 * the contents of TaskPluginParams */
+	CPU_BIND_CPUSETS   = 0x8000
 } cpu_bind_type_t;
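+
+/* For example, srun's "--cpu_bind=v,cores" would correspond to the
+ * flag combination (CPU_BIND_VERBOSE | CPU_BIND_TO_CORES). */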
 
 typedef enum mem_bind_type {    /* memory binding type from --mem_bind=... */
@@ -392,6 +408,7 @@ enum node_states {
 	NODE_STATE_DOWN,	/* node in non-usable state */
 	NODE_STATE_IDLE,	/* node idle and available for use */
 	NODE_STATE_ALLOCATED,	/* node has been allocated to a job */
+	NODE_STATE_FUTURE,	/* node slot reserved for future use */
 	NODE_STATE_END		/* last entry in table */
 };
 #define NODE_STATE_BASE       0x00ff
@@ -406,6 +423,8 @@ enum node_states {
 #define NODE_STATE_POWER_SAVE 0x1000	/* node in power save mode */
 #define NODE_STATE_FAIL       0x2000	/* node is failing, do not allocate
 					 * new work */
+#define NODE_STATE_POWER_UP   0x4000	/* restore power to a node */
+#define NODE_STATE_MAINT      0x8000	/* node in maintenance reservation */
 
 /* used to define the size of the credential.signature size
  * used to define the key size of the io_stream_header_t
@@ -447,20 +466,24 @@ typedef enum select_type_plugin_info {
 	CR_CPU_MEMORY     /* CPU and Memory are CRs */
 } select_type_plugin_info_t ;
 
-/* Values for slurm_ctl_conf.task_plugin_param */
-#define TASK_PARAM_NONE    0x0000
-#define TASK_PARAM_CPUSETS 0x0001
-#define TASK_PARAM_SCHED   0x0002
-
 #define MEM_PER_CPU  0x80000000
 #define SHARED_FORCE 0x8000
 
-#define PRIVATE_DATA_JOBS	0x0001	/* job/step data is private */
-#define PRIVATE_DATA_NODES	0x0002	/* node data is private */
-#define PRIVATE_DATA_PARTITIONS	0x0004	/* partition data is private */
-#define PRIVATE_DATA_USAGE	0x0008	/* accounting usage data is private */
-#define PRIVATE_DATA_USERS	0x0010	/* accounting user data is private */
-#define PRIVATE_DATA_ACCOUNTS	0x0020	/* accounting account data is private */
+#define PRIVATE_DATA_JOBS	  0x0001 /* job/step data is private */
+#define PRIVATE_DATA_NODES	  0x0002 /* node data is private */
+#define PRIVATE_DATA_PARTITIONS	  0x0004 /* partition data is private */
+#define PRIVATE_DATA_USAGE	  0x0008 /* accounting usage data is private */
+#define PRIVATE_DATA_USERS	  0x0010 /* accounting user data is private */
+#define PRIVATE_DATA_ACCOUNTS	  0x0020 /* accounting account data is private*/
+#define PRIVATE_DATA_RESERVATIONS 0x0040 /* reservation data is private */
+
+#define PRIORITY_RESET_NONE	0x0000	/* never clear */
+#define PRIORITY_RESET_NOW	0x0001	/* clear now (when slurmctld restarts) */
+#define PRIORITY_RESET_DAILY	0x0002	/* clear daily at midnight */
+#define PRIORITY_RESET_WEEKLY	0x0003	/* clear weekly at Sunday 00:00 */
+#define PRIORITY_RESET_MONTHLY	0x0004	/* clear monthly on first at 00:00 */
+#define PRIORITY_RESET_QUARTERLY 0x0005	/* clear quarterly on first at 00:00 */
+#define PRIORITY_RESET_YEARLY	0x0006	/* clear yearly on first at 00:00 */
 
 /*****************************************************************************\
  *	PROTOCOL DATA STRUCTURE DEFINITIONS
@@ -479,9 +502,13 @@ typedef struct job_descriptor {	/* For submit, allocate, and update requests */
 	uint32_t argc;		/* number of arguments to the script */
 	char **argv;		/* arguments to the script */
 	time_t begin_time;	/* delay initiation until this time */
+	uint16_t ckpt_interval;	/* periodically checkpoint this job */
+	char *ckpt_dir;  	/* directory to store checkpoint images */
 	char *comment;		/* arbitrary comment (used by Moab scheduler) */
 	uint16_t contiguous;	/* 1 if job requires contiguous nodes,
 				 * 0 otherwise,default=0 */
+	char *cpu_bind;		/* binding map for map/mask_cpu */
+	uint16_t cpu_bind_type;	/* see cpu_bind_type_t */
 	char *dependency;	/* synchronize job execution with other jobs */
 	char **environment;	/* environment variables to set for job, 
 				 *  name=value pairs, one per line */
@@ -501,11 +528,14 @@ typedef struct job_descriptor {	/* For submit, allocate, and update requests */
 	char *licenses;		/* licenses required by the job */
 	uint16_t mail_type;	/* see MAIL_JOB_ definitions above */
 	char *mail_user;	/* user to receive notification */
+	char *mem_bind;		/* binding map for map/mask_mem */
+	uint16_t mem_bind_type;	/* see mem_bind_type_t */
 	char *name;		/* name of the job, default "" */
 	char *network;		/* network use spec */
 	uint16_t nice;		/* requested priority change, 
 				 * NICE_OFFSET == no change */
-	uint32_t num_tasks;	/* number of tasks to be started, for batch only */
+	uint32_t num_tasks;	/* number of tasks to be started, 
+				 * for batch only */
 	uint8_t open_mode;	/* out/err open mode truncate or append,
 				 * see OPEN_MODE_* */
 	uint16_t other_port;	/* port to send various notification msg to */
@@ -522,7 +552,7 @@ typedef struct job_descriptor {	/* For submit, allocate, and update requests */
 	char *req_nodes;	/* comma separated list of required nodes
 				 * default NONE */
 	uint16_t requeue;	/* enable or disable job requeue option */
-
+	char *reservation;	/* name of reservation to use */
 	char *script;		/* the actual job script, default NONE */
 	uint16_t shared;	/* 1 if job can share nodes with other jobs,
 				 * 0 if job needs exclusive access to the node,
@@ -588,17 +618,21 @@ typedef struct job_descriptor {	/* For submit, allocate, and update requests */
 /* End of Blue Gene specific values */
 
 	select_jobinfo_t select_jobinfo; /* opaque data type,
-			* SLURM internal use only */
+					  * SLURM internal use only */
+	char *wckey;            /* wckey for job */
 } job_desc_msg_t;
 
 typedef struct job_info {
 	char *account;		/* charge to specified account */
 	char    *alloc_node;	/* local node making resource alloc */
 	uint32_t alloc_sid;	/* local sid making resource alloc */
+	uint32_t assoc_id;	/* association id for job */
 	uint16_t batch_flag;	/* 1 if batch: queued job with script */
 	char *command;		/* command to be executed */
 	char *comment;		/* arbitrary comment (used by Moab scheduler) */
 	uint16_t contiguous;	/* 1 if job requires contiguous nodes */
+	uint32_t *cpu_count_reps;/* how many nodes have same cpu count */
+	uint16_t *cpus_per_node;/* cpus per node */
 	uint16_t cpus_per_task;	/* number of processors required for each task */
 	char *dependency;	/* synchronize job execution with other jobs */
 	time_t end_time;	/* time of termination, actual or expected */
@@ -618,6 +652,13 @@ typedef struct job_info {
 	uint32_t job_min_tmp_disk; /* minimum tmp disk per node, default=0 */
 	uint16_t job_state;	/* state of the job, see enum job_states */
 	char *licenses;		/* licenses required by the job */
+	uint16_t max_cores;	/* maximum number of cores per cpu */
+	uint32_t max_nodes;	/* maximum number of nodes usable by job */
+	uint16_t max_sockets;	/* maximum number of sockets per node */
+	uint16_t max_threads;	/* maximum number of threads per core */
+	uint16_t min_cores;	/* minimum number of cores per cpu */
+	uint16_t min_sockets;	/* minimum number of sockets per node */
+	uint16_t min_threads;	/* minimum number of threads per core */
 	char *name;		/* name of the job */
 	char *network;		/* network specification */
 	char *nodes;		/* list of nodes allocated to job */
@@ -627,6 +668,10 @@ typedef struct job_info {
 	uint16_t ntasks_per_core;/* number of tasks to invoke on each core */
 	uint16_t ntasks_per_node;/* number of tasks to invoke on each node */
 	uint16_t ntasks_per_socket;/* number of tasks to invoke on each socket */
+	uint32_t num_cpu_groups;/* elements in cpu arrays below */
+
+	uint32_t num_nodes;	/* minimum number of nodes required by job */
+	uint32_t num_procs;	/* number of processors required by job */
 	char *partition;	/* name of assigned partition */
 	time_t pre_sus_time;	/* time job ran prior to last suspend */
 	uint32_t priority;	/* relative priority of the job, 
@@ -636,30 +681,21 @@ typedef struct job_info {
 				 * start_range_1, end_range_1, 
 				 * start_range_2, .., -1  */
         uint16_t requeue;       /* enable or disable job requeue option */
+	uint16_t restart_cnt;	/* count of job restarts */
+	char *resv_name;	/* reservation name */
 	select_jobinfo_t select_jobinfo; /* opaque data type,
 				 * process using select_g_get_jobinfo() */
 	uint16_t shared;	/* 1 if job can share nodes with other jobs */
 	time_t start_time;	/* time execution begins, actual or expected */
+	char *state_desc;	/* optional details for state_reason */
 	uint16_t state_reason;	/* reason job still pending or failed, see
 				 * slurm.h:enum job_state_reason */
 	time_t submit_time;	/* time of job submission */
 	time_t suspend_time;	/* time job last suspended or resumed */
 	uint32_t time_limit;	/* maximum run time in minutes or INFINITE */
 	uint32_t user_id;	/* user the job runs as */
+	char *wckey;            /* wckey for job */
 	char *work_dir;		/* pathname of working directory */
-
-	uint16_t num_cpu_groups;/* elements in below cpu arrays */
-	uint32_t *cpus_per_node;/* cpus per node */
-	uint32_t *cpu_count_reps;/* how many nodes have same cpu count */
-	uint32_t num_procs;	/* number of processors required by job */
-	uint32_t num_nodes;	/* minimum number of nodes required by job */
-	uint32_t max_nodes;	/* maximum number of nodes usable by job */
-	uint16_t min_sockets;	/* minimum number of sockets per node */
-	uint16_t max_sockets;	/* maximum number of sockets per node */
-	uint16_t min_cores;	/* minimum number of cores per cpu */
-	uint16_t max_cores;	/* maximum number of cores per cpu */
-	uint16_t min_threads;	/* minimum number of threads per core */
-	uint16_t max_threads;	/* maximum number of threads per core */
 } job_info_t;
 
 typedef struct job_info_msg {
@@ -670,21 +706,18 @@ typedef struct job_info_msg {
 
 typedef struct slurm_step_layout {
 	uint32_t node_cnt;	/* node count */
-	uint32_t task_cnt;	/* total number of tasks in the step */
-
 	char *node_list;        /* list of nodes in step */
+	uint16_t plane_size;	/* plane size when task_dist =
+				 * SLURM_DIST_PLANE */
 	/* Array of length "node_cnt". Each element of the array
 	 * is the number of tasks assigned to the corresponding node */
 	uint16_t *tasks;
-
+	uint32_t task_cnt;	/* total number of tasks in the step */
+	uint16_t task_dist;	/* see enum task_dist_state */
 	/* Array (of length "node_cnt") of task ID arrays.  The length
 	 * of each subarray is designated by the corresponding value in
          * the tasks array. */
 	uint32_t **tids;	/* host id => task id mapping */
-
-	uint16_t task_dist;	/* see enum task_dist_state */
-	uint16_t plane_size;	/* plane size when task_dist =
-				 * SLURM_DIST_PLANE */
 } slurm_step_layout_t;
 
 typedef struct slurm_step_io_fds {
@@ -739,38 +772,45 @@ typedef struct srun_user_msg {
 
 typedef struct srun_node_fail_msg {
 	uint32_t job_id;	/* slurm job_id */
-	uint32_t step_id;	/* step_id or NO_VAL */
 	char *nodelist;		/* name of failed node(s) */
+	uint32_t step_id;	/* step_id or NO_VAL */
 } srun_node_fail_msg_t;
 
+typedef struct srun_step_missing_msg {
+	uint32_t job_id;	/* slurm job_id */
+	char *nodelist;		/* name of node(s) lacking this step */
+	uint32_t step_id;	/* step_id or NO_VAL */
+} srun_step_missing_msg_t;
+
 typedef struct {
+	uint16_t ckpt_interval;	/* checkpoint interval in minutes */
+	uint32_t cpu_count;	/* number of required processors */
+	uint16_t exclusive;	/* 1 if CPUs not shared with other steps */
+	uint16_t immediate;	/* 1 if allocate to run or fail immediately, 
+				 * 0 if to be queued awaiting resources */
 	uint32_t job_id;	/* job ID */
-	uid_t uid;
+	uint32_t mem_per_task;	/* memory required per task (MB), 
+				 * use job limit if 0 */
+	char *ckpt_dir;		/* directory to store checkpoint image files */
 	char *name;		/* name of the job step */
+	char *network;		/* network use spec */
+	uint8_t no_kill;	/* 1 if no kill on node failure */
 	uint32_t node_count;	/* number of required nodes */
-	uint32_t cpu_count;	/* number of required processors */
-	uint32_t task_count;	/* number of tasks required */
-	uint16_t relative;	/* first node to use of job's allocation */
-	uint16_t task_dist;	/* see enum task_dist_state, default
-				   is SLURM_DIST_CYCLIC */
-	uint16_t plane_size;	/* plane size when task_dist =
-				   SLURM_DIST_PLANE */
 	char *node_list;	/* list of required nodes */
-	char *network;		/* network use spec */
-	uint16_t immediate;	/* 1 if allocate to run or fail immediately, 
-				 * 0 if to be queued awaiting resources */
-	uint16_t exclusive;	/* 1 if CPUs not shared with other steps */
 	bool overcommit;	/* "true" to allow the allocation of more tasks
-				   to a node than available processors,
-				   "false" to accept at most one task per
-				   processor. "false" by default. */
-	bool no_kill;		/* true of no kill on node failure */
-	uint16_t ckpt_interval;	/* checkpoint interval in minutes */
-	char *ckpt_path;	/* path to store checkpoint image files */
+				 * to a node than available processors,
+				 * "false" to accept at most one task per
+				 * processor. "false" by default. */
+	uint16_t plane_size;	/* plane size when task_dist =
+				 * SLURM_DIST_PLANE */
+	uint16_t relative;	/* first node to use of job's allocation */
+	uint16_t resv_port_cnt;	/* reserve ports if set */
+	uint32_t task_count;	/* number of tasks required */
+	uint16_t task_dist;	/* see enum task_dist_state, default
+				 * is SLURM_DIST_CYCLIC */
+	uid_t uid;		/* user ID */
 	uint16_t verbose_level; /* for extra logging decisions in step
-				   launch api */
-
-	uint16_t mem_per_task;	/* memory required per task (MB), 0=no limit */
+				 * launch api */
 } slurm_step_ctx_params_t;
 
 typedef struct {
@@ -806,17 +846,15 @@ typedef struct {
 	uint16_t max_cores;
 	uint16_t max_threads;
 	uint16_t cpus_per_task;
-	uint16_t ntasks_per_node;
-	uint16_t ntasks_per_socket;
-	uint16_t ntasks_per_core;
 	uint16_t task_dist;
-	uint16_t plane_size;
+	bool preserve_env;
 
 	char *mpi_plugin_name;
 	uint8_t open_mode;
 	uint16_t acctg_freq;
 	bool pty;
-	char *ckpt_path;
+	char *ckpt_dir;
+	char *restart_dir;
 } slurm_step_launch_params_t;
 
 typedef struct {
@@ -833,21 +871,22 @@ typedef struct {
 } slurm_allocation_callbacks_t;
 
 typedef struct {
+	char *ckpt_dir;	        /* path to store checkpoint image files */
+	uint16_t ckpt_interval;	/* checkpoint interval in minutes */
 	uint32_t job_id;	/* job ID */
-	uint16_t step_id;	/* step ID */
-	uint32_t user_id;	/* user the job runs as */
-	uint32_t num_tasks;	/* number of tasks */
-	time_t start_time;	/* step start time */
-	time_t run_time;	/* net run time (factor out time suspended) */
-	char *partition;	/* name of assigned partition */
-	char *nodes;		/* list of nodes allocated to job_step */
 	char *name;		/* name of job step */
 	char *network;		/* network specs for job step */
+	char *nodes;		/* list of nodes allocated to job_step */
 	int *node_inx;		/* list index pairs into node_table for *nodes:
 				 * start_range_1, end_range_1,
 				 * start_range_2, .., -1  */
-	uint16_t ckpt_interval;	/* checkpoint interval in minutes */
-	char *ckpt_path;	/* path to store checkpoint image files */
+	uint32_t num_tasks;	/* number of tasks */
+	char *partition;	/* name of assigned partition */
+	char *resv_ports;	/* ports allocated for MPI */
+	time_t run_time;	/* net run time (factor out time suspended) */
+	time_t start_time;	/* step start time */
+	uint32_t step_id;	/* step ID */
+	uint32_t user_id;	/* user the job runs as */
 } job_step_info_t;
 
 typedef struct job_step_info_response_msg {
@@ -857,21 +896,21 @@ typedef struct job_step_info_response_msg {
 } job_step_info_response_msg_t;
 
 typedef struct node_info {
-	char *name;		/* node name */
-	uint16_t node_state;	/* see enum node_states */
+	char *arch;		/* computer architecture */
+	uint16_t cores;         /* number of cores per CPU */
 	uint16_t cpus;		/* configured count of cpus running on 
 				 * the node */
-	uint16_t used_cpus;	/* count of used cpus on this node */
+	char *features;		/* arbitrary list of features for node */
+	char *name;		/* node name */
+	uint16_t node_state;	/* see enum node_states */
+	char *os;		/* operating system currently running */
+	uint32_t real_memory;	/* configured MB of real memory on the node */
+	char *reason;   	/* reason for node being DOWN or DRAINING */
 	uint16_t sockets;       /* number of sockets per node */
-	uint16_t cores;         /* number of cores per CPU */
 	uint16_t threads;       /* number of threads per core */
-	uint32_t real_memory;	/* configured MB of real memory on the node */
 	uint32_t tmp_disk;	/* configured MB of total disk in TMP_FS */
+	uint16_t used_cpus;	/* count of used cpus on this node */
 	uint32_t weight;	/* arbitrary priority of node for scheduling */
-	char *arch;		/* computer architecture */
-	char *features;		/* arbitrary list of features for node */
-	char *os;		/* operating system currently running */
-	char *reason;   	/* reason for node being DOWN or DRAINING */
 } node_info_t;
 
 typedef struct node_info_msg {
@@ -880,14 +919,29 @@ typedef struct node_info_msg {
 	node_info_t *node_array;	/* the node records */
 } node_info_msg_t;
 
+typedef struct topo_info {
+	uint16_t level;			/* level in hierarchy, leaf=0 */
+	uint32_t link_speed;		/* link speed, arbitrary units */
+	char *name;			/* switch name */
+	char *nodes;			/* names of direct descendant nodes */
+	char *switches;			/* names of direct descendant switches */
+} topo_info_t;
+
+typedef struct topo_info_response_msg {
+	uint32_t record_count;		/* number of records */
+	topo_info_t *topo_array;	/* the switch topology records */
+} topo_info_response_msg_t;
+
 typedef struct job_alloc_info_msg {
 	uint32_t job_id;	/* job ID */
 } job_alloc_info_msg_t;
 
 typedef struct partition_info {
+	char *allow_alloc_nodes;/* list of allowed allocating nodes */
 	char *allow_groups;	/* comma delimited list of groups, 
 				 * null indicates all */
 	uint16_t default_part;	/* 1 if this is default partition */
+	uint32_t default_time;	/* minutes, NO_VAL or INFINITE */
 	uint16_t disable_root_jobs; /* 1 if user root jobs disabled */
 	uint16_t hidden;	/* 1 if partition is hidden by default */
 	uint32_t max_nodes;	/* per job or INFINITE */
@@ -914,8 +968,8 @@ typedef struct delete_partition_msg {
 typedef struct resource_allocation_response_msg {
 	uint32_t job_id;	/* assigned job id */
 	char *node_list;	/* assigned list of nodes */
-	uint16_t num_cpu_groups;/* elements in below cpu arrays */
-	uint32_t *cpus_per_node;/* cpus per node */
+	uint32_t num_cpu_groups;/* elements in below cpu arrays */
+	uint16_t *cpus_per_node;/* cpus per node */
 	uint32_t *cpu_count_reps;/* how many nodes have same cpu count */
 	uint32_t node_cnt;	/* count of nodes */
 	uint32_t error_code;	/* error code for warning message */
@@ -926,8 +980,8 @@ typedef struct resource_allocation_response_msg {
 typedef struct job_alloc_info_response_msg {
 	uint32_t job_id;	/* assigned job id */
 	char *node_list;	/* assigned list of nodes */
-	uint16_t num_cpu_groups;/* elements in below cpu arrays */
-	uint32_t *cpus_per_node;/* cpus per node */
+	uint32_t num_cpu_groups;/* elements in below cpu arrays */
+	uint16_t *cpus_per_node;/* cpus per node */
 	uint32_t *cpu_count_reps;/* how many nodes have same cpu count */
 	uint32_t node_cnt;	/* count of nodes */
 	slurm_addr *node_addr;	/* network addresses */
@@ -942,15 +996,81 @@ typedef struct partition_info_msg {
 	partition_info_t *partition_array; /* the partition records */
 } partition_info_msg_t;
 
+/*
+ * Resource reservation data structures.
+ * Create, show, modify and delete functions are required
+ */
+#define RESERVE_FLAG_MAINT	0x0001		/* Set MAINT flag */
+#define RESERVE_FLAG_NO_MAINT	0x0002		/* Clear MAINT flag */
+#define RESERVE_FLAG_DAILY	0x0004		/* Set DAILY flag */
+#define RESERVE_FLAG_NO_DAILY	0x0008		/* Clear DAILY flag */
+#define RESERVE_FLAG_WEEKLY	0x0010		/* Set WEEKLY flag */
+#define RESERVE_FLAG_NO_WEEKLY	0x0020		/* Clear WEEKLY flag */
+#define RESERVE_FLAG_SPEC_NODES 0x8000		/* Contains specific nodes */
+
+typedef struct reserve_info {
+	char *accounts;		/* names of accounts permitted to use */
+	time_t end_time;	/* end time of reservation */
+	char *features;		/* required node features */
+	uint16_t flags;		/* see RESERVE_FLAG_* above */
+	char *name;		/* name of reservation */
+	uint32_t node_cnt;	/* count of nodes required */
+	int *node_inx;		/* list index pairs into node_table for *nodes:
+				 * start_range_1, end_range_1,
+				 * start_range_2, .., -1  */
+	char *node_list;	/* list of reserved nodes or ALL */
+	char *partition;	/* name of partition to be used */
+	time_t start_time;	/* start time of reservation */
+	char *users;		/* names of users permitted to use */
+} reserve_info_t;
+
+typedef struct reserve_info_msg {
+	time_t last_update;	/* time of latest info */
+	uint32_t record_count;	/* number of records */
+	reserve_info_t *reservation_array; /* the reservation records */
+} reserve_info_msg_t;
+
+typedef struct resv_desc_msg {
+	char *accounts;		/* names of accounts permitted to use */
+	uint32_t duration;	/* duration of reservation in minutes */
+	time_t end_time;	/* end time of reservation */
+	char *features;		/* required node features */
+	uint16_t flags;		/* see RESERVE_FLAG_* above */
+	char *name;		/* name of reservation (optional on create) */
+	uint32_t node_cnt;	/* count of nodes required */
+	char *node_list;	/* list of reserved nodes or ALL */
+	char *partition;	/* name of partition to be used */
+	time_t start_time;	/* start time of reservation */
+	char *users;		/* names of users permitted to use */
+} resv_desc_msg_t;
+
+typedef struct reserve_response_msg {
+	char *name;		/* name of reservation */
+} reserve_response_msg_t;
+
+typedef struct reservation_name_msg {
+	char *name;		/* name of reservation just created or
+				 * to be deleted */
+} reservation_name_msg_t;
+
+
+#define DEBUG_FLAG_SELECT_TYPE	0x00000001	/* SelectType plugin */
+#define DEBUG_FLAG_STEPS	0x00000002	/* slurmctld steps */
+#define DEBUG_FLAG_TRIGGERS	0x00000004	/* slurmctld triggers */
+#define DEBUG_FLAG_CPU_BIND	0x00000008	/* CPU binding */
+#define DEBUG_FLAG_WIKI		0x00000010	/* wiki communications */
+
 typedef struct slurm_ctl_conf {
 	time_t last_update;	/* last update time of the build parameters */
 	uint16_t accounting_storage_enforce; /* job requires valid association:
-					* user/account/partition/cluster */
-	char *accounting_storage_host; /* accounting storage host */
-	char *accounting_storage_loc; /* accounting storage (db table)
-				       * location */
-	char *accounting_storage_pass; /* accounting storage
-					    password */
+					 * user/account/partition/cluster */
+	char *accounting_storage_backup_host;	/* accounting storage
+						 * backup host */
+	char *accounting_storage_host;	/* accounting storage host */
+	char *accounting_storage_loc;	/* accounting storage (db table)
+					 * location */
+	char *accounting_storage_pass;	/* accounting storage
+					 * password */
 	uint32_t accounting_storage_port;/* node accounting storage port */
 	char *accounting_storage_type; /* accounting storage type */
 	char *accounting_storage_user; /* accounting storage user */
@@ -962,9 +1082,12 @@ typedef struct slurm_ctl_conf {
 	uint16_t cache_groups;	/* cache /etc/groups to avoid initgroups(2) */
 	char *checkpoint_type;	/* checkpoint plugin type */
 	char *cluster_name;     /* general name of the entire cluster */
+	uint16_t complete_wait;	/* seconds to wait for job completion before
+				 * scheduling another job */
 	char *control_addr;	/* comm path of slurmctld primary server */
 	char *control_machine;	/* name of slurmctld primary server */
 	char *crypto_type;	/* cryptographic signature plugin */
+	uint32_t debug_flags;	/* see DEBUG_FLAG_* above for values */
 	uint32_t def_mem_per_task; /* default MB memory per spawned task */
 	uint16_t disable_root_jobs; /* if set then user root can't run jobs */
 	uint16_t enforce_part_limits;	/* if set, reject job exceeding 
@@ -972,18 +1095,19 @@ typedef struct slurm_ctl_conf {
 	char *epilog;		/* pathname of job epilog */
 	uint32_t epilog_msg_time;  /* usecs for slurmctld to process an
 				 * epilog complete message */
+	char *epilog_slurmctld;	/* pathname of job epilog run by slurmctld */
 	uint16_t fast_schedule;	/* 1 to *not* check configurations by node
 				 * (only check configuration file, faster) */
 	uint32_t first_job_id;	/* first slurm generated job_id to assign */
-	uint32_t next_job_id;	/* next slurm generated job_id to assign */
 	uint16_t get_env_timeout; /* timeout for srun --get-user-env option */
 	uint16_t health_check_interval;	/* secs between health checks */
 	char * health_check_program;	/* pathname of health check program */
 	uint16_t inactive_limit;/* seconds of inactivity before a
 				 * inactive resource allocation is released */
-	char *job_acct_gather_type; /* job accounting gather type */
 	uint16_t job_acct_gather_freq; /* poll frequency for job accounting 
 					* gather plugins */
+	char *job_acct_gather_type; /* job accounting gather type */
+	char *job_ckpt_dir;	/* directory saving job record checkpoint */
 	char *job_comp_host;	/* job completion logging host */
 	char *job_comp_loc;	/* job completion logging location */
 	char *job_comp_pass;	/* job completion storage password */
@@ -994,6 +1118,9 @@ typedef struct slurm_ctl_conf {
 	char *job_credential_public_certificate;/* path to public certificate*/
 	uint16_t job_file_append; /* if set, append to stdout/err file */
 	uint16_t job_requeue;	/* If set, jobs get requeued on node failure */
+	uint16_t kill_on_bad_exit; /* If set, the job will be
+				 * terminated immediately when one of
+				 * the processes is aborted or crashed */ 
 	uint16_t kill_wait;	/* seconds between SIGXCPU to SIGKILL 
 				 * on job termination */
 	char *licenses;		/* licenses available on this cluster */
@@ -1003,23 +1130,47 @@ typedef struct slurm_ctl_conf {
 	uint16_t min_job_age;	/* COMPLETED jobs over this age (secs) 
 	                         * purged from in memory records */
 	char *mpi_default;	/* Default version of MPI in use */
+	char *mpi_params;	/* MPI parameters */
 	uint16_t msg_timeout;	/* message timeout */
+	uint32_t next_job_id;	/* next slurm generated job_id to assign */
 	char *node_prefix;      /* prefix of nodes in partition, only set in 
 				   bluegene clusters NULL otherwise */
+	uint16_t over_time_limit; /* job's time limit can be exceeded by this
+				   * number of minutes before cancellation */
 	char *plugindir;	/* pathname to plugins */
-	char *plugstack;        /* pathname to plugin stack config file */
+	char *plugstack;        /* pathname to plugin stack config
+				 * file */
+	uint32_t priority_decay_hl; /* priority decay half life in
+				     * seconds */
+	uint16_t priority_favor_small; /* favor small jobs over large */
+	uint32_t priority_max_age; /* time when not to add any more
+				    * priority to a job if reached */
+	uint16_t priority_reset_period; /* when to clear usage,
+					 * see PRIORITY_RESET_* */
+	char *priority_type;    /* priority type plugin */
+	uint32_t priority_weight_age; /* weight for age factor */
+	uint32_t priority_weight_fs; /* weight for Fairshare factor */
+	uint32_t priority_weight_js; /* weight for Job Size factor */
+	uint32_t priority_weight_part; /* weight for Partition factor */
+	uint32_t priority_weight_qos; /* weight for QOS factor */
 	uint16_t private_data;	/* block viewing of information, 
 				 * see PRIVATE_DATA_* */
 	char *proctrack_type;	/* process tracking plugin type */
-	char *prolog;		/* pathname of job prolog */
+	char *prolog;		/* pathname of job prolog run by slurmd */
+	char *prolog_slurmctld;	/* pathname of job prolog run by slurmctld */
 	uint16_t propagate_prio_process; /* 1 if process priority should
 				          * be propagated */
         char *propagate_rlimits;/* Propagate (all/specific) resource limits */
         char *propagate_rlimits_except;/* Propagate all rlimits except these */
-	uint16_t resume_rate;	/* nodes to make full power, per minute */
 	char *resume_program;	/* program to make nodes full power */
+	uint16_t resume_rate;	/* nodes to make full power, per minute */
+	uint16_t resume_timeout;/* time required in order to perform a node
+				 * resume operation */
+	uint16_t resv_over_run;	/* how long a running job can exceed 
+				 * reservation time */
 	uint16_t ret2service;	/* 1 return DOWN node to service at 
 				 * registration */
+	char *salloc_default_command; /* default salloc command */
 	char *sched_params;	/* SchedulerParameters OR 
 				 * contents of scheduler plugin config file */
 	uint16_t sched_time_slice;	/* gang scheduler slice time, secs */
@@ -1028,10 +1179,14 @@ typedef struct slurm_ctl_conf {
 	uint16_t schedrootfltr;	/* 1 if rootOnly partitions should be
 				 * filtered from scheduling (if needed) */
 	char *select_type;	/* type of node selector to use */
+	void *select_conf_key_pairs; /* key-pair list which can be
+				      * listed with slurm_print_key_pairs */
 	uint16_t select_type_param; /* Parameters 
 				 * describing the select_type plugin */
 	uint32_t slurm_user_id;	/* uid of slurm_user_name */
 	char *slurm_user_name;	/* user that slurmctld runs as */
+	uint32_t slurmd_user_id;/* uid of slurmd_user_name */
+	char *slurmd_user_name;	/* user that slurmd runs as */
 	uint16_t slurmctld_debug; /* slurmctld logging level */
 	char *slurmctld_logfile;/* where slurmctld error log gets written */
 	char *slurmctld_pidfile;/* where to put slurmctld pidfile         */
@@ -1047,6 +1202,7 @@ typedef struct slurm_ctl_conf {
 				 * considering node DOWN */
 	char *slurm_conf;	/* pathname of slurm config file */
 	char *srun_epilog;      /* srun epilog program */
+	uint16_t srun_io_timeout; /* timeout for non-responsive tasks */
 	char *srun_prolog;      /* srun prolog program */
 	char *state_save_location;/* pathname of slurmctld state save
 				 * directory */
@@ -1055,12 +1211,15 @@ typedef struct slurm_ctl_conf {
 	char *suspend_program;	/* program to make nodes power saving */
 	uint16_t suspend_rate;	/* nodes to make power saving, per minute */
 	uint16_t suspend_time;	/* node idle for this long before power save mode */
+	uint16_t suspend_timeout;/* time required in order to perform a node
+				 * suspend operation */
 	char *switch_type;	/* switch or interconnect type */
 	char *task_epilog;	/* pathname of task launch epilog */
 	char *task_plugin;	/* task launch plugin */
 	uint16_t task_plugin_param;	/* see TASK_PARAM_* */
 	char *task_prolog;	/* pathname of task launch prolog */
 	char *tmp_fs;		/* pathname of temporary file system */
+	char *topology_plugin;	/* network topology plugin */
 	uint16_t track_wckey;    /* see if we are using wckey or not */
 	uint16_t tree_width;    /* number of threads per node to span */
 	char *unkillable_program; /* program run by the slurmstepd when
@@ -1070,7 +1229,9 @@ typedef struct slurm_ctl_conf {
                                       * they are considered "unkillable". */
 	uint16_t use_pam;	/* enable/disable PAM support */
 	uint16_t wait_time;	/* default job --wait time */
-	char *salloc_default_command; /* default salloc command */
+	uint16_t z_16;		/* reserved for future use */
+	uint32_t z_32;		/* reserved for future use */
+	char *z_char;		/* reserved for future use */
 } slurm_ctl_conf_t;
 
 typedef struct slurmd_status_msg {
@@ -1101,6 +1262,7 @@ typedef struct slurm_update_node_msg {
 	uint16_t node_state;	/* see enum node_states */
 	char *reason;   	/* reason for node being DOWN or DRAINING */
 	char *features;		/* new feature for node */
+	uint32_t weight;	/* new weight for node */
 } update_node_msg_t;
 
 typedef struct partition_info update_part_msg_t;
@@ -1457,12 +1619,10 @@ extern void slurm_step_launch_params_t_init
 /*
  * slurm_step_launch - launch a parallel job step
  * IN ctx - job step context generated by slurm_step_ctx_create
- * IN launcher_host - address used for PMI communications
  * IN callbacks - Identify functions to be called when various events occur
  * RET SLURM_SUCCESS or SLURM_ERROR (with errno set)
  */
 extern int slurm_step_launch PARAMS((slurm_step_ctx_t *ctx,
-	char *launcher_host,
 	const slurm_step_launch_params_t *params,
 	const slurm_step_launch_callbacks_t *callbacks));
 
@@ -1556,6 +1716,16 @@ extern void slurm_free_slurmd_status PARAMS((
 void slurm_print_slurmd_status PARAMS((
 		FILE* out, slurmd_status_t * slurmd_status_ptr));
 
+/*
+ * slurm_print_key_pairs - output the contents of key_pairs 
+ *	which is a list of opaque data type config_key_pair_t
+ * IN out - file to write to
+ * IN key_pairs - List containing key pairs to be printed
+ * IN title - title of key pair list
+ */
+void slurm_print_key_pairs PARAMS((
+                FILE* out, void* key_pairs, char *title));
+
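+/*
+ * Example (a minimal sketch; assumes "conf" is a slurm_ctl_conf_t pointer
+ * obtained from slurm_load_ctl_conf):
+ *
+ *	slurm_print_key_pairs (stdout, conf->select_conf_key_pairs,
+ *			       "\nSelect plugin configuration:");
+ */
+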
 /*****************************************************************************\
  *	SLURM JOB CONTROL CONFIGURATION READ/PRINT/UPDATE FUNCTIONS
 \*****************************************************************************/
@@ -1804,6 +1974,12 @@ extern void slurm_print_node_table PARAMS((
 extern char *slurm_sprint_node_table PARAMS(( node_info_t * node_ptr, 
 					     int one_liner ));
 
+/*
+ * slurm_init_update_node_msg - initialize node update message
+ * OUT update_node_msg - user defined node descriptor
+ */
+void slurm_init_update_node_msg (update_node_msg_t * update_node_msg);
+
 /*
  * slurm_update_node - issue RPC to a node's configuration per request, 
  *	only usable by user root
@@ -1813,6 +1989,50 @@ extern char *slurm_sprint_node_table PARAMS(( node_info_t * node_ptr,
 extern int slurm_update_node PARAMS(( update_node_msg_t * node_msg ));
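+
+/*
+ * Example (a minimal sketch, assuming the node_names member of
+ * update_node_msg_t; error handling omitted):
+ *
+ *	update_node_msg_t msg;
+ *	slurm_init_update_node_msg (&msg);
+ *	msg.node_names = "linux1";
+ *	msg.node_state = NODE_STATE_DOWN;
+ *	msg.reason     = "bad memory";
+ *	slurm_update_node (&msg);
+ */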
 
 
+/*****************************************************************************\
+ *	SLURM SWITCH TOPOLOGY CONFIGURATION READ/PRINT FUNCTIONS
+\*****************************************************************************/
+
+/*
+ * slurm_load_topo - issue RPC to get all Slurm switch topology configuration
+ *	information
+ * IN topo_info_msg_pptr - place to store a topology configuration pointer
+ * RET 0 or a slurm error code
+ * NOTE: free the response using slurm_free_topo_info_msg
+ */
+extern int slurm_load_topo PARAMS(( 
+	topo_info_response_msg_t **topo_info_msg_pptr ));
+
+/*
+ * slurm_free_topo_info_msg - free the switch topology configuration 
+ *	information response message
+ * IN msg - pointer to switch topology configuration response message
+ * NOTE: buffer is loaded by slurm_load_topo.
+ */
+extern void slurm_free_topo_info_msg PARAMS(( topo_info_response_msg_t *msg ));
+
+/*
+ * slurm_print_topo_info_msg - output all switch topology configuration
+ *	information based upon the message as loaded using
+ *	slurm_load_topo
+ * IN out - file to write to
+ * IN topo_info_msg_ptr - switch topology information message pointer
+ * IN one_liner - print as a single line if not zero
+ */
+extern void slurm_print_topo_info_msg PARAMS(( 
+	FILE * out, topo_info_response_msg_t *topo_info_msg_ptr, int one_liner )) ;
+
+/*
+ * slurm_print_topo_record - output information about a specific Slurm topology
+ *	record based upon message as loaded using slurm_load_topo
+ * IN out - file to write to
+ * IN topo_ptr - an individual switch information record pointer
+ * IN one_liner - print as a single line if not zero
+ */
+extern void slurm_print_topo_record PARAMS((FILE * out, topo_info_t *topo_ptr, 
+				    int one_liner ));
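+
+/*
+ * Example (a minimal sketch):
+ *
+ *	topo_info_response_msg_t *topo_msg;
+ *	if (slurm_load_topo (&topo_msg) == 0) {
+ *		slurm_print_topo_info_msg (stdout, topo_msg, 0);
+ *		slurm_free_topo_info_msg (topo_msg);
+ *	}
+ */
+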
 /*****************************************************************************\
  *	SLURM PARTITION CONFIGURATION READ/PRINT/UPDATE FUNCTIONS
 \*****************************************************************************/
@@ -1820,7 +2040,7 @@ extern int slurm_update_node PARAMS(( update_node_msg_t * node_msg ));
 /* 
  * slurm_init_part_desc_msg - initialize partition descriptor with 
  *	default values 
- * OUT job_desc_msg - user defined partition descriptor
+ * IN/OUT update_part_msg - user defined partition descriptor
  */
 extern void slurm_init_part_desc_msg PARAMS((update_part_msg_t * update_part_msg ));
 
@@ -1877,6 +2097,14 @@ extern void slurm_print_partition_info PARAMS((
  */
 extern char *slurm_sprint_partition_info PARAMS(( 
 		partition_info_t * part_ptr, int one_liner ));
+
+/*
+ * slurm_create_partition - create a new partition, only usable by user root
+ * IN part_msg - description of partition configuration
+ * RET 0 on success, otherwise return -1 and set errno to indicate the error
+ */
+extern int slurm_create_partition PARAMS(( update_part_msg_t * part_msg ));
+
 /*
  * slurm_update_partition - issue RPC to update a partition's configuration
  *	per request, only usable by user root
@@ -1893,6 +2121,95 @@ extern int slurm_update_partition PARAMS(( update_part_msg_t * part_msg ));
  */
 extern int slurm_delete_partition PARAMS(( delete_part_msg_t * part_msg ));
 
+/*****************************************************************************\
+ *	SLURM RESERVATION CONFIGURATION READ/PRINT/UPDATE FUNCTIONS
+\*****************************************************************************/
+
+/*
+ * slurm_init_resv_desc_msg - initialize reservation descriptor with 
+ *	default values 
+ * OUT update_resv_msg - user defined reservation descriptor
+ */
+extern void slurm_init_resv_desc_msg PARAMS(( 
+		resv_desc_msg_t * update_resv_msg ));
+
+/*
+ * slurm_create_reservation - create a new reservation, only usable by user root
+ * IN resv_msg - description of reservation
+ * RET name of reservation on success (caller must free the memory),
+ *	otherwise return NULL and set errno to indicate the error
+ */
+extern char * slurm_create_reservation PARAMS((
+		resv_desc_msg_t * resv_msg ));
+
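+/*
+ * Example (a minimal sketch; the field values are illustrative):
+ *
+ *	resv_desc_msg_t resv;
+ *	char *name;
+ *	slurm_init_resv_desc_msg (&resv);
+ *	resv.start_time = time (NULL) + 3600;
+ *	resv.end_time   = resv.start_time + 3600;
+ *	resv.node_cnt   = 2;
+ *	resv.users      = "alice";
+ *	if ((name = slurm_create_reservation (&resv)))
+ *		free (name);
+ */
+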
+/*
+ * slurm_update_reservation - modify an existing reservation, only usable by 
+ *	user root
+ * IN resv_msg - description of reservation
+ * RET 0 on success, otherwise return -1 and set errno to indicate the error
+ */
+extern int slurm_update_reservation PARAMS((resv_desc_msg_t * resv_msg));
+
+/*
+ * slurm_delete_reservation - issue RPC to delete a reservation, only usable 
+ *	by user root
+ * IN resv_msg - description of reservation to delete
+ * RET 0 on success, otherwise return -1 and set errno to indicate the error
+ */
+extern int slurm_delete_reservation PARAMS(( 
+		reservation_name_msg_t * resv_msg ));
+
+/*
+ * slurm_load_reservations - issue RPC to get all slurm reservation 
+ *	configuration information if changed since update_time 
+ * IN update_time - time of current configuration data
+ * OUT resp - place to store a reservation configuration pointer
+ * RET 0 or a slurm error code
+ * NOTE: free the response using slurm_free_reservation_info_msg
+ */
+extern int slurm_load_reservations PARAMS(( time_t update_time, 
+		reserve_info_msg_t **resp ));
+
+/*
+ * slurm_print_reservation_info_msg - output information about all Slurm
+ *	reservations based upon message as loaded using slurm_load_reservations
+ * IN out - file to write to
+ * IN resv_info_ptr - reservation information message pointer
+ * IN one_liner - print as a single line if true
+ */
+extern void slurm_print_reservation_info_msg PARAMS(( FILE* out,
+		reserve_info_msg_t * resv_info_ptr, int one_liner ));
+
+/*
+ * slurm_print_reservation_info - output information about a specific Slurm
+ *	reservation based upon message as loaded using slurm_load_reservations
+ * IN out - file to write to
+ * IN resv_ptr - an individual reservation information record pointer
+ * IN one_liner - print as a single line if true
+ */
+extern void slurm_print_reservation_info PARAMS(( FILE* out,
+		reserve_info_t * resv_ptr, int one_liner ));
+
+/*
+ * slurm_sprint_reservation_info - output information about a specific Slurm 
+ *	reservation based upon message as loaded using slurm_load_reservations
+ * IN resv_ptr - an individual reservation information record pointer
+ * IN one_liner - print as a single line if true
+ * RET out - char * containing formatted output (must be freed after call)
+ *           NULL is returned on failure.
+ */
+extern char *slurm_sprint_reservation_info PARAMS(( reserve_info_t * resv_ptr,
+		int one_liner ));
+
+/*
+ * slurm_free_reservation_info_msg - free the reservation information 
+ *	response message
+ * IN msg - pointer to reservation information response message
+ * NOTE: buffer is loaded by slurm_load_reservations
+ */
+extern void slurm_free_reservation_info_msg PARAMS(( 
+	reserve_info_msg_t * resv_info_ptr ));
+
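Taken together, the declarations above define the reservation life cycle: initialize a descriptor, create the reservation, then free the returned name as the create comment requires. A minimal sketch follows; the descriptor fields used (start_time, duration, node_cnt, users) and the duration units are assumptions about resv_desc_msg_t, not taken from this header:

#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <slurm/slurm.h>

/* Sketch only: request a reservation starting now; field names and
 * units (duration assumed to be in minutes) are illustrative. */
static int make_reservation(void)
{
	resv_desc_msg_t resv_msg;
	char *resv_name;

	slurm_init_resv_desc_msg(&resv_msg);
	resv_msg.start_time = time(NULL);
	resv_msg.duration   = 60;	/* assumed minutes */
	resv_msg.node_cnt   = 4;
	resv_msg.users      = "root";

	resv_name = slurm_create_reservation(&resv_msg);
	if (resv_name == NULL) {
		slurm_perror("slurm_create_reservation");
		return -1;
	}
	printf("created reservation %s\n", resv_name);
	free(resv_name);	/* caller must free the name */
	return 0;
}
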
 /*****************************************************************************\
  *	SLURM PING/RECONFIGURE/SHUTDOWN FUNCTIONS
 \*****************************************************************************/
@@ -1922,6 +2239,14 @@ extern int slurm_reconfigure PARAMS(( void ));
  */
 extern int slurm_shutdown PARAMS(( uint16_t options ));
 
+/*
+ * slurm_takeover - issue RPC to have the Slurm backup controller (slurmctld)
+ *	take over from the primary controller.
+ *
+ * RET 0 or a slurm error code
+ */
+extern int slurm_takeover PARAMS(( void ));
+
 /*
  * slurm_set_debug_level - issue RPC to set slurm controller debug level
  * IN debug_level - requested debug level
@@ -1987,7 +2312,7 @@ extern int slurm_checkpoint_disable PARAMS(( uint32_t job_id,
  * RET 0 or a slurm error code
  */
 extern int slurm_checkpoint_enable PARAMS(( uint32_t job_id, 
-		uint32_t step_id ));
+					    uint32_t step_id ));
 
 /*
  * slurm_checkpoint_create - initiate a checkpoint request for some job step.
@@ -1995,10 +2320,13 @@ extern int slurm_checkpoint_enable PARAMS(( uint32_t job_id,
  * IN job_id  - job on which to perform operation
  * IN step_id - job step on which to perform operation
  * IN max_wait - maximum wait for operation to complete, in seconds
+ * IN image_dir - directory to store image files
  * RET 0 or a slurm error code
  */
 extern int slurm_checkpoint_create PARAMS(( uint32_t job_id, 
-		uint32_t step_id, uint16_t max_wait ));
+					    uint32_t step_id, 
+					    uint16_t max_wait, 
+					    char *image_dir ));
 
 /*
  * slurm_checkpoint_vacate - initiate a checkpoint request for some job step.
@@ -2006,19 +2334,26 @@ extern int slurm_checkpoint_create PARAMS(( uint32_t job_id,
  * IN job_id  - job on which to perform operation
  * IN step_id - job step on which to perform operation
  * IN max_wait - maximum wait for operation to complete, in seconds
+ * IN image_dir - directory to store image files
  * RET 0 or a slurm error code
  */
 extern int slurm_checkpoint_vacate PARAMS(( uint32_t job_id, 
-		uint32_t step_id, uint16_t max_wait ));
+					    uint32_t step_id, 
+					    uint16_t max_wait, 
+					    char *image_dir ));
 
 /*
  * slurm_checkpoint_restart - restart execution of a checkpointed job step.
  * IN job_id  - job on which to perform operation
  * IN step_id - job step on which to perform operation
+ * IN stick - if non-zero, restart the step on the same nodes it
+ *	previously ran on
+ * IN image_dir - directory to find checkpoint image files
  * RET 0 or a slurm error code
  */
 extern int slurm_checkpoint_restart PARAMS(( uint32_t job_id, 
-		uint32_t step_id ));
+					     uint32_t step_id, 
+					     uint16_t stick, 
+					     char *image_dir ));
 
 /*
  * slurm_checkpoint_complete - note the completion of a job step's checkpoint
@@ -2031,8 +2366,10 @@ extern int slurm_checkpoint_restart PARAMS(( uint32_t job_id,
  * RET 0 or a slurm error code
  */
 extern int slurm_checkpoint_complete PARAMS(( uint32_t job_id, 
-		uint32_t step_id, time_t begin_time, 
-		uint32_t error_code, char *error_msg ));
+					      uint32_t step_id, 
+					      time_t begin_time, 
+					      uint32_t error_code, 
+					      char *error_msg ));
 
 /*
  * slurm_checkpoint_task_complete - note the completion of a task's checkpoint
@@ -2045,9 +2382,12 @@ extern int slurm_checkpoint_complete PARAMS(( uint32_t job_id,
  * IN error_msg - error message, preserved for highest error_code
  * RET 0 or a slurm error code
  */
-extern int slurm_checkpoint_task_complete (uint32_t job_id, uint32_t step_id,
-					   uint32_t task_id, time_t begin_time, 
-					   uint32_t error_code, char *error_msg);
+extern int slurm_checkpoint_task_complete PARAMS(( uint32_t job_id, 
+						   uint32_t step_id,
+						   uint32_t task_id, 
+						   time_t begin_time, 
+						   uint32_t error_code, 
+						   char *error_msg ));
 
 /*
  * slurm_checkpoint_error - gather error information for the last checkpoint
@@ -2062,18 +2402,24 @@ extern int slurm_checkpoint_task_complete (uint32_t job_id, uint32_t step_id,
  *	must be freed by the caller to prevent memory leak
  * RET 0 or a slurm error code
  */
-extern int slurm_checkpoint_error PARAMS(( uint32_t job_id, 
-		uint32_t step_id, uint32_t *error_code, 
-		char **error_msg ));
+extern int slurm_checkpoint_error PARAMS(( uint32_t job_id, uint32_t step_id, 
+					   uint32_t *error_code, 
+					   char **error_msg ));
 
 /*
- * slurm_get_checkpoint_file_path - return the checkpoint file
- *      path of this process, creating the directory if needed.
- * IN len: length of the file path buffer
- * OUT buf: buffer to store the checkpoint file path
- * RET: 0 on success, -1 on failure with errno set
+ * slurm_checkpoint_tasks - send checkpoint request to the tasks of
+ *     the specified step
+ * IN job_id: job ID of the step
+ * IN step_id: step ID of the step
+ * IN begin_time: time at which the checkpoint operation begins
+ * IN image_dir: location to store checkpoint images; passed to the plugin
+ * IN max_wait: seconds to wait for the operation to complete
+ * IN nodelist: nodes to which the request is sent
+ * RET: 0 on success, non-zero on failure with errno set
  */
-extern int slurm_get_checkpoint_file_path(size_t len, char *buf);
+extern int slurm_checkpoint_tasks PARAMS(( uint32_t job_id, uint16_t step_id, 
+					   time_t begin_time, char *image_dir, 
+					   uint16_t max_wait, char *nodelist));
 
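The checkpoint entry points above now carry an image_dir argument (and, for restart, a stick flag). A brief sketch of how they compose, assuming step id 0 and a writable /tmp/ckpt; both values are placeholders:

#include <slurm/slurm.h>

/* Sketch only: checkpoint step 0 of job_id, then restart it from the
 * saved images on the same nodes (stick != 0). */
static int ckpt_and_restart(uint32_t job_id)
{
	/* write images under /tmp/ckpt, wait up to 60 seconds */
	if (slurm_checkpoint_create(job_id, 0, 60, "/tmp/ckpt")) {
		slurm_perror("slurm_checkpoint_create");
		return -1;
	}
	/* later: resume from the images, pinned to the prior nodes */
	if (slurm_checkpoint_restart(job_id, 0, 1, "/tmp/ckpt")) {
		slurm_perror("slurm_checkpoint_restart");
		return -1;
	}
	return 0;
}
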
 /*****************************************************************************\
  *      SLURM HOSTLIST FUNCTIONS
@@ -2123,6 +2469,64 @@ extern int slurm_get_checkpoint_file_path(size_t len, char *buf);
  */
 extern hostlist_t slurm_hostlist_create PARAMS(( const char *hostlist ));
 
+/* slurm_hostlist_count():
+ *
+ * Return the number of hosts in hostlist hl.
+ */ 
+extern int slurm_hostlist_count PARAMS((hostlist_t hl));
+
+/*
+ * slurm_hostlist_destroy():
+ *
+ * Destroy a hostlist object. Frees all memory allocated to the hostlist.
+ */
+extern void slurm_hostlist_destroy PARAMS(( hostlist_t hl ));
+
+/* slurm_hostlist_find():
+ *
+ * Searches hostlist hl for the first host matching hostname 
+ * and returns position in list if found. 
+ *
+ * Returns -1 if host is not found.
+ *
+ */
+extern int slurm_hostlist_find PARAMS((hostlist_t hl, const char *hostname));
+
+/* slurm_hostlist_push():
+ *
+ * Push a string representation of hostnames onto a hostlist.
+ *
+ * The hosts argument may take the same form as in slurm_hostlist_create()
+ *
+ * Returns the number of hostnames inserted into the list, 
+ * or 0 on failure.
+ */
+extern int slurm_hostlist_push PARAMS((hostlist_t hl, const char *hosts));
+
+/* slurm_hostlist_push_host():
+ *
+ * Push a single host onto the hostlist hl. 
+ * This function is more efficient than slurm_hostlist_push() for a single
+ * hostname, since the argument does not need to be checked for ranges.
+ *
+ * return value is 1 for success, 0 for failure.
+ */
+extern int slurm_hostlist_push_host PARAMS((hostlist_t hl, const char *host));
+
+/* slurm_hostlist_ranged_string():
+ *
+ * Write the string representation of the hostlist hl into buf,
+ * writing at most n chars. Returns the number of bytes written,
+ * or -1 if truncation occurred.
+ *
+ * The result will be NULL terminated.
+ * 
+ * slurm_hostlist_ranged_string() will write a bracketed hostlist representation
+ * where possible.
+ */
+extern ssize_t slurm_hostlist_ranged_string PARAMS((hostlist_t hl,
+						    size_t n, char *buf));
+
 /*
  * slurm_hostlist_shift():
  *
@@ -2134,12 +2538,12 @@ extern hostlist_t slurm_hostlist_create PARAMS(( const char *hostlist ));
  */
 extern char * slurm_hostlist_shift PARAMS(( hostlist_t hl ));
 
-/*
- * slurm_hostlist_destroy():
+/* slurm_hostlist_uniq():
  *
- * Destroy a hostlist object. Frees all memory allocated to the hostlist.
+ * Sort the hostlist hl and remove duplicate entries.
  */
-extern void slurm_hostlist_destroy PARAMS(( hostlist_t hl ));
+extern void slurm_hostlist_uniq PARAMS((hostlist_t hl));
 
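The wrappers above expose the common hostlist operations through the public API. A short sketch of how they compose; the host names are placeholders:

#include <stdio.h>
#include <slurm/slurm.h>

/* Sketch only: expand "tux[1-3]", add a duplicate, de-duplicate, and
 * print the bracketed ranged form. */
static void hostlist_demo(void)
{
	char buf[256];
	hostlist_t hl = slurm_hostlist_create("tux[1-3]");

	if (hl == NULL)
		return;
	slurm_hostlist_push_host(hl, "tux3");	/* duplicate on purpose */
	slurm_hostlist_uniq(hl);		/* sort and drop duplicates */
	if (slurm_hostlist_ranged_string(hl, sizeof(buf), buf) != -1)
		printf("%d hosts: %s\n", slurm_hostlist_count(hl), buf);
	slurm_hostlist_destroy(hl);
}
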
 /*****************************************************************************\
  *      SLURM TRIGGER FUNCTIONS
diff --git a/slurm/slurm_errno.h b/slurm/slurm_errno.h
index 0cb0d1b8283833c11ba310b5e1e54277a4c62dc9..2a788973fe3f83db13424dca5b2306d39c443971 100644
--- a/slurm/slurm_errno.h
+++ b/slurm/slurm_errno.h
@@ -1,14 +1,16 @@
 /*****************************************************************************\
  *  slurm_errno.h - error codes and functions for slurm
  ******************************************************************************
- *  Copyright (C) 2002-2006 The Regents of the University of California.
+ *  Copyright (C) 2002-2007 The Regents of the University of California.
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Kevin Tew <tew1@llnl.gov>, 
  *	Jim Garlick <garlick@llnl.gov>, et. al.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -16,7 +18,7 @@
  *  any later version.
  *
  *  In addition, as a special exception, the copyright holders give permission 
- *  to link the code of portions of this program with the OpenSSL library under 
+ *  to link the code of portions of this program with the OpenSSL library under
  *  certain conditions as described in each individual source file, and 
  *  distribute linked combinations including the two. You must obey the GNU 
  *  General Public License in all respects for all of the code used other than 
@@ -159,7 +161,16 @@ enum {
 	ESLURM_NEED_RESTART,
 	ESLURM_ACCOUNTING_POLICY,
 	ESLURM_INVALID_TIME_LIMIT,
+	ESLURM_RESERVATION_ACCESS,
+	ESLURM_RESERVATION_INVALID,
+	ESLURM_INVALID_TIME_VALUE,
+	ESLURM_RESERVATION_BUSY,
+	ESLURM_RESERVATION_NOT_USABLE,
 	ESLURM_INVALID_WCKEY,
+	ESLURM_RESERVATION_OVERLAP,
+	ESLURM_PORTS_BUSY,
+	ESLURM_PORTS_INVALID,
+	ESLURM_PROLOG_RUNNING,
 
 	/* switch specific error codes, specific values defined in plugin module */
 	ESLURM_SWITCH_MIN = 3000,
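Since the reservation and partition calls report failures through errno (see the RET comments in slurm.h above), callers can branch on the new codes directly. A sketch, assuming slurm_strerror() keeps its existing code-to-string role in this header:

#include <errno.h>
#include <stdio.h>
#include <slurm/slurm_errno.h>

/* Sketch only: classify a failed reservation call by the new codes. */
static void report_resv_error(const char *op)
{
	switch (errno) {
	case ESLURM_RESERVATION_ACCESS:
	case ESLURM_RESERVATION_INVALID:
	case ESLURM_RESERVATION_BUSY:
	case ESLURM_RESERVATION_NOT_USABLE:
	case ESLURM_RESERVATION_OVERLAP:
		fprintf(stderr, "%s: reservation error: %s\n",
			op, slurm_strerror(errno));
		break;
	default:
		fprintf(stderr, "%s: %s\n", op, slurm_strerror(errno));
		break;
	}
}
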
diff --git a/slurm/spank.h b/slurm/spank.h
index 3d59ba1747083d6c261ab9aadfc0ee05b3faee6c..08e36fb881681d7a425d665e9be4dced45bb2fab 100644
--- a/slurm/spank.h
+++ b/slurm/spank.h
@@ -4,10 +4,11 @@
  *  Copyright (C) 2002-2007 The Regents of the University of California.
  *  Copyright (C) 2008 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -68,7 +69,13 @@ typedef int (spank_f) (spank_t spank, int ac, char *argv[]);
  *               `-> user_init ()  
  *               + for each task
  *               |       + fork ()
- *               |       `-> user_task_init ()
+ *               |       |
+ *               |       + reclaim privileges
+ *               |       `-> task_init_privileged ()
+ *               |       |
+ *               |       + become_user ()
+ *               |       `-> task_init ()
+ *               |       |
  *               |       + execve ()
  *               |
  *               + reclaim privileges
@@ -78,9 +85,13 @@ typedef int (spank_f) (spank_t spank, int ac, char *argv[]);
  *               + for each task
  *               |       + wait ()
  *               |          `-> task_exit ()
- *               `-> fini ()
+ *               `-> exit ()
  *
- *   In srun only the init() and local_user_init() callbacks are used.
+ *   In srun only the init(), init_post_opt(), local_user_init(), and exit()
+ *    callbacks are used.
+ *
+ *   In sbatch/salloc only the init(), init_post_opt(), and exit() callbacks
+ *    are used.
  *
  */
 
@@ -88,6 +99,7 @@ extern spank_f slurm_spank_init;
 extern spank_f slurm_spank_init_post_opt;
 extern spank_f slurm_spank_local_user_init;
 extern spank_f slurm_spank_user_init;
+extern spank_f slurm_spank_task_init_privileged;
 extern spank_f slurm_spank_task_init;
 extern spank_f slurm_spank_task_post_fork;
 extern spank_f slurm_spank_task_exit;
@@ -154,11 +166,24 @@ enum spank_err {
     ESPANK_NOSPACE     = 6, /* Buffer too small.                             */
     ESPANK_NOT_REMOTE  = 7, /* Function only may be called in remote context */
     ESPANK_NOEXIST     = 8, /* Id/pid doesn't exist on this node             */
-    ESPANK_NOT_EXECD   = 9  /* Lookup by pid requested, but no tasks running */
+    ESPANK_NOT_EXECD   = 9, /* Lookup by pid requested, but no tasks running */
+    ESPANK_NOT_AVAIL   = 10, /* SPANK item not available from this callback  */
 };
 
 typedef enum spank_err spank_err_t;
 
+/*
+ *  SPANK plugin context
+ */
+enum spank_context {
+    S_CTX_ERROR,             /* Error obtaining current context              */
+    S_CTX_LOCAL,             /* Local context (srun)                         */
+    S_CTX_REMOTE,            /* Remote context (slurmd)                      */
+    S_CTX_ALLOCATOR          /* Allocator context (sbatch/salloc)            */
+};
+
+typedef enum spank_context spank_context_t;
+
 /*
  *  SPANK plugin options
  */
@@ -182,9 +207,10 @@ struct spank_option {
 };
 
 /*
- *  Plugin may declare spank_options option table:
- *   [Note: options may also be declared with spank_option_register(),
- *    defined below.]
+ *  Plugins may export an option table as the symbol "spank_options".
+ *   This method only works in "local" and "remote" context. To register
+ *   options in "allocator" context (sbatch/salloc), use the preferred
+ *   spank_option_register function described below.
  */
 extern struct spank_option spank_options [];
 
@@ -203,6 +229,11 @@ extern struct spank_option spank_options [];
  */
 BEGIN_C_DECLS
 
+/*
+ *  Return the string representation of a spank_err_t error code.
+ */
+const char *spank_strerror (spank_err_t err);
+
 /*
  *  Determine whether a given spank plugin symbol is supported
  *   in this version of SPANK interface.
@@ -215,20 +246,33 @@ BEGIN_C_DECLS
 int spank_symbol_supported (const char *symbol);
 
 /*
- *  Determine whether plugin is loaded "local" or "remote."
+ *  Determine whether plugin is loaded in "remote" context
  * 
  *  Returns:
  *  = 1   remote context, i.e. plugin is loaded in slurmd.
- *  = 0   local context, i.e. plugin loaded in srun.
+ *  = 0   not remote context
  *  < 0   spank handle was not valid.
  */
 int spank_remote (spank_t spank);
 
+/*
+ *  Return the context in which the calling plugin is loaded.
+ *
+ *  Returns the spank_context for the calling plugin, or S_CTX_ERROR
+ *   if the current context cannot be determined.
+ */
+spank_context_t spank_context (void);
+
 /*
  *  Register a plugin-provided option dynamically. This function
  *   is only valid when called from slurm_spank_init(), and must
- *   be called in both remote (slurmd) and local (srun) contexts.
- *   May be called multiple times to register many options.
+ *   be called in every context in which the option is used
+ *   (local, remote, allocator).
+ *
+ *  This function is the only method to register options in
+ *   allocator context.
+ *
+ *  May be called multiple times to register many options.
  *
  *  Returns ESPANK_SUCCESS on successful registration of the option
  *   or ESPANK_BAD_ARG if not called from slurm_spank_init().
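A skeletal plugin tying the pieces above together: spank_option_register() called from slurm_spank_init() (so the option also exists in allocator context), plus a spank_context() check. The plugin name, option name, and callback are illustrative only, and the initializer assumes the name/arginfo/usage/has_arg/val/cb layout of struct spank_option:

#include <slurm/spank.h>

SPANK_PLUGIN(demo, 1);

/* Option callback; signature per spank_opt_cb_f. */
static int _opt_cb(int val, const char *optarg, int remote)
{
	slurm_info("demo: --demo-flag seen (remote=%d)", remote);
	return 0;
}

/* name, arginfo, usage, has_arg, val, callback */
static struct spank_option demo_opt = {
	"demo-flag", NULL, "Demonstration option", 0, 0, _opt_cb
};

int slurm_spank_init(spank_t sp, int ac, char *argv[])
{
	spank_err_t err;

	if (spank_context() == S_CTX_ALLOCATOR)
		slurm_info("demo: loaded by sbatch/salloc");

	/* Registered here, the option works in local, remote, and
	 * allocator contexts alike. */
	err = spank_option_register(sp, &demo_opt);
	if (err != ESPANK_SUCCESS) {
		slurm_error("demo: spank_option_register: %s",
			    spank_strerror(err));
		return -1;
	}
	return 0;
}
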
diff --git a/src/Makefile.am b/src/Makefile.am
index a852e7d97c8d6acca5345b16d7790ffdf0bfceaa..4392926da66593a2659a4816ae5805378b519b61 100644
--- a/src/Makefile.am
+++ b/src/Makefile.am
@@ -1,6 +1,14 @@
+if WITH_BLCR 
+SRUN_CR = srun_cr
+else
+SRUN_CR = 
+endif
 
 SUBDIRS = common api database \
 	slurmctld slurmd slurmdbd plugins srun sbcast \
 	scontrol scancel squeue sinfo smap sview salloc \
-	sbatch sattach strigger sacct sacctmgr sreport sstat
+	sbatch sattach strigger sacct sacctmgr sreport sstat \
+	sshare sprio $(SRUN_CR)
 
diff --git a/src/Makefile.in b/src/Makefile.in
index 3e86c104e13a41b1f9566abb1166cf5e941ea9d7..1a818e95374c48831b591a8dc3b80a271016652d 100644
--- a/src/Makefile.in
+++ b/src/Makefile.in
@@ -40,14 +40,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -77,7 +81,10 @@ RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive	\
   distclean-recursive maintainer-clean-recursive
 ETAGS = etags
 CTAGS = ctags
-DIST_SUBDIRS = $(SUBDIRS)
+DIST_SUBDIRS = common api database slurmctld slurmd slurmdbd plugins \
+	srun sbcast scontrol scancel squeue sinfo smap sview salloc \
+	sbatch sattach strigger sacct sacctmgr sreport sstat sshare \
+	sprio srun_cr
 DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
 ACLOCAL = @ACLOCAL@
 AMTAR = @AMTAR@
@@ -89,6 +96,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
@@ -248,10 +259,13 @@ target_os = @target_os@
 target_vendor = @target_vendor@
 top_builddir = @top_builddir@
 top_srcdir = @top_srcdir@
+@WITH_BLCR_FALSE@SRUN_CR = 
+@WITH_BLCR_TRUE@SRUN_CR = srun_cr
 SUBDIRS = common api database \
 	slurmctld slurmd slurmdbd plugins srun sbcast \
 	scontrol scancel squeue sinfo smap sview salloc \
-	sbatch sattach strigger sacct sacctmgr sreport sstat
+	sbatch sattach strigger sacct sacctmgr sreport sstat \
+	sshare sprio $(SRUN_CR)
 
 all: all-recursive
 
diff --git a/src/api/Makefile.am b/src/api/Makefile.am
index 0000a85496aac10ea3be8d564e8b3b7a3a5a4f67..762c91d6095fc7e7348f9a855ddd50b5ddc0de4a 100644
--- a/src/api/Makefile.am
+++ b/src/api/Makefile.am
@@ -76,7 +76,9 @@ slurmapi_src =           \
 	node_info.c      \
 	node_select_info.c node_select_info.h \
 	partition_info.c \
+	reservation_info.c \
 	signal.c         \
+	slurm_hostlist.c \
 	slurm_pmi.c slurm_pmi.h	\
 	step_ctx.c step_ctx.h \
 	step_io.c step_io.h \
@@ -84,6 +86,7 @@ slurmapi_src =           \
 	pmi_server.c pmi_server.h \
 	submit.c         \
 	suspend.c        \
+	topo_info.c      \
 	triggers.c       \
 	reconfigure.c    \
 	update_config.c
diff --git a/src/api/Makefile.in b/src/api/Makefile.in
index f4ad8b5d4826b7cfac00233a55627e0db200dc9f..93e32d9187120479503e27fc1268571b38e235b4 100644
--- a/src/api/Makefile.in
+++ b/src/api/Makefile.in
@@ -46,14 +46,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -97,8 +101,9 @@ libslurmhelper_la_DEPENDENCIES = $(am__DEPENDENCIES_1)
 am__objects_1 = allocate.lo allocate_msg.lo cancel.lo checkpoint.lo \
 	complete.lo config_info.lo init_msg.lo job_info.lo \
 	job_step_info.lo node_info.lo node_select_info.lo \
-	partition_info.lo signal.lo slurm_pmi.lo step_ctx.lo \
-	step_io.lo step_launch.lo pmi_server.lo submit.lo suspend.lo \
+	partition_info.lo reservation_info.lo signal.lo \
+	slurm_hostlist.lo slurm_pmi.lo step_ctx.lo step_io.lo \
+	step_launch.lo pmi_server.lo submit.lo suspend.lo topo_info.lo \
 	triggers.lo reconfigure.lo update_config.lo
 am_libslurmhelper_la_OBJECTS = $(am__objects_1)
 libslurmhelper_la_OBJECTS = $(am_libslurmhelper_la_OBJECTS)
@@ -137,6 +142,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
@@ -368,7 +377,9 @@ slurmapi_src = \
 	node_info.c      \
 	node_select_info.c node_select_info.h \
 	partition_info.c \
+	reservation_info.c \
 	signal.c         \
+	slurm_hostlist.c \
 	slurm_pmi.c slurm_pmi.h	\
 	step_ctx.c step_ctx.h \
 	step_io.c step_io.h \
@@ -376,6 +387,7 @@ slurmapi_src = \
 	pmi_server.c pmi_server.h \
 	submit.c         \
 	suspend.c        \
+	topo_info.c      \
 	triggers.c       \
 	reconfigure.c    \
 	update_config.c
@@ -533,13 +545,16 @@ distclean-compile:
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/pmi.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/pmi_server.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/reconfigure.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/reservation_info.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/signal.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/slurm_hostlist.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/slurm_pmi.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/step_ctx.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/step_io.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/step_launch.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/submit.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/suspend.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/topo_info.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/triggers.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/update_config.Plo@am__quote@
 
diff --git a/src/api/allocate.c b/src/api/allocate.c
index 0fea867c2ca00e114ba7cdef998aadebc58ab66f..a650dffb9027b8171c41095faa4e2265d0f29481 100644
--- a/src/api/allocate.c
+++ b/src/api/allocate.c
@@ -1,14 +1,15 @@
 /*****************************************************************************\
  *  allocate.c - allocate nodes for a job or step with supplied contraints
- *  $Id: allocate.c 14992 2008-09-05 20:10:34Z da $
+ *  $Id: allocate.c 16616 2009-02-20 17:00:27Z jette $
  *****************************************************************************
  *  Copyright (C) 2002 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -717,6 +718,7 @@ _wait_for_alloc_rpc(const listen_t *listen, int sleep_time,
 			case EINTR:
 				*resp = NULL;
 				return -1;
+			case EBADF:
 			case ENOMEM:
 			case EINVAL:
 			case EFAULT:
diff --git a/src/api/allocate_msg.c b/src/api/allocate_msg.c
index 9ae77a6e8b103ac904b8af61f31e780834f5d093..18985bfe35c6d3347daff2b7e48d76b264e175bb 100644
--- a/src/api/allocate_msg.c
+++ b/src/api/allocate_msg.c
@@ -6,10 +6,11 @@
  *  Copyright (C) 2002 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/api/cancel.c b/src/api/cancel.c
index 678605d236acb9c3eb9edd31a6e78fb928e84d42..4d7f58663084f4b05b033ecbf1c3544047580125 100644
--- a/src/api/cancel.c
+++ b/src/api/cancel.c
@@ -1,14 +1,15 @@
 /*****************************************************************************\
  *  cancel.c - cancel a slurm job or job step
- *  $Id: cancel.c 13672 2008-03-19 23:10:58Z jette $
+ *  $Id: cancel.c 16616 2009-02-20 17:00:27Z jette $
  *****************************************************************************
  *  Copyright (C) 2002 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/api/checkpoint.c b/src/api/checkpoint.c
index be3c46acbd51339f9545bd0166a06c73b6bd6a9e..174bc21ecd90ddd847075cfaae2309aa946f0fbd 100644
--- a/src/api/checkpoint.c
+++ b/src/api/checkpoint.c
@@ -1,14 +1,16 @@
 /*****************************************************************************\
  *  checkpoint.c - Process checkpoint related functions.
- *  $Id: checkpoint.c 13672 2008-03-19 23:10:58Z jette $
+ *  $Id: checkpoint.c 16867 2009-03-12 16:35:42Z jette $
  *****************************************************************************
- *  Copyright (C) 2004 The Regents of the University of California.
+ *  Copyright (C) 2004-2007 The Regents of the University of California.
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov> et. al.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -58,29 +60,33 @@
 
 static int _handle_rc_msg(slurm_msg_t *msg);
 static int _checkpoint_op (uint16_t op, uint16_t data,
-		uint32_t job_id, uint32_t step_id);
+			   uint32_t job_id, uint32_t step_id,
+			   char *image_dir);
 /*
  * _checkpoint_op - perform a checkpoint operation for some job step.
- * IN op      - operation to perform
- * IN data    - operation-specific data
- * IN job_id  - job on which to perform operation
- * IN step_id - job step on which to perform operation
+ * IN op        - operation to perform
+ * IN data      - operation-specific data
+ * IN job_id    - job on which to perform operation
+ * IN step_id   - job step on which to perform operation
+ * IN image_dir - directory used to get/put checkpoint images
  * RET 0 or a slurm error code
  */
 static int _checkpoint_op (uint16_t op, uint16_t data,
-		uint32_t job_id, uint32_t step_id)
+			   uint32_t job_id, uint32_t step_id,
+			   char *image_dir)
 {
 	int rc;
 	checkpoint_msg_t ckp_req;
 	slurm_msg_t req_msg;
 
 	slurm_msg_t_init(&req_msg);
-	ckp_req.op       = op;
-	ckp_req.data     = data;
-	ckp_req.job_id   = job_id;
-	ckp_req.step_id  = step_id;
-	req_msg.msg_type = REQUEST_CHECKPOINT;
-	req_msg.data     = &ckp_req;
+	ckp_req.op        = op;
+	ckp_req.data      = data;
+	ckp_req.job_id    = job_id;
+	ckp_req.step_id   = step_id;
+	ckp_req.image_dir = image_dir;
+	req_msg.msg_type  = REQUEST_CHECKPOINT;
+	req_msg.data      = &ckp_req;
 
 	if (slurm_send_recv_controller_rc_msg(&req_msg, &rc) < 0)
 		return SLURM_ERROR;
@@ -105,13 +111,14 @@ extern int slurm_checkpoint_able (uint32_t job_id, uint32_t step_id,
 	checkpoint_msg_t ckp_req;
 	checkpoint_resp_msg_t *resp;
 
-	ckp_req.op       = CHECK_ABLE;
-	ckp_req.job_id   = job_id;
-	ckp_req.step_id  = step_id;
+	ckp_req.op        = CHECK_ABLE;
+	ckp_req.job_id    = job_id;
+	ckp_req.step_id   = step_id;
+	ckp_req.image_dir = NULL;
 	slurm_msg_t_init(&req_msg);
 	slurm_msg_t_init(&resp_msg);
-	req_msg.msg_type = REQUEST_CHECKPOINT;
-	req_msg.data     = &ckp_req;
+	req_msg.msg_type  = REQUEST_CHECKPOINT;
+	req_msg.data      = &ckp_req;
 
 	if (slurm_send_recv_controller_msg(&req_msg, &resp_msg) < 0)
 		return SLURM_ERROR;
@@ -141,7 +148,7 @@ extern int slurm_checkpoint_able (uint32_t job_id, uint32_t step_id,
  */
 extern int slurm_checkpoint_disable (uint32_t job_id, uint32_t step_id)
 {
-	return _checkpoint_op (CHECK_DISABLE, 0, job_id, step_id);
+	return _checkpoint_op (CHECK_DISABLE, 0, job_id, step_id, NULL);
 }
 
 
@@ -153,21 +160,23 @@ extern int slurm_checkpoint_disable (uint32_t job_id, uint32_t step_id)
  */
 extern int slurm_checkpoint_enable (uint32_t job_id, uint32_t step_id)
 {
-	return _checkpoint_op (CHECK_ENABLE, 0, job_id, step_id);
+	return _checkpoint_op (CHECK_ENABLE, 0, job_id, step_id, NULL);
 }
 
 /*
  * slurm_checkpoint_create - initiate a checkpoint request for some job step.
  *	the job will continue execution after the checkpoint operation completes
- * IN job_id  - job on which to perform operation
- * IN step_id - job step on which to perform operation
- * IN max_wait - maximum wait for operation to complete, in seconds
+ * IN job_id    - job on which to perform operation
+ * IN step_id   - job step on which to perform operation
+ * IN max_wait  - maximum wait for operation to complete, in seconds
+ * IN image_dir - directory used to get/put checkpoint images
  * RET 0 or a slurm error code
  */
 extern int slurm_checkpoint_create (uint32_t job_id, uint32_t step_id, 
-		uint16_t max_wait)
+		uint16_t max_wait, char *image_dir)
 {
-	return _checkpoint_op (CHECK_CREATE, max_wait, job_id, step_id);
+	return _checkpoint_op (CHECK_CREATE, max_wait, job_id, step_id, 
+			       image_dir);
 }
 
 /*
@@ -176,12 +185,14 @@ extern int slurm_checkpoint_create (uint32_t job_id, uint32_t step_id,
  * IN job_id  - job on which to perform operation
  * IN step_id - job step on which to perform operation
  * IN max_wait - maximum wait for operation to complete, in seconds
+ * IN image_dir - directory used to get/put checkpoint images
  * RET 0 or a slurm error code
  */
 extern int slurm_checkpoint_vacate (uint32_t job_id, uint32_t step_id, 
-		uint16_t max_wait)
+		uint16_t max_wait, char *image_dir)
 {
-	return _checkpoint_op (CHECK_VACATE, max_wait, job_id, step_id);
+	return _checkpoint_op (CHECK_VACATE, max_wait, job_id, step_id, 
+			       image_dir);
 }
 
 /*
@@ -190,9 +201,10 @@ extern int slurm_checkpoint_vacate (uint32_t job_id, uint32_t step_id,
  * IN step_id - job step on which to perform operation
  * RET 0 or a slurm error code
  */
-extern int slurm_checkpoint_restart (uint32_t job_id, uint32_t step_id)
+extern int slurm_checkpoint_restart (uint32_t job_id, uint32_t step_id,
+				     uint16_t stick, char *image_dir)
 {
-	return _checkpoint_op (CHECK_RESTART, 0, job_id, step_id);
+	return _checkpoint_op (CHECK_RESTART, stick, job_id, step_id, image_dir);
 }
 
 /*
@@ -256,13 +268,14 @@ extern int slurm_checkpoint_error ( uint32_t job_id, uint32_t step_id,
 	/*
 	 * Request message:
 	 */
-	req.op       = CHECK_ERROR;
-	req.job_id   = job_id;
-	req.step_id  = step_id;
+	req.op        = CHECK_ERROR;
+	req.job_id    = job_id;
+	req.step_id   = step_id;
+	req.image_dir = NULL;
 	slurm_msg_t_init(&msg);
 	slurm_msg_t_init(&resp_msg);
-	msg.msg_type = REQUEST_CHECKPOINT;
-	msg.data     = &req;
+	msg.msg_type  = REQUEST_CHECKPOINT;
+	msg.data      = &req;
 
 	rc = slurm_send_recv_controller_msg(&msg, &resp_msg);
 
@@ -317,7 +330,9 @@ _handle_rc_msg(slurm_msg_t *msg)
  * RET 0 or a slurm error code
  */
 extern int slurm_checkpoint_task_complete (uint32_t job_id, uint32_t step_id,
-		uint32_t task_id, time_t begin_time, uint32_t error_code, char *error_msg)
+					   uint32_t task_id, time_t begin_time,
+					   uint32_t error_code, 
+					   char *error_msg)
 {
 	int rc;
 	slurm_msg_t msg;
@@ -341,77 +356,19 @@ extern int slurm_checkpoint_task_complete (uint32_t job_id, uint32_t step_id,
 }
 
 /*
- * slurm_get_checkpoint_file_path - return the checkpoint file
- *      path of this process, creating the directory if needed.
- * IN len: length of the file path buffer
- * OUT buf: buffer to store the checkpoint file path
- * RET: 0 on success, -1 on failure with errno set
+ * slurm_checkpoint_tasks - send checkpoint request to the tasks of
+ *     the specified step
+ * IN job_id: job ID of the step
+ * IN step_id: step ID of the step
+ * IN begin_time: time at which the checkpoint operation begins
+ * IN image_dir: location to store checkpoint images; passed to the plugin
+ * IN max_wait: seconds to wait for the operation to complete
+ * IN nodelist: nodes to which the request is sent
+ * RET: 0 on success, non-zero on failure with errno set
  */
 extern int
-slurm_get_checkpoint_file_path(size_t len, char *buf)
+slurm_checkpoint_tasks(uint32_t job_id, uint16_t step_id, time_t begin_time,
+		       char *image_dir, uint16_t max_wait, char *nodelist)
 {
-       char *ckpt_path, *job_id, *step_id, *proc_id;
-       struct stat mystat;
-       int idx;
-
-       len --;                 /* for a terminating 0 */
-
-       ckpt_path = getenv("SLURM_CHECKPOINT_PATH");
-       if (ckpt_path == NULL) { /* this should not happen since the program may chdir */
-               ckpt_path = getcwd(buf, len);
-               if (ckpt_path == NULL)  /* ERANGE: len is too short */
-                       return -1;
-       } else {
-               if (snprintf(buf, len, "%s", ckpt_path) >= len) { /* glibc >= 2.1 */
-                       errno = ERANGE;
-                       return -1;
-               }
-               ckpt_path = buf;
-       }
-       idx = strlen(ckpt_path) - 1;
-       while (idx > 0 && ckpt_path[idx] == '/')
-               ckpt_path[idx --] = 0;
-
-       if (stat(ckpt_path, &mystat) < 0)
-               return -1;
-       if (! S_ISDIR(mystat.st_mode)) {
-               errno = ENOTDIR;
-               return -1;
-       }
-
-       job_id = getenv("SLURM_JOBID");
-       step_id = getenv("SLURM_STEPID");
-       proc_id = getenv("SLURM_PROCID");
-       if (job_id == NULL || step_id == NULL || proc_id == NULL) {
-               errno = ENODATA;
-               return -1;
-       }
-       idx = strlen(buf);
-       if (snprintf(buf + idx, len - idx, "/%s.%s", job_id, step_id) >= len - idx) {
-               errno = ERANGE;
-               return -1;
-       }
-
-       if (stat(buf, &mystat) < 0) {
-               if (errno == ENOENT) { /* dir does not exists */
-                       if (mkdir(buf, 0750) < 0 && errno != EEXIST)
-                               return -1;
-                       if (stat(buf, &mystat) < 0)
-                               return -1;
-               }
-               else
-                       return -1;
-       }
-       if (! S_ISDIR(mystat.st_mode)) {
-               errno = ENOTDIR;
-               return -1;
-       }
-
-       idx = strlen(buf);
-       if (snprintf(buf + idx, len - idx, "/%s.%s.ckpt", __progname, proc_id) >= len - idx) {
-               errno = ERANGE;
-               return -1;
-       }
-
-       return 0;
+	return checkpoint_tasks(job_id, step_id, begin_time,
+				image_dir, max_wait, nodelist);
 }
diff --git a/src/api/complete.c b/src/api/complete.c
index 15ce8c44adab7b8c7f57ecea0444496a1e70957f..e00f1103ed397a0456b9902e6f85613efca37db7 100644
--- a/src/api/complete.c
+++ b/src/api/complete.c
@@ -1,14 +1,15 @@
 /*****************************************************************************\
  *  complete.c - note the completion a slurm job or job step
- *  $Id: complete.c 13672 2008-03-19 23:10:58Z jette $
+ *  $Id: complete.c 16616 2009-02-20 17:00:27Z jette $
  *****************************************************************************
  *  Copyright (C) 2002 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/api/config_info.c b/src/api/config_info.c
index f4afe45788edaf54e7091c558770c8f29ec11f4b..c645df051f14f3b72876d538481bfdcf366d6648 100644
--- a/src/api/config_info.c
+++ b/src/api/config_info.c
@@ -2,13 +2,14 @@
  *  config_info.c - get/print the system configuration information of slurm
  *****************************************************************************
  *  Copyright (C) 2002-2007 The Regents of the University of California.
- *  Copyright (C) 2008 Lawrence Livermore National Security.
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov> and Kevin Tew <tew1@llnl.gov>.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -47,8 +48,13 @@
 
 #include "src/api/job_info.h"
 #include "src/common/parse_time.h"
+#include "src/common/read_config.h"
 #include "src/common/slurm_auth.h"
 #include "src/common/slurm_protocol_api.h"
+#include "src/common/slurm_resource_info.h"
+#include "src/common/xmalloc.h"
+#include "src/common/xstring.h"
+#include "src/common/list.h"
 
 /*
  * slurm_api_version - Return a single number reflecting the SLURM API's 
@@ -87,17 +93,26 @@ _select_info(uint16_t select_type_param)
 	}
 }
 
-static char *_task_plugin_param(uint16_t task_plugin_param)
+static char *
+_reset_period_str(uint16_t reset_period)
 {
-	switch(task_plugin_param) {
-		case TASK_PARAM_NONE:
-			return "none";
-		case TASK_PARAM_CPUSETS:
-			return "cpusets";
-		case TASK_PARAM_SCHED:
-			return "sched";
+	switch (reset_period) {
+		case PRIORITY_RESET_NONE:
+			return "NONE";
+		case PRIORITY_RESET_NOW:
+			return "NOW";
+		case PRIORITY_RESET_DAILY:
+			return "DAILY";
+		case PRIORITY_RESET_WEEKLY:
+			return "WEEKLY";
+		case PRIORITY_RESET_MONTHLY:
+			return "MONTHLY";
+		case PRIORITY_RESET_QUARTERLY:
+			return "QUARTERLY";
+		case PRIORITY_RESET_YEARLY:
+			return "YEARLY";
 		default:
-			return "unknown";
+			return "UNKNOWN";
 	}
 }
 
@@ -110,16 +125,29 @@ static char *_task_plugin_param(uint16_t task_plugin_param)
 void slurm_print_ctl_conf ( FILE* out, 
                             slurm_ctl_conf_info_msg_t * slurm_ctl_conf_ptr )
 {
-	char time_str[32], tmp_str[128];
-
+	char time_str[32], tmp_str[128], *xbuf;
+	char *select_title = "";
+#ifdef HAVE_BGL
+	select_title = "Bluegene/L configuration";
+#endif
+#ifdef HAVE_BGP
+	select_title = "Bluegene/P configuration";
+#endif
+#ifdef HAVE_BGQ
+	select_title = "Bluegene/Q configuration";
+#endif
 	if ( slurm_ctl_conf_ptr == NULL )
 		return ;
 
 	slurm_make_time_str ((time_t *)&slurm_ctl_conf_ptr->last_update, 
 			     time_str, sizeof(time_str));
 	fprintf(out, "Configuration data as of %s\n", time_str);
-	fprintf(out, "AccountingStorageEnforce = %u\n",
-		slurm_ctl_conf_ptr->accounting_storage_enforce);
+	fprintf(out, "AccountingStorageBackupHost = %s\n", 
+		slurm_ctl_conf_ptr->accounting_storage_backup_host);
+	accounting_enforce_string(
+		slurm_ctl_conf_ptr->accounting_storage_enforce,
+		tmp_str, sizeof(tmp_str));
+	fprintf(out, "AccountingStorageEnforce = %s\n", tmp_str);
 	fprintf(out, "AccountingStorageHost   = %s\n", 
 		slurm_ctl_conf_ptr->accounting_storage_host);
 	fprintf(out, "AccountingStorageLoc    = %s\n", 
@@ -138,6 +166,8 @@ void slurm_print_ctl_conf ( FILE* out,
 		slurm_ctl_conf_ptr->backup_addr);
 	fprintf(out, "BackupController        = %s\n", 
 		slurm_ctl_conf_ptr->backup_controller);
+	fprintf(out, "BatchStartTime          = %u sec\n", 
+		slurm_ctl_conf_ptr->batch_start_timeout);
 	slurm_make_time_str ((time_t *)&slurm_ctl_conf_ptr->boot_time,
 			     time_str, sizeof(time_str));
 	fprintf(out, "BOOT_TIME               = %s\n",
@@ -148,55 +178,65 @@ void slurm_print_ctl_conf ( FILE* out,
 		slurm_ctl_conf_ptr->checkpoint_type);
 	fprintf(out, "ClusterName             = %s\n",
 		slurm_ctl_conf_ptr->cluster_name);
+	fprintf(out, "CompleteWait            = %u sec\n", 
+		slurm_ctl_conf_ptr->complete_wait);
 	fprintf(out, "ControlAddr             = %s\n", 
 		slurm_ctl_conf_ptr->control_addr);
 	fprintf(out, "ControlMachine          = %s\n", 
 		slurm_ctl_conf_ptr->control_machine);
 	fprintf(out, "CryptoType              = %s\n",
 		slurm_ctl_conf_ptr->crypto_type);
+
+	xbuf = debug_flags2str(slurm_ctl_conf_ptr->debug_flags);
+	fprintf(out, "DebugFlags              = %s\n", xbuf);
+	xfree(xbuf);
+
 	if (slurm_ctl_conf_ptr->def_mem_per_task & MEM_PER_CPU) {
-		fprintf(out, "DefMemPerCPU            = %u\n",
+		fprintf(out, "DefMemPerCPU            = %u MB\n",
 			slurm_ctl_conf_ptr->def_mem_per_task &
 			(~MEM_PER_CPU));
 	} else if (slurm_ctl_conf_ptr->def_mem_per_task) {
-		fprintf(out, "DefMemPerNode           = %u\n",
+		fprintf(out, "DefMemPerNode           = %u MB\n",
 			slurm_ctl_conf_ptr->def_mem_per_task);
 	} else
 		fprintf(out, "DefMemPerCPU            = UNLIMITED\n");
+
 	if (slurm_ctl_conf_ptr->disable_root_jobs)
 		fprintf(out, "DisableRootJobs         = YES\n");
 	else
 		fprintf(out, "DisableRootJobs         = NO\n");
-#if 0
-/* Add in Slurm v1.4 */
+
 	if (slurm_ctl_conf_ptr->enforce_part_limits)
 		fprintf(out, "EnforcePartLimits       = YES\n");
 	else
 		fprintf(out, "EnforcePartLimits       = NO\n");
-#endif
 	fprintf(out, "Epilog                  = %s\n",
 		slurm_ctl_conf_ptr->epilog);
-	fprintf(out, "EpilogMsgTime           = %u\n",
+	fprintf(out, "EpilogMsgTime           = %u usec\n",
 		slurm_ctl_conf_ptr->epilog_msg_time);
+	fprintf(out, "EpilogSlurmctld         = %s\n", 
+		slurm_ctl_conf_ptr->epilog_slurmctld);
 	fprintf(out, "FastSchedule            = %u\n",
 		slurm_ctl_conf_ptr->fast_schedule);
 	fprintf(out, "FirstJobId              = %u\n",
 		slurm_ctl_conf_ptr->first_job_id);
-	fprintf(out, "GetEnvTimeout           = %u\n",
+	fprintf(out, "GetEnvTimeout           = %u sec\n",
 		slurm_ctl_conf_ptr->get_env_timeout);
-	fprintf(out, "HealthCheckInterval     = %u\n",
+	fprintf(out, "HealthCheckInterval     = %u sec\n",
 		slurm_ctl_conf_ptr->health_check_interval);
 	fprintf(out, "HealthCheckProgram      = %s\n",
 		slurm_ctl_conf_ptr->health_check_program);
 #ifdef HAVE_XCPU
 	fprintf(out, "HAVE_XCPU               = %d\n", HAVE_XCPU);
 #endif
-	fprintf(out, "InactiveLimit           = %u\n",
+	fprintf(out, "InactiveLimit           = %u sec\n",
 		slurm_ctl_conf_ptr->inactive_limit);
-	fprintf(out, "JobAcctGatherFrequency  = %u\n",
+	fprintf(out, "JobAcctGatherFrequency  = %u sec\n",
 		slurm_ctl_conf_ptr->job_acct_gather_freq);
 	fprintf(out, "JobAcctGatherType       = %s\n",
 		slurm_ctl_conf_ptr->job_acct_gather_type);
+	fprintf(out, "JobCheckpointDir        = %s\n",
+		slurm_ctl_conf_ptr->job_ckpt_dir);
 	fprintf(out, "JobCompHost             = %s\n",
 		slurm_ctl_conf_ptr->job_comp_host);
 	fprintf(out, "JobCompLoc              = %s\n",
@@ -217,7 +257,9 @@ void slurm_print_ctl_conf ( FILE* out,
 		slurm_ctl_conf_ptr->job_file_append);
 	fprintf(out, "JobRequeue              = %u\n",
 		slurm_ctl_conf_ptr->job_requeue);
-	fprintf(out, "KillWait                = %u\n", 
+	fprintf(out, "KillOnBadExit           = %u\n", 
+		slurm_ctl_conf_ptr->kill_on_bad_exit);
+	fprintf(out, "KillWait                = %u sec\n", 
 		slurm_ctl_conf_ptr->kill_wait);
 	fprintf(out, "Licenses                = %s\n",
 		slurm_ctl_conf_ptr->licenses);
@@ -226,29 +268,67 @@ void slurm_print_ctl_conf ( FILE* out,
 	fprintf(out, "MaxJobCount             = %u\n", 
 		slurm_ctl_conf_ptr->max_job_cnt);
 	if (slurm_ctl_conf_ptr->max_mem_per_task & MEM_PER_CPU) {
-		fprintf(out, "MaxMemPerCPU            = %u\n",
+		fprintf(out, "MaxMemPerCPU            = %u MB\n",
 			slurm_ctl_conf_ptr->max_mem_per_task &
 			(~MEM_PER_CPU));
 	} else if (slurm_ctl_conf_ptr->max_mem_per_task) {
-		fprintf(out, "MaxMemPerNode           = %u\n",
+		fprintf(out, "MaxMemPerNode           = %u MB\n",
 			slurm_ctl_conf_ptr->max_mem_per_task);
 	} else
 		fprintf(out, "MaxMemPerCPU            = UNLIMITED\n");
-	fprintf(out, "MessageTimeout          = %u\n",
+	fprintf(out, "MessageTimeout          = %u sec\n",
 		slurm_ctl_conf_ptr->msg_timeout);
-	fprintf(out, "MinJobAge               = %u\n", 
+	fprintf(out, "MinJobAge               = %u sec\n", 
 		slurm_ctl_conf_ptr->min_job_age);
 	fprintf(out, "MpiDefault              = %s\n",
 		slurm_ctl_conf_ptr->mpi_default);
+	fprintf(out, "MpiParams               = %s\n",
+		slurm_ctl_conf_ptr->mpi_params);
 #ifdef MULTIPLE_SLURMD
 	fprintf(out, "MULTIPLE_SLURMD         = %d\n", MULTIPLE_SLURMD);
 #endif
 	fprintf(out, "NEXT_JOB_ID             = %u\n",
 		slurm_ctl_conf_ptr->next_job_id);
+	if (slurm_ctl_conf_ptr->over_time_limit == (uint16_t) INFINITE)
+		fprintf(out, "OverTimeLimit           = UNLIMITED\n");
+	else {
+		fprintf(out, "OverTimeLimit           = %u min\n",
+			slurm_ctl_conf_ptr->over_time_limit);
+	}
 	fprintf(out, "PluginDir               = %s\n", 
 		slurm_ctl_conf_ptr->plugindir);
 	fprintf(out, "PlugStackConfig         = %s\n",
 		slurm_ctl_conf_ptr->plugstack);
+
+	if (strcmp(slurm_ctl_conf_ptr->priority_type, "priority/basic") == 0) {
+		fprintf(out, "PriorityType            = %s\n",
+			slurm_ctl_conf_ptr->priority_type);
+	} else {
+		secs2time_str((time_t) slurm_ctl_conf_ptr->priority_decay_hl,
+			      tmp_str, sizeof(tmp_str));
+		fprintf(out, "PriorityDecayHalfLife   = %s\n", tmp_str);
+		fprintf(out, "PriorityFavorSmall      = %u\n",
+			slurm_ctl_conf_ptr->priority_favor_small);
+		secs2time_str((time_t) slurm_ctl_conf_ptr->priority_max_age,
+			      tmp_str, sizeof(tmp_str));
+		fprintf(out, "PriorityMaxAge          = %s\n", tmp_str);
+		fprintf(out, "PriorityUsageResetPeriod = %s\n", 
+			_reset_period_str(slurm_ctl_conf_ptr->
+					  priority_reset_period));
+		fprintf(out, "PriorityType            = %s\n",
+			slurm_ctl_conf_ptr->priority_type);
+		fprintf(out, "PriorityWeightAge       = %u\n",
+			slurm_ctl_conf_ptr->priority_weight_age);
+		fprintf(out, "PriorityWeightFairShare = %u\n",
+			slurm_ctl_conf_ptr->priority_weight_fs);
+		fprintf(out, "PriorityWeightJobSize   = %u\n",
+			slurm_ctl_conf_ptr->priority_weight_js);
+		fprintf(out, "PriorityWeightPartition = %u\n",
+			slurm_ctl_conf_ptr->priority_weight_part);
+		fprintf(out, "PriorityWeightQOS       = %u\n",
+			slurm_ctl_conf_ptr->priority_weight_qos);
+	}
+
 	private_data_string(slurm_ctl_conf_ptr->private_data,
 			    tmp_str, sizeof(tmp_str));
 	fprintf(out, "PrivateData             = %s\n", tmp_str);
@@ -256,6 +336,8 @@ void slurm_print_ctl_conf ( FILE* out,
 		slurm_ctl_conf_ptr->proctrack_type);
 	fprintf(out, "Prolog                  = %s\n", 
 		slurm_ctl_conf_ptr->prolog);
+	fprintf(out, "PrologSlurmctld         = %s\n", 
+		slurm_ctl_conf_ptr->prolog_slurmctld);
 	fprintf(out, "PropagatePrioProcess    = %u\n",
 		slurm_ctl_conf_ptr->propagate_prio_process);
         fprintf(out, "PropagateResourceLimits = %s\n",
@@ -264,17 +346,32 @@ void slurm_print_ctl_conf ( FILE* out,
                 slurm_ctl_conf_ptr->propagate_rlimits_except);
 	fprintf(out, "ResumeProgram           = %s\n", 
 		slurm_ctl_conf_ptr->resume_program);
-	fprintf(out, "ResumeRate              = %u\n", 
+	fprintf(out, "ResumeRate              = %u nodes/min\n", 
 		slurm_ctl_conf_ptr->resume_rate);
+	fprintf(out, "ResumeTimeout           = %u sec\n", 
+		slurm_ctl_conf_ptr->resume_timeout);
+	if (slurm_ctl_conf_ptr->resv_over_run == (uint16_t) INFINITE)
+		fprintf(out, "ResvOverRun             = UNLIMITED\n");
+	else {
+		fprintf(out, "ResvOverRun             = %u min\n",
+			slurm_ctl_conf_ptr->resv_over_run);
+	}
 	fprintf(out, "ReturnToService         = %u\n", 
 		slurm_ctl_conf_ptr->ret2service);
+	if (slurm_ctl_conf_ptr->salloc_default_command) {
+		fprintf(out, "SallocDefaultCommand    = \"%s\"\n",
+			slurm_ctl_conf_ptr->salloc_default_command);
+	} else {
+		fprintf(out, "SallocDefaultCommand    = %s\n",
+			slurm_ctl_conf_ptr->salloc_default_command);
+	}
 	fprintf(out, "SchedulerParameters     = %s\n",
 		slurm_ctl_conf_ptr->sched_params);
 	fprintf(out, "SchedulerPort           = %u\n",
 		slurm_ctl_conf_ptr->schedport);
 	fprintf(out, "SchedulerRootFilter     = %u\n",
 		slurm_ctl_conf_ptr->schedrootfltr);
-	fprintf(out, "SchedulerTimeSlice      = %u\n",
+	fprintf(out, "SchedulerTimeSlice      = %u sec\n",
 		slurm_ctl_conf_ptr->sched_time_slice);
 	fprintf(out, "SchedulerType           = %s\n",
 		slurm_ctl_conf_ptr->schedtype);
@@ -296,7 +393,7 @@ void slurm_print_ctl_conf ( FILE* out,
 		slurm_ctl_conf_ptr->slurmctld_pidfile);
 	fprintf(out, "SlurmctldPort           = %u\n", 
 		slurm_ctl_conf_ptr->slurmctld_port);
-	fprintf(out, "SlurmctldTimeout        = %u\n", 
+	fprintf(out, "SlurmctldTimeout        = %u sec\n", 
 		slurm_ctl_conf_ptr->slurmctld_timeout);
 	fprintf(out, "SlurmdDebug             = %u\n", 
 		slurm_ctl_conf_ptr->slurmd_debug);
@@ -310,13 +407,18 @@ void slurm_print_ctl_conf ( FILE* out,
 #endif
 	fprintf(out, "SlurmdSpoolDir          = %s\n", 
 		slurm_ctl_conf_ptr->slurmd_spooldir);
-	fprintf(out, "SlurmdTimeout           = %u\n", 
+	fprintf(out, "SlurmdTimeout           = %u sec\n", 
 		slurm_ctl_conf_ptr->slurmd_timeout);
-	fprintf(out, "SLURM_CONFIG_FILE       = %s\n", 
+	fprintf(out, "SlurmdUser              = %s(%u)\n", 
+		slurm_ctl_conf_ptr->slurmd_user_name,
+		slurm_ctl_conf_ptr->slurmd_user_id);
+	fprintf(out, "SLURM_CONF              = %s\n", 
 		slurm_ctl_conf_ptr->slurm_conf);
 	fprintf(out, "SLURM_VERSION           = %s\n", SLURM_VERSION);
 	fprintf(out, "SrunEpilog              = %s\n",
 		slurm_ctl_conf_ptr->srun_epilog);
+	fprintf(out, "SrunIOTimeout           = %u sec\n", 
+		slurm_ctl_conf_ptr->srun_io_timeout);
 	fprintf(out, "SrunProlog              = %s\n",
 		slurm_ctl_conf_ptr->srun_prolog);
 	fprintf(out, "StateSaveLocation       = %s\n", 
@@ -327,32 +429,46 @@ void slurm_print_ctl_conf ( FILE* out,
 		slurm_ctl_conf_ptr->suspend_exc_parts);
 	fprintf(out, "SuspendProgram          = %s\n", 
 		slurm_ctl_conf_ptr->suspend_program);
-	fprintf(out, "SuspendRate             = %u\n", 
+	fprintf(out, "SuspendRate             = %u nodes/min\n", 
 		slurm_ctl_conf_ptr->suspend_rate);
-	fprintf(out, "SuspendTime             = %d\n", 
-		((int)slurm_ctl_conf_ptr->suspend_time - 1));
+	if (slurm_ctl_conf_ptr->suspend_time == 0) {
+		fprintf(out, "SuspendTime             = NONE\n");
+	} else {
+		fprintf(out, "SuspendTime             = %d sec\n", 
+			((int)slurm_ctl_conf_ptr->suspend_time - 1));
+	}
+	fprintf(out, "SuspendTimeout          = %u sec\n", 
+		slurm_ctl_conf_ptr->suspend_timeout);
 	fprintf(out, "SwitchType              = %s\n",
 		slurm_ctl_conf_ptr->switch_type);
 	fprintf(out, "TaskEpilog              = %s\n",
 		slurm_ctl_conf_ptr->task_epilog);
 	fprintf(out, "TaskPlugin              = %s\n",
 		 slurm_ctl_conf_ptr->task_plugin);
-	fprintf(out, "TaskPluginParam         = %s\n",
-		_task_plugin_param(slurm_ctl_conf_ptr->task_plugin_param));
+	slurm_sprint_cpu_bind_type(tmp_str, 
+				   slurm_ctl_conf_ptr->task_plugin_param);
+	fprintf(out, "TaskPluginParam         = %s\n", tmp_str);
 	fprintf(out, "TaskProlog              = %s\n",
 		slurm_ctl_conf_ptr->task_prolog);
 	fprintf(out, "TmpFS                   = %s\n", 
 		slurm_ctl_conf_ptr->tmp_fs);
+	fprintf(out, "TopologyPlugin          = %s\n",
+		 slurm_ctl_conf_ptr->topology_plugin);
+	fprintf(out, "TrackWCKey              = %u\n",
+		slurm_ctl_conf_ptr->track_wckey);
 	fprintf(out, "TreeWidth               = %u\n",
 		slurm_ctl_conf_ptr->tree_width);
 	fprintf(out, "UsePam                  = %u\n",
 		slurm_ctl_conf_ptr->use_pam);
 	fprintf(out, "UnkillableStepProgram   = %s\n",
 		slurm_ctl_conf_ptr->unkillable_program);
-	fprintf(out, "UnkillableStepTimeout   = %u\n",
+	fprintf(out, "UnkillableStepTimeout   = %u sec\n",
 		slurm_ctl_conf_ptr->unkillable_timeout);
-	fprintf(out, "WaitTime                = %u\n", 
+	fprintf(out, "WaitTime                = %u sec\n", 
 		slurm_ctl_conf_ptr->wait_time);
+
+	slurm_print_key_pairs(out, slurm_ctl_conf_ptr->select_conf_key_pairs,
+			      select_title);
 }
 
 /*
@@ -412,15 +528,21 @@ slurm_load_slurmd_status(slurmd_status_t **slurmd_status_ptr)
 	int rc;
 	slurm_msg_t req_msg;
 	slurm_msg_t resp_msg;
-	
+	char this_host[256], *this_addr;
+
 	slurm_msg_t_init(&req_msg);
 	slurm_msg_t_init(&resp_msg);
 
 	/*
 	 *  Set request message address to slurmd on localhost
 	 */
+	gethostname_short(this_host, sizeof(this_host));
+	this_addr = slurm_conf_get_nodeaddr(this_host);
+	if (this_addr == NULL)
+		this_addr = xstrdup("localhost");
 	slurm_set_addr(&req_msg.address, (uint16_t)slurm_get_slurmd_port(), 
-		       "localhost");
+		       this_addr);
+	xfree(this_addr);
 
 	req_msg.msg_type = REQUEST_DAEMON_STATUS;
 	req_msg.data     = NULL;
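
The hunk above drops the hard-wired "localhost" target: the request is now addressed to the NodeAddr configured for this host in slurm.conf, with localhost kept only as a fallback when the lookup fails. The same pattern is applied to slurm_pid2jobid() in job_info.c below. A minimal sketch of the resolution step, assuming the helpers used in the hunk (gethostname_short(), slurm_conf_get_nodeaddr(), xstrdup()/xfree()):

```c
/* Sketch: address an RPC to the local slurmd, as in the hunk above. */
static void set_local_slurmd_addr(slurm_msg_t *msg)
{
	char this_host[256], *this_addr;

	gethostname_short(this_host, sizeof(this_host));
	this_addr = slurm_conf_get_nodeaddr(this_host);
	if (this_addr == NULL)			/* host not in slurm.conf */
		this_addr = xstrdup("localhost");
	slurm_set_addr(&msg->address,
		       (uint16_t)slurm_get_slurmd_port(), this_addr);
	xfree(this_addr);			/* the address was copied */
}
```
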
@@ -509,3 +631,20 @@ void slurm_print_slurmd_status (FILE* out,
 		slurmd_status_ptr->version);
 	return;
 }
+
+extern void slurm_print_key_pairs(FILE* out, void *key_pairs, char *title)
+{
+	List config_list = (List)key_pairs;
+	ListIterator iter = NULL;
+	config_key_pair_t *key_pair;
+
+	if (!config_list)
+		return;
+	
+	fprintf(out, "\n%s:\n", title);
+	iter = list_iterator_create(config_list);
+	while((key_pair = list_next(iter))) {
+		fprintf(out, "%-23s = %s\n", key_pair->name, key_pair->value);
+	}
+	list_iterator_destroy(iter);
+}
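
The new slurm_print_key_pairs() walks a generic List of config_key_pair_t entries, so plugin-supplied settings (here the select plugin's, via select_conf_key_pairs) print in the same aligned name/value format as the built-in keywords. A hedged caller sketch, assuming the List API from src/common/list.h and stack-allocated pairs for illustration only:

```c
/* Illustrative only: print two hand-built key/value pairs. */
List kv = list_create(NULL);
config_key_pair_t a = { "SelectTypeParameters", "CR_Core" };
config_key_pair_t b = { "FastSchedule", "1" };

list_append(kv, &a);
list_append(kv, &b);
slurm_print_key_pairs(stdout, kv, "Select plugin configuration");
list_destroy(kv);
```
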
diff --git a/src/api/init_msg.c b/src/api/init_msg.c
index 333752b31d3f1bcb9320351057a08e97a16b22fa..f62e7e9b6fc292069bfdca3bcd53b7bc8775f0b0 100644
--- a/src/api/init_msg.c
+++ b/src/api/init_msg.c
@@ -2,13 +2,14 @@
  *  init_msg.c - initialize RPC messages contents
  *****************************************************************************
  *  Copyright (C) 2002-2007 The Regents of the University of California.
- *  Copyright (C) 2008 Lawrence Livermore National Security.
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -51,7 +52,7 @@
 /*
  * slurm_init_job_desc_msg - initialize job descriptor with 
  *	default values 
- * OUT job_desc_msg - user defined job descriptor
+ * IN/OUT job_desc_msg - user defined job descriptor
  */
 void slurm_init_job_desc_msg(job_desc_msg_t * job_desc_msg)
 {
@@ -64,9 +65,13 @@ void slurm_init_job_desc_msg(job_desc_msg_t * job_desc_msg)
 	job_desc_msg->argv		= ((char **) NULL);
 	job_desc_msg->begin_time	= 0;
 	job_desc_msg->blrtsimage	= NULL;
+	job_desc_msg->ckpt_dir          = NULL;
+	job_desc_msg->ckpt_interval     = 0;
 	job_desc_msg->comment		= NULL;
 	job_desc_msg->conn_type		= (uint16_t) NO_VAL;
 	job_desc_msg->contiguous	= (uint16_t) NO_VAL;
+	job_desc_msg->cpu_bind		= NULL;
+	job_desc_msg->cpu_bind_type	= (uint16_t) NO_VAL;
 	job_desc_msg->cpus_per_task	= (uint16_t) NO_VAL;
 	job_desc_msg->dependency	= NULL;
 	job_desc_msg->environment	= ((char **) NULL);
@@ -100,6 +105,8 @@ void slurm_init_job_desc_msg(job_desc_msg_t * job_desc_msg)
 	job_desc_msg->max_nodes		= NO_VAL;
 	job_desc_msg->max_sockets	= (uint16_t) NO_VAL;
 	job_desc_msg->max_threads	= (uint16_t) NO_VAL;
+	job_desc_msg->mem_bind		= NULL;
+	job_desc_msg->mem_bind_type	= (uint16_t) NO_VAL;
 	job_desc_msg->min_cores		= (uint16_t) NO_VAL;
 	job_desc_msg->min_nodes		= NO_VAL;
 	job_desc_msg->min_sockets	= (uint16_t) NO_VAL;
@@ -125,6 +132,7 @@ void slurm_init_job_desc_msg(job_desc_msg_t * job_desc_msg)
 	job_desc_msg->resp_host		= NULL;
 	job_desc_msg->req_nodes		= NULL;
 	job_desc_msg->requeue		= (uint16_t) NO_VAL;
+	job_desc_msg->reservation	= NULL;
 	job_desc_msg->rotate		= (uint16_t) NO_VAL;
 	job_desc_msg->script		= NULL;
 	job_desc_msg->select_jobinfo	= NULL;
@@ -132,19 +140,21 @@ void slurm_init_job_desc_msg(job_desc_msg_t * job_desc_msg)
 	job_desc_msg->task_dist		= (uint16_t) NO_VAL;
 	job_desc_msg->time_limit	= NO_VAL;
 	job_desc_msg->user_id		= NO_VAL;
+	job_desc_msg->wckey		= NULL;
 	job_desc_msg->work_dir		= NULL;
 }
 
 /*
  * slurm_init_part_desc_msg - initialize partition descriptor with 
  *	default values 
- * OUT job_desc_msg - user defined partition descriptor
+ * IN/OUT update_part_msg - user defined partition descriptor
  */
 void slurm_init_part_desc_msg (update_part_msg_t * update_part_msg)
 {
 	update_part_msg->name 		= NULL;
 	update_part_msg->nodes 		= NULL;
 	update_part_msg->allow_groups 	= NULL;
+	update_part_msg->default_time   = (uint32_t) NO_VAL;
 	update_part_msg->max_time 	= (uint32_t) NO_VAL;
 	update_part_msg->max_nodes 	= NO_VAL;
 	update_part_msg->min_nodes 	= NO_VAL;
@@ -154,5 +164,38 @@ void slurm_init_part_desc_msg (update_part_msg_t * update_part_msg)
 	update_part_msg->max_share 	= (uint16_t) NO_VAL;
 	update_part_msg->priority 	= (uint16_t) NO_VAL;
 	update_part_msg->state_up 	= (uint16_t) NO_VAL;
+	update_part_msg->allow_alloc_nodes = NULL;
+}
+
+/*
+ * slurm_init_resv_desc_msg - initialize reservation descriptor with 
+ *	default values 
+ * IN/OUT resv_msg - user defined reservation descriptor
+ */
+void slurm_init_resv_desc_msg (resv_desc_msg_t * resv_msg)
+{
+	resv_msg->name		= NULL;
+	resv_msg->start_time	= (time_t) NO_VAL;
+	resv_msg->end_time	= (time_t) NO_VAL;
+	resv_msg->duration	= NO_VAL;
+	resv_msg->flags		= (uint16_t) NO_VAL;
+	resv_msg->node_cnt	= NO_VAL;
+	resv_msg->node_list	= NULL;
+	resv_msg->features	= NULL;
+	resv_msg->partition	= NULL;
+	resv_msg->users		= NULL;
+	resv_msg->accounts	= NULL;
 }
 
+/*
+ * slurm_init_update_node_msg - initialize node update message
+ * IN/OUT update_node_msg - user defined node descriptor
+ */
+void slurm_init_update_node_msg (update_node_msg_t * update_node_msg)
+{
+	update_node_msg->node_names = NULL;
+	update_node_msg->features = NULL;
+	update_node_msg->reason = NULL;
+	update_node_msg->node_state = (uint16_t) NO_VAL;
+	update_node_msg->weight = (uint32_t) NO_VAL;
+}
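
All of the slurm_init_*_msg() helpers above share one contract: the caller owns the storage, the helper presets every field to NULL or NO_VAL, and only the fields the caller then overrides are acted upon by the controller. A hedged usage sketch for the new node-update initializer (slurm_update_node() is the matching update call in this API family, assumed here):

```c
update_node_msg_t node_msg;

slurm_init_update_node_msg(&node_msg);	/* every field -> NULL/NO_VAL */
node_msg.node_names = "tux[001-016]";	/* only the fields we set ... */
node_msg.node_state = NODE_STATE_DRAIN;	/* ... are changed */
node_msg.reason     = "bad DIMM";
if (slurm_update_node(&node_msg) != SLURM_SUCCESS)
	slurm_perror("slurm_update_node");
```
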
diff --git a/src/api/job_info.c b/src/api/job_info.c
index e5264161241ca3d4faff7cb354927efa06e5ae06..c3f00de88f119241da2c504ef3b5cb001a47affb 100644
--- a/src/api/job_info.c
+++ b/src/api/job_info.c
@@ -2,13 +2,14 @@
  *  job_info.c - get/print the job state information of slurm
  *****************************************************************************
  *  Copyright (C) 2002-2007 The Regents of the University of California.
- *  Copyright (C) 2008 Lawrence Livermore National Security.
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov> et. al.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -40,9 +41,10 @@
 #  include "config.h"
 #endif
 
+#include <ctype.h>
 #include <errno.h>
-#include <pwd.h>
 #include <grp.h>
+#include <pwd.h>
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
@@ -54,13 +56,13 @@
 #include <slurm/slurm_errno.h>
 
 #include "src/api/job_info.h"
+#include "src/common/forward.h"
 #include "src/common/node_select.h"
 #include "src/common/parse_time.h"
 #include "src/common/slurm_auth.h"
 #include "src/common/slurm_protocol_api.h"
 #include "src/common/uid.h"
 #include "src/common/xstring.h"
-#include "src/common/forward.h"
 
 /*
  * slurm_print_job_info_msg - output information about all Slurm 
@@ -125,7 +127,6 @@ slurm_sprint_job_info ( job_info_t * job_ptr, int one_liner )
 {
 	int i, j;
 	char time_str[32], select_buf[122], *group_name, *user_name;
-	char *wckey = NULL, *jname = NULL;
 	char tmp1[128], tmp2[128], *tmp3_ptr;
 	char tmp_line[512];
 	char *ionodes = NULL;
@@ -159,26 +160,11 @@ slurm_sprint_job_info ( job_info_t * job_ptr, int one_liner )
 		xstrcat(out, "\n   ");
 
 	/****** Line 2 ******/
-	if (job_ptr->name && job_ptr->name[0]) {
-		char *temp = NULL;
-		/* first set the jname to the job_ptr->name */
-		jname = xstrdup(job_ptr->name);
-		/* then grep for " since that is the delimiter for
-		   the wckey */
-		if((temp = strchr(jname, '\"'))) {
-			/* if we have a wckey set the " to NULL to
-			 * end the jname */
-			temp[0] = '\0';
-			/* increment and copy the remainder */
-			temp++;
-			wckey = xstrdup(temp);
-		}
-	}
 	if(slurm_get_track_wckey())
 		snprintf(tmp_line, sizeof(tmp_line), "Name=%s WCKey=%s",
-			 jname, wckey);
+			 job_ptr->name, job_ptr->wckey);
 	else
-		snprintf(tmp_line, sizeof(tmp_line), "Name=%s", jname);
+		snprintf(tmp_line, sizeof(tmp_line), "Name=%s", job_ptr->name);
 		
 	xstrcat(out, tmp_line);
 	if (one_liner)
@@ -188,9 +174,9 @@ slurm_sprint_job_info ( job_info_t * job_ptr, int one_liner )
 
 	/****** Line 3 ******/
 	snprintf(tmp_line, sizeof(tmp_line), 
-		"Priority=%u Partition=%s BatchFlag=%u", 
-		job_ptr->priority, job_ptr->partition, 
-		job_ptr->batch_flag);
+		 "Priority=%u Partition=%s BatchFlag=%u Reservation=%s", 
+		 job_ptr->priority, job_ptr->partition, 
+		 job_ptr->batch_flag, job_ptr->resv_name);
 	xstrcat(out, tmp_line);
 	if (one_liner)
 		xstrcat(out, " ");
@@ -408,8 +394,9 @@ slurm_sprint_job_info ( job_info_t * job_ptr, int one_liner )
 
 	/****** Line 11 ******/
 	snprintf(tmp_line, sizeof(tmp_line), 
-		"Dependency=%s Account=%s Requeue=%u",
-		job_ptr->dependency, job_ptr->account, job_ptr->requeue);
+		"Dependency=%s Account=%s Requeue=%u Restarts=%u",
+		job_ptr->dependency, job_ptr->account, job_ptr->requeue,
+		job_ptr->restart_cnt);
 	xstrcat(out, tmp_line);
 	if (one_liner)
 		xstrcat(out, " ");
@@ -417,10 +404,18 @@ slurm_sprint_job_info ( job_info_t * job_ptr, int one_liner )
 		xstrcat(out, "\n   ");
 
 	/****** Line 12 ******/
+	if (job_ptr->state_desc) {
+		/* Replace white space with underscore for easier parsing */
+		for (j=0; job_ptr->state_desc[j]; j++) {
+			if (isspace(job_ptr->state_desc[j]))
+				job_ptr->state_desc[j] = '_';
+		}
+		tmp3_ptr = job_ptr->state_desc;
+	} else
+		tmp3_ptr = job_reason_string(job_ptr->state_reason);
 	snprintf(tmp_line, sizeof(tmp_line), 
 		"Reason=%s Network=%s",
-		job_reason_string(job_ptr->state_reason), 
-		job_ptr->network);
+		tmp3_ptr, job_ptr->network);
 	xstrcat(out, tmp_line);
 	if (one_liner)
 		xstrcat(out, " ");
@@ -516,6 +511,7 @@ slurm_sprint_job_info ( job_info_t * job_ptr, int one_liner )
 			xstrcat(out, "\n   ");
 		xstrcat(out, select_buf);
 	}
+#ifdef HAVE_BG
 	/****** Line 20 (optional) ******/
 	select_g_sprint_jobinfo(job_ptr->select_jobinfo,
 				select_buf, sizeof(select_buf),
@@ -578,6 +574,7 @@ slurm_sprint_job_info ( job_info_t * job_ptr, int one_liner )
 #endif
 		xstrcat(out, tmp_line);
 	}
+#endif
 	xstrcat(out, "\n\n");
 
 	return out;
@@ -688,15 +685,21 @@ slurm_pid2jobid (pid_t job_pid, uint32_t *jobid)
 	slurm_msg_t req_msg;
 	slurm_msg_t resp_msg;
 	job_id_request_msg_t req;
-	
+	char this_host[256], *this_addr;
+
 	slurm_msg_t_init(&req_msg);
 	slurm_msg_t_init(&resp_msg);
 
 	/*
 	 *  Set request message address to slurmd on localhost
 	 */
+	gethostname_short(this_host, sizeof(this_host));
+	this_addr = slurm_conf_get_nodeaddr(this_host);
+	if (this_addr == NULL)
+		this_addr = xstrdup("localhost");
 	slurm_set_addr(&req_msg.address, (uint16_t)slurm_get_slurmd_port(), 
-		       "localhost");
+		       this_addr);
+	xfree(this_addr);
 
 	req.job_pid      = job_pid;
 	req_msg.msg_type = REQUEST_JOB_ID;
@@ -770,11 +773,11 @@ extern int32_t islurm_get_rem_time__(uint32_t *jobid)
 extern int32_t islurm_get_rem_time2__()
 {
 	uint32_t jobid;
-	char *slurm_jobid = getenv("SLURM_JOBID");
+	char *slurm_job_id = getenv("SLURM_JOB_ID");
 
-	if (slurm_jobid == NULL)
+	if (slurm_job_id == NULL)
 		return 0;
-	jobid = atol(slurm_jobid);
+	jobid = atol(slurm_job_id);
 	return islurm_get_rem_time__(&jobid);
 }
 
@@ -809,7 +812,7 @@ slurm_get_end_time(uint32_t jobid, time_t *end_time_ptr)
 		if (jobid_env) {
 			jobid = jobid_env;
 		} else {
-			char *env = getenv("SLURM_JOBID");
+			char *env = getenv("SLURM_JOB_ID");
 			if (env) {
 				jobid = (uint32_t) atol(env);
 				jobid_env = jobid;
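
The getenv() call sites in this file move from the pre-2.0 name SLURM_JOBID to SLURM_JOB_ID. Client code that must run against both versions can probe the new spelling first; a sketch, not part of the patch:

```c
#include <stdint.h>
#include <stdlib.h>

/* Return the job id from the environment, or 0 when not under SLURM. */
static uint32_t env_job_id(void)
{
	const char *env = getenv("SLURM_JOB_ID");	/* 2.0 name */
	if (env == NULL)
		env = getenv("SLURM_JOBID");		/* legacy name */
	return env ? (uint32_t) atol(env) : 0;
}
```
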
diff --git a/src/api/job_info.h b/src/api/job_info.h
index b9fe249ba8fb71f624c0d5193137a0e6f30ae6b5..6037fcbc8975e7c0b602fd0a97bdd315bf76cff1 100644
--- a/src/api/job_info.h
+++ b/src/api/job_info.h
@@ -1,15 +1,16 @@
 /*****************************************************************************\
  *  job_info.h - get/print the job state information of slurm
  *
- *  $Id: job_info.h 13672 2008-03-19 23:10:58Z jette $
+ *  $Id: job_info.h 16616 2009-02-20 17:00:27Z jette $
  *****************************************************************************
  *  Copyright (C) 2004 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov> et. al.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/api/job_step_info.c b/src/api/job_step_info.c
index f90d56f925d060c9a847a7b0a7b7435299a976ec..31bf6114f345ff98a3a3c4ee1de51c729d643798 100644
--- a/src/api/job_step_info.c
+++ b/src/api/job_step_info.c
@@ -1,15 +1,17 @@
 /*****************************************************************************\
  *  job_step_info.c - get/print the job step state information of slurm
- *  $Id: job_step_info.c 13672 2008-03-19 23:10:58Z jette $
+ *  $Id: job_step_info.c 16867 2009-03-12 16:35:42Z jette $
  *****************************************************************************
  *  Copyright (C) 2002-2006 The Regents of the University of California.
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>, 
  *             Joey Ekstrom <ekstrom1@llnl.gov>,  et. al.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -128,10 +130,9 @@ slurm_sprint_job_step_info ( job_step_info_t * job_step_ptr,
 
 	/****** Line 2 ******/
 	snprintf(tmp_line, sizeof(tmp_line),
-		"Partition=%s Nodes=%s Name=%s Network=%s Checkpoint=%u", 
+		"Partition=%s Nodes=%s Name=%s Network=%s", 
 		job_step_ptr->partition, job_step_ptr->nodes,
-		job_step_ptr->name, job_step_ptr->network,
-		job_step_ptr->ckpt_interval);
+		job_step_ptr->name, job_step_ptr->network);
 	xstrcat(out, tmp_line);
 	if (one_liner)
 		xstrcat(out, " ");
@@ -140,8 +141,9 @@ slurm_sprint_job_step_info ( job_step_info_t * job_step_ptr,
 
 	/****** Line 3 ******/
 	snprintf(tmp_line, sizeof(tmp_line),
-		"CheckpointPath=%s\n\n", 
-		 job_step_ptr->ckpt_path);
+		"ResvPorts=%s Checkpoint=%u CheckpointDir=%s\n\n", 
+		 job_step_ptr->resv_ports,
+		 job_step_ptr->ckpt_interval, job_step_ptr->ckpt_dir);
 	xstrcat(out, tmp_line);
 
 	return out;
diff --git a/src/api/node_info.c b/src/api/node_info.c
index b26c8c365074341cf2541e58b3b0e138331a6ec9..4f5b3a1494b28a271fc7da9f68c0ab7213923378 100644
--- a/src/api/node_info.c
+++ b/src/api/node_info.c
@@ -1,14 +1,16 @@
 /*****************************************************************************\
  *  node_info.c - get/print the node state information of slurm
- *  $Id: node_info.c 13672 2008-03-19 23:10:58Z jette $
+ *  $Id: node_info.c 16616 2009-02-20 17:00:27Z jette $
  *****************************************************************************
- *  Copyright (C) 2002-2006 The Regents of the University of California.
+ *  Copyright (C) 2002-2007 The Regents of the University of California.
+ *  Copyright (C) 2008 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov> et. al.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -113,7 +115,7 @@ char *
 slurm_sprint_node_table (node_info_t * node_ptr, int one_liner )
 {
 	uint16_t my_state = node_ptr->node_state;
-	char *comp_str = "", *drain_str = "";
+	char *comp_str = "", *drain_str = "", *power_str = "";
 	char tmp_line[512];
 	char *out = NULL;
 
@@ -125,13 +127,18 @@ slurm_sprint_node_table (node_info_t * node_ptr, int one_liner )
 		my_state &= (~NODE_STATE_DRAIN);
 		drain_str = "+DRAIN";
 	}
+	if (my_state & NODE_STATE_POWER_SAVE) {
+		my_state &= (~NODE_STATE_POWER_SAVE);
+		power_str = "+POWER";
+	}
 
 	/****** Line 1 ******/
 	snprintf(tmp_line, sizeof(tmp_line),
-		"NodeName=%s State=%s%s%s CPUs=%u AllocCPUs=%u "
+		"NodeName=%s State=%s%s%s%s Procs=%u AllocProcs=%u "
 		"RealMemory=%u TmpDisk=%u",
 		node_ptr->name, node_state_string(my_state),
-		comp_str, drain_str, node_ptr->cpus, node_ptr->used_cpus,
+		comp_str, drain_str, power_str,
+		node_ptr->cpus, node_ptr->used_cpus,
 		node_ptr->real_memory, node_ptr->tmp_disk);
 	xstrcat(out, tmp_line);
 	if (one_liner)
@@ -141,14 +148,22 @@ slurm_sprint_node_table (node_info_t * node_ptr, int one_liner )
 
 	/****** Line 2 ******/
 	snprintf(tmp_line, sizeof(tmp_line),
-		"Sockets=%u Cores=%u Threads=%u "
+		"Sockets=%u CoresPerSocket=%u ThreadsPerCore=%u",
+		node_ptr->sockets, node_ptr->cores, node_ptr->threads);
+	xstrcat(out, tmp_line);
+	if (one_liner)
+		xstrcat(out, " ");
+	else
+		xstrcat(out, "\n   ");
+
+	/****** Line 3 ******/
+	snprintf(tmp_line, sizeof(tmp_line),
 		"Weight=%u Features=%s Reason=%s" ,
-		node_ptr->sockets, node_ptr->cores, node_ptr->threads,
 		node_ptr->weight, node_ptr->features,
 		node_ptr->reason);
 	xstrcat(out, tmp_line);
 
-	/****** Line 3 (optional) ******/
+	/****** Line 4 (optional) ******/
 	if (node_ptr->arch || node_ptr->os) {
 		if (one_liner)
 			xstrcat(out, " ");
diff --git a/src/api/node_select_info.c b/src/api/node_select_info.c
index 2e5f2f982778c76891babb4a14f8368c88b1d7da..513c3ae936f8ba46febe2ce748166c23a91cc812 100644
--- a/src/api/node_select_info.c
+++ b/src/api/node_select_info.c
@@ -1,15 +1,16 @@
 /*****************************************************************************\
  *  node_select_info.c - get the node select plugin state information of slurm
  *
- *  $Id: node_select_info.c 13672 2008-03-19 23:10:58Z jette $
+ *  $Id: node_select_info.c 16616 2009-02-20 17:00:27Z jette $
  *****************************************************************************
  *  Copyright (C) 2005 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/api/node_select_info.h b/src/api/node_select_info.h
index be41041e5aaef022859fec2352a244bee5e74115..62e6508dbd5fbe61f2f877826cc054e718fdd2af 100644
--- a/src/api/node_select_info.h
+++ b/src/api/node_select_info.h
@@ -5,15 +5,16 @@
  *  NOTE: This software specifically supports only BlueGene/L for now. It 
  *	will be made more general in the future
  *
- *  $Id: node_select_info.h 13672 2008-03-19 23:10:58Z jette $
+ *  $Id: node_select_info.h 17534 2009-05-19 00:58:46Z da $
  *****************************************************************************
  *  Copyright (C) 2005 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -48,33 +49,32 @@
 #include <time.h>
 
 typedef struct {
-	char *nodes;
-	char *ionodes;
-	char *owner_name;
 	char *bg_block_id;
-	int state;
-	int conn_type;
-	int node_use;
-	int quarter;
-	int nodecard;
-	int node_cnt;
+	char *blrtsimage;       /* BlrtsImage for this block */
 	int *bp_inx;            /* list index pairs into node_table for *nodes:
 				 * start_range_1, end_range_1,
 				 * start_range_2, .., -1  */
+	int conn_type;
+	char *ionodes;
 	int *ionode_inx;        /* list index pairs for ionodes in the
 				 * node listed for *ionodes:
 				 * start_range_1, end_range_1,
 				 * start_range_2, .., -1  */
-	char *blrtsimage;       /* BlrtsImage for this block */
+	int job_running;
 	char *linuximage;       /* LinuxImage for this block */
 	char *mloaderimage;     /* mloaderImage for this block */
+	char *nodes;
+	int node_cnt;
+	int node_use;
+	char *owner_name;
 	char *ramdiskimage;     /* RamDiskImage for this block */
+	int state;
 } bg_info_record_t;
 
 typedef struct {
+	bg_info_record_t *bg_info_array;
 	time_t    last_update;
 	uint32_t  record_count;
-	bg_info_record_t *bg_info_array;
 } node_select_info_msg_t;
 
 /*
diff --git a/src/api/partition_info.c b/src/api/partition_info.c
index 9bc41dd767d8aac9c68d294b7b829901d4b9c0dd..28ead16cb0be3e86617c9e88c72a4f17961c2c00 100644
--- a/src/api/partition_info.c
+++ b/src/api/partition_info.c
@@ -5,10 +5,11 @@
  *  Copyright (C) 2008 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov> et. al.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *   
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -221,6 +222,19 @@ char *slurm_sprint_partition_info ( partition_info_t * part_ptr,
 		xstrcat(out, "\n   ");
 	
 	/****** Line 4 ******/
+	if (part_ptr->allow_alloc_nodes == NULL)
+		snprintf(tmp_line, sizeof(tmp_line), "AllocNodes=%s","ALL");
+	else
+		snprintf(tmp_line, sizeof(tmp_line), "AllocNodes=%s",
+			 part_ptr->allow_alloc_nodes);
+	xstrcat(out, tmp_line);
+	if (one_liner)
+		xstrcat(out, " ");
+	else
+		xstrcat(out, "\n   ");
+	
+
+	/****** Line 5 ******/
 #ifdef HAVE_BG
 	snprintf(tmp_line, sizeof(tmp_line), "BasePartitions=%s BPIndices=", 
 		part_ptr->nodes);
@@ -237,6 +251,19 @@ char *slurm_sprint_partition_info ( partition_info_t * part_ptr,
 			part_ptr->node_inx[j+1]);
 		xstrcat(out, tmp_line);
 	}
+
+	if (part_ptr->default_time == INFINITE)
+		sprintf(tmp_line, " DefaultTime=UNLIMITED ");
+	else if (part_ptr->default_time == NO_VAL)
+		sprintf(tmp_line, " DefaultTime=NONE ");	  
+	else {
+		char time_line[32];
+		secs2time_str(part_ptr->default_time * 60, time_line, 
+			sizeof(time_line));
+		sprintf(tmp_line, " DefaultTime=%s ", time_line);
+	}
+	xstrcat(out, tmp_line);
+
 	if (one_liner)
 		xstrcat(out, "\n");
 	else
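
DefaultTime is carried in minutes, with INFINITE and NO_VAL reserved as sentinels for UNLIMITED and NONE; everything else is rendered by secs2time_str() from src/common/parse_time.h as [days-]HH:MM:SS. The same three-way split in isolation (a sketch):

```c
/* Sketch of the DefaultTime formatting above; buf should hold >= 32 bytes. */
static void default_time_str(uint32_t minutes, char *buf, int len)
{
	if (minutes == INFINITE)
		snprintf(buf, len, "UNLIMITED");
	else if (minutes == NO_VAL)
		snprintf(buf, len, "NONE");
	else	/* e.g. 1500 minutes -> "1-01:00:00" */
		secs2time_str(minutes * 60, buf, len);
}
```
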
diff --git a/src/api/pmi.c b/src/api/pmi.c
index d0a7754beb1d3a336318e3bb75e9af5cf09ea5ee..549226efa9478fc1ca864fc553656a5d40bf3b2a 100644
--- a/src/api/pmi.c
+++ b/src/api/pmi.c
@@ -49,10 +49,11 @@
  *  Copyright (C) 2005-2006 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -184,7 +185,7 @@ int PMI_Init( int *spawned )
 	if (pmi_init)
 		goto replay;
 
-	env = getenv("SLURM_JOBID");
+	env = getenv("SLURM_JOB_ID");
 	if (env)
 		pmi_jobid = atoi(env);
 	else
@@ -445,7 +446,7 @@ int PMI_Get_appnum( int *appnum )
 	if (appnum == NULL)
 		return PMI_ERR_INVALID_ARG;
 
-	env = getenv("SLURM_JOBID");
+	env = getenv("SLURM_JOB_ID");
 	if (env) {
 		*appnum = atoi(env);
 		return PMI_SUCCESS;
@@ -711,9 +712,14 @@ int PMI_Get_clique_size( int *size )
 	if (size == NULL)
 		return PMI_ERR_INVALID_ARG;
 
-	env = getenv("SLURM_CPUS_ON_NODE");
+	env = getenv("SLURM_GTIDS");
 	if (env) {
-		*size = atoi(env);
+		int i, tids=1;
+		for (i=0; env[i]; i++) {
+			if (env[i] == ',')
+				tids++;
+		}
+		*size = tids;
 		return PMI_SUCCESS;
 	}
 	return PMI_FAIL;
@@ -742,7 +748,7 @@ communicate through IPC mechanisms (e.g., shared memory) and other network
 mechanisms.
 
 @*/
-int PMI_Get_clique_ranks( char ranks[], int length )
+int PMI_Get_clique_ranks( int ranks[], int length )
 {
 	char *env;
 
@@ -754,7 +760,19 @@ int PMI_Get_clique_ranks( char ranks[], int length )
 
 	env = getenv("SLURM_GTIDS");
 	if (env) {
-		strcpy(ranks, env);
+		int i = 0;
+		char *tid, *tids, *last = NULL;
+		tids = strdup(env);
+		tid = strtok_r(tids, ",", &last);
+		while (tid) {
+			if (i >= length) {
+				free(tids);
+				return PMI_ERR_INVALID_LENGTH;
+			}
+			ranks[i++] = atoi(tid);
+			tid = strtok_r(NULL, ",", &last);
+		}
+		free(tids);
 		return PMI_SUCCESS;
 	}
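
Both PMI fixes derive the clique, i.e. the tasks co-resident on this node, from SLURM_GTIDS, a comma-separated list of global task ids: the clique size is one more than the comma count, and the ranks come from tokenizing the same string. The parse in isolation, a sketch using the same strtok_r-on-a-copy approach as the patch:

```c
#include <stdlib.h>
#include <string.h>

/* Parse e.g. "3,7,11" into ranks[]; returns the count or -1 on overflow. */
static int parse_gtids(const char *gtids, int ranks[], int length)
{
	char *copy = strdup(gtids), *tok, *save = NULL;
	int n = 0;

	for (tok = strtok_r(copy, ",", &save); tok;
	     tok = strtok_r(NULL, ",", &save)) {
		if (n >= length) {
			free(copy);
			return -1;
		}
		ranks[n++] = atoi(tok);
	}
	free(copy);
	return n;
}
```
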
 
diff --git a/src/api/pmi_server.c b/src/api/pmi_server.c
index d621d590eab3bb26f30b4cc23910097a2993d5c6..b0d32873a9572c9f157f1d157b52ba0309869359 100644
--- a/src/api/pmi_server.c
+++ b/src/api/pmi_server.c
@@ -1,14 +1,15 @@
 /*****************************************************************************\
  *  pmi_server.c - Global PMI data as maintained within srun
- *  $Id: pmi_server.c 15376 2008-10-10 19:28:11Z da $
+ *  $Id: pmi_server.c 16616 2009-02-20 17:00:27Z jette $
  *****************************************************************************
  *  Copyright (C) 2005-2006 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/api/pmi_server.h b/src/api/pmi_server.h
index 03e42c52e4fabf235a0cd001483086cde7efc4d2..035965536cddd59377ccbc1ed00748020a0f21bd 100644
--- a/src/api/pmi_server.h
+++ b/src/api/pmi_server.h
@@ -1,14 +1,15 @@
 /*****************************************************************************\
  *  pmi.h - Global PMI data as maintained within srun
- *  $Id: pmi_server.h 13672 2008-03-19 23:10:58Z jette $
+ *  $Id: pmi_server.h 16616 2009-02-20 17:00:27Z jette $
  *****************************************************************************
  *  Copyright (C) 2005 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/api/reconfigure.c b/src/api/reconfigure.c
index 4e865fd4d13a8b4315eedf7b71097a28906e1d73..7f164b48cd76b2c39bff1728e0210783fe927143 100644
--- a/src/api/reconfigure.c
+++ b/src/api/reconfigure.c
@@ -1,15 +1,16 @@
 /*****************************************************************************\
  *  reconfigure.c - request that slurmctld shutdown or re-read the 
  *	            configuration files
- *  $Id: reconfigure.c 14872 2008-08-25 16:25:28Z jette $
+ *  $Id: reconfigure.c 17450 2009-05-12 16:22:58Z jette $
  *****************************************************************************
  *  Copyright (C) 2002 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov> et. al.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -133,6 +134,23 @@ slurm_shutdown (uint16_t options)
 	return _send_message_controller(PRIMARY_CONTROLLER,   &req_msg);
 }
 
+/*
+ * slurm_takeover - issue RPC to have the Slurm backup controller take over
+ *                  the primary controller. REQUEST_CONTROL is sent by the
+ *                  backup to the primary controller to take control
+ * RET 0 or a slurm error code
+ */
+int
+slurm_takeover ( void )
+{
+	slurm_msg_t req_msg;
+
+	slurm_msg_t_init(&req_msg);
+	req_msg.msg_type     = REQUEST_TAKEOVER;
+		
+	return _send_message_controller(SECONDARY_CONTROLLER, &req_msg);
+}
+
 int
 _send_message_controller (enum controller_id dest, slurm_msg_t *req) 
 {
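
Note the routing: slurm_takeover() sends REQUEST_TAKEOVER to the SECONDARY_CONTROLLER, and the backup then claims control from the primary itself. A caller needs nothing more than this (sketch):

```c
if (slurm_takeover() != SLURM_SUCCESS)
	slurm_perror("slurm_takeover");	/* e.g. no backup controller up */
```
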
diff --git a/src/api/reservation_info.c b/src/api/reservation_info.c
new file mode 100644
index 0000000000000000000000000000000000000000..77e3991f711dec9d46456be1d93b41f56091057f
--- /dev/null
+++ b/src/api/reservation_info.c
@@ -0,0 +1,199 @@
+/*****************************************************************************\
+ *  reservation_info.c - get/print the reservation state information of slurm
+ *****************************************************************************
+ *  Copyright (C) 2009 Lawrence Livermore National Security.
+ *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+ *  Written by Morris Jette <jette1@llnl.gov> et. al.
+ *  CODE-OCEC-09-009. All rights reserved.
+ *   
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
+ *  
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission 
+ *  to link the code of portions of this program with the OpenSSL library under 
+ *  certain conditions as described in each individual source file, and 
+ *  distribute linked combinations including the two. You must obey the GNU 
+ *  General Public License in all respects for all of the code used other than 
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this 
+ *  exception to your version of the file(s), but you are not obligated to do 
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in 
+ *  the program, then also delete it here.
+ *  
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *  
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#ifdef HAVE_CONFIG_H
+#  include "config.h"
+#endif
+
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <slurm/slurm.h>
+
+#include "src/api/job_info.h"
+#include "src/common/parse_time.h"
+#include "src/common/slurm_protocol_api.h"
+#include "src/common/xmalloc.h"
+#include "src/common/xstring.h"
+
+/*
+ * slurm_print_reservation_info_msg - output information about all Slurm 
+ *	reservations based upon message as loaded using slurm_load_reservations
+ * IN out - file to write to
+ * IN resv_info_ptr - reservation information message pointer
+ * IN one_liner - print as a single line if true
+ */
+void slurm_print_reservation_info_msg ( FILE* out, 
+		reserve_info_msg_t * resv_info_ptr, int one_liner )
+{
+	int i ;
+	reserve_info_t * resv_ptr = resv_info_ptr->reservation_array ;
+	char time_str[32];
+
+	slurm_make_time_str( (time_t *)&resv_info_ptr->last_update, time_str, 
+			     sizeof(time_str));
+	fprintf( out, "Reservation data as of %s, record count %d\n",
+		time_str, resv_info_ptr->record_count);
+
+	for (i = 0; i < resv_info_ptr->record_count; i++) {
+		slurm_print_reservation_info ( out, & resv_ptr[i], one_liner );
+	}
+
+}
+
+/*
+ * slurm_print_reservation_info - output information about a specific Slurm 
+ *	reservation based upon message as loaded using slurm_load_reservations
+ * IN out - file to write to
+ * IN resv_ptr - an individual reservation information record pointer
+ * IN one_liner - print as a single line if true
+ */
+void slurm_print_reservation_info ( FILE* out, reserve_info_t * resv_ptr, 
+				    int one_liner )
+{
+	char *print_this = slurm_sprint_reservation_info(resv_ptr, one_liner);
+	fprintf ( out, "%s", print_this);
+	xfree(print_this);
+}
+
+
+/*
+ * slurm_sprint_reservation_info - output information about a specific Slurm 
+ *	reservation based upon message as loaded using slurm_load_reservations
+ * IN resv_ptr - an individual reservation information record pointer
+ * IN one_liner - print as a single line if true
+ * RET out - char * containing formatted output (must be freed after call)
+ *           NULL is returned on failure.
+ */
+char *slurm_sprint_reservation_info ( reserve_info_t * resv_ptr, 
+				      int one_liner )
+{
+	char tmp1[32], tmp2[32], *flag_str = NULL;
+	char tmp_line[MAXHOSTRANGELEN];
+	char *out = NULL;
+
+	/****** Line 1 ******/
+	slurm_make_time_str(&resv_ptr->start_time, tmp1, sizeof(tmp1));
+	slurm_make_time_str(&resv_ptr->end_time,   tmp2, sizeof(tmp2));
+	snprintf(tmp_line, sizeof(tmp_line),
+		 "ReservationName=%s StartTime=%s EndTime=%s Duration=%u",
+		 resv_ptr->name, tmp1, tmp2, 
+		 (uint32_t) (difftime(resv_ptr->end_time, 
+				      resv_ptr->start_time) / 60));
+	xstrcat(out, tmp_line);
+
+	if (one_liner)
+		xstrcat(out, " ");
+	else
+		xstrcat(out, "\n   ");
+	
+	/****** Line 2 ******/
+	flag_str = reservation_flags_string(resv_ptr->flags);
+
+	snprintf(tmp_line, sizeof(tmp_line), 
+		 "Nodes=%s NodeCnt=%u Features=%s PartitionName=%s Flags=%s",
+		 resv_ptr->node_list, resv_ptr->node_cnt,
+		 resv_ptr->features,  resv_ptr->partition, flag_str);
+	xfree(flag_str);
+	xstrcat(out, tmp_line);
+	if (one_liner)
+		xstrcat(out, " ");
+	else
+		xstrcat(out, "\n   ");
+	
+	/****** Line 3 ******/
+	snprintf(tmp_line, sizeof(tmp_line), 
+		 "Users=%s Accounts=%s", 
+		 resv_ptr->users, resv_ptr->accounts);
+	xstrcat(out, tmp_line);
+	if (one_liner)
+		xstrcat(out, "\n");
+	else
+		xstrcat(out, "\n\n");
+	
+	return out;
+}
+
+
+
+/*
+ * slurm_load_reservations - issue RPC to get all slurm reservation 
+ *	configuration information if changed since update_time 
+ * IN update_time - time of current configuration data
+ * IN reserve_info_msg_pptr - place to store a reservation configuration 
+ *	pointer
+ * RET 0 or a slurm error code
+ * NOTE: free the response using slurm_free_reservation_info_msg
+ */
+extern int slurm_load_reservations (time_t update_time, 
+		reserve_info_msg_t **resp)
+{
+	int rc;
+	slurm_msg_t req_msg;
+	slurm_msg_t resp_msg;
+	resv_info_request_msg_t req;
+
+	slurm_msg_t_init(&req_msg);
+	slurm_msg_t_init(&resp_msg);
+
+	req.last_update  = update_time;
+	req_msg.msg_type = REQUEST_RESERVATION_INFO;
+	req_msg.data     = &req;
+	
+	if (slurm_send_recv_controller_msg(&req_msg, &resp_msg) < 0)
+		return SLURM_ERROR;
+	
+	switch (resp_msg.msg_type) {
+	case RESPONSE_RESERVATION_INFO:
+		*resp = (reserve_info_msg_t *) resp_msg.data;
+		break;
+	case RESPONSE_SLURM_RC:
+		rc = ((return_code_msg_t *) resp_msg.data)->return_code;
+		slurm_free_return_code_msg(resp_msg.data);	
+		if (rc) 
+			slurm_seterrno_ret(rc);
+		*resp = NULL;
+		break;
+	default:
+		slurm_seterrno_ret(SLURM_UNEXPECTED_MSG_ERROR);
+		break;
+	}
+
+	return SLURM_PROTOCOL_SUCCESS;
+}
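
The new reservation API composes in the usual load/print/free sequence; passing a zero update_time forces a full fetch. A hedged sketch (slurm_free_reservation_info_msg() is the free routine named in the NOTE above, not shown in this patch):

```c
static void show_reservations(void)
{
	reserve_info_msg_t *resv_info = NULL;

	if (slurm_load_reservations((time_t) 0, &resv_info) != SLURM_SUCCESS) {
		slurm_perror("slurm_load_reservations");
		return;
	}
	slurm_print_reservation_info_msg(stdout, resv_info, 0 /* multi-line */);
	slurm_free_reservation_info_msg(resv_info);
}
```
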
diff --git a/src/api/signal.c b/src/api/signal.c
index 3ea420ec6803bcdd88628eb6b969e222f74d2c0d..48020e169d1a57672a06b4e864ef6f519418987f 100644
--- a/src/api/signal.c
+++ b/src/api/signal.c
@@ -1,14 +1,15 @@
 /*****************************************************************************\
  *  signal.c - Send a signal to a slurm job or job step
- *  $Id: signal.c 15602 2008-11-04 23:36:01Z jette $
+ *  $Id: signal.c 16616 2009-02-20 17:00:27Z jette $
  *****************************************************************************
  *  Copyright (C) 2005 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Christopher J. Morrone <morrone2@llnl.gov>.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/api/slurm_hostlist.c b/src/api/slurm_hostlist.c
new file mode 100644
index 0000000000000000000000000000000000000000..16e9fb06b3bffbe7786191b095fc772b6b433002
--- /dev/null
+++ b/src/api/slurm_hostlist.c
@@ -0,0 +1,96 @@
+/****************************************************************************\
+ *  slurm_hostlist.c - wrapper functions for systems that do not
+ *                     support strong aliases
+ *****************************************************************************
+ *  Copyright (C) 2009 Lawrence Livermore National Security.
+ *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+ *  Written by Danny Auble <da@llnl.gov>.
+ *  CODE-OCEC-09-009. All rights reserved.
+ *  
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
+ *  
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission 
+ *  to link the code of portions of this program with the OpenSSL library under 
+ *  certain conditions as described in each individual source file, and 
+ *  distribute linked combinations including the two. You must obey the GNU 
+ *  General Public License in all respects for all of the code used other than 
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this 
+ *  exception to your version of the file(s), but you are not obligated to do 
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in 
+ *  the program, then also delete it here.
+ *  
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *  
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#ifdef HAVE_CONFIG_H
+#  include "config.h"
+#endif                /* HAVE_CONFIG_H */
+
+#if USE_ALIAS == 0
+/* Only provide these wrappers when strong aliases are unavailable */
+#include "src/common/hostlist.h"
+
+/* wrapper functions */
+extern hostlist_t slurm_hostlist_create(const char *hostlist)
+{
+	return hostlist_create(hostlist);
+}
+
+extern int slurm_hostlist_count(hostlist_t hl)
+{
+	return hostlist_count(hl);
+}
+
+extern void slurm_hostlist_destroy(hostlist_t hl)
+{
+	hostlist_destroy(hl);
+	return;
+}
+
+extern int slurm_hostlist_find(hostlist_t hl, const char *hostname)
+{
+	return hostlist_find(hl, hostname);
+}
+
+extern int slurm_hostlist_push(hostlist_t hl, const char *hosts)
+{
+	return hostlist_push(hl, hosts);
+}
+
+extern int slurm_hostlist_push_host(hostlist_t hl, const char *host)
+{
+	return hostlist_push_host(hl, host);
+}
+
+extern ssize_t slurm_hostlist_ranged_string(hostlist_t hl, size_t n, char *buf)
+{
+	return hostlist_ranged_string(hl, n, buf);
+}
+
+extern char *slurm_hostlist_shift(hostlist_t hl)
+{
+	return hostlist_shift(hl);
+}
+
+extern void slurm_hostlist_uniq(hostlist_t hl)
+{
+	hostlist_uniq(hl);
+	return;
+}
+
+#endif
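
With the names corrected above, each slurm_hostlist_* wrapper forwards to its like-named hostlist_* implementation, giving callers stable slurm_-prefixed entry points on platforms without strong aliases. Typical use collapses a node list into ranged form (sketch; buffer size arbitrary):

```c
char buf[1024];
hostlist_t hl = slurm_hostlist_create("tux1,tux2,tux3,tux10");

slurm_hostlist_uniq(hl);			/* drop duplicate hosts */
slurm_hostlist_ranged_string(hl, sizeof(buf), buf);
/* buf now holds something like "tux[1-3,10]" */
slurm_hostlist_destroy(hl);
```
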
diff --git a/src/api/slurm_pmi.c b/src/api/slurm_pmi.c
index 54165bfd1332efb7f71acf4eaa1ccd7591583e30..f3c556d5e782bcec99ef73149aa55f13bf2f75e4 100644
--- a/src/api/slurm_pmi.c
+++ b/src/api/slurm_pmi.c
@@ -4,10 +4,11 @@
  *  Copyright (C) 2005-2006 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/api/slurm_pmi.h b/src/api/slurm_pmi.h
index 8b0a61ba2d282c29d8f382d8021c1ba8f4b7adc2..675fbfae99e162ea1b7244c6dee11c611fb8fd2a 100644
--- a/src/api/slurm_pmi.h
+++ b/src/api/slurm_pmi.h
@@ -4,10 +4,11 @@
  *  Copyright (C) 2005-2006 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/api/step_ctx.c b/src/api/step_ctx.c
index 95956f3aca2fd55158f6f0981213ea094a38ca34..b1bb5cf876eee77195d7d1bb2bd7c8ac3ff25153 100644
--- a/src/api/step_ctx.c
+++ b/src/api/step_ctx.c
@@ -1,15 +1,15 @@
 /*****************************************************************************\
  *  step_ctx.c - step_ctx task functions for use by AIX/POE
- *
- *  $Id: step_ctx.c 15262 2008-10-01 22:58:26Z jette $
  *****************************************************************************
- *  Copyright (C) 2004 The Regents of the University of California.
+ *  Copyright (C) 2004-2007 The Regents of the University of California.
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -44,28 +44,37 @@
 
 #include <slurm/slurm.h>
 
+#include "src/common/bitstring.h"
 #include "src/common/hostlist.h"
 #include "src/common/net.h"
+#include "src/common/slurm_cred.h"
 #include "src/common/slurm_protocol_api.h"
 #include "src/common/slurm_protocol_defs.h"
 #include "src/common/xmalloc.h"
 #include "src/common/xstring.h"
 #include "src/common/slurm_cred.h"
-
 #include "src/api/step_ctx.h"
 
 static void
 _job_fake_cred(struct slurm_step_ctx_struct *ctx)
 {
 	slurm_cred_arg_t arg;
-	arg.alloc_lps_cnt = 0;
-	arg.alloc_lps     = NULL;
+	uint32_t node_cnt = ctx->step_resp->step_layout->node_cnt;
+
 	arg.hostlist      = ctx->step_req->node_list;
 	arg.job_mem       = 0;
 	arg.jobid         = ctx->job_id;
 	arg.stepid        = ctx->step_resp->job_step_id;
-	arg.task_mem      = 0;
 	arg.uid           = ctx->user_id;
+	arg.core_bitmap   = bit_alloc(node_cnt);
+	bit_nset(arg.core_bitmap, 0, node_cnt-1);
+	arg.cores_per_socket = xmalloc(sizeof(uint16_t));
+	arg.cores_per_socket[0] = 1;
+	arg.sockets_per_node = xmalloc(sizeof(uint16_t));
+	arg.sockets_per_node[0] = 1;
+	arg.sock_core_rep_count = xmalloc(sizeof(uint32_t));
+	arg.sock_core_rep_count[0] = node_cnt;
+	arg.job_nhosts    = node_cnt;
+	arg.job_hostlist  = ctx->step_resp->step_layout->node_list;
 	ctx->step_resp->cred = slurm_cred_faker(&arg);
 }
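
The (sockets_per_node, cores_per_socket, sock_core_rep_count) triple in the credential acts as a run-length encoding: entry i describes sock_core_rep_count[i] consecutive nodes, and the fake credential above uses a single entry (1 socket x 1 core) repeated across all node_cnt nodes. A decoder sketch under that reading (the encoding detail is inferred from this usage, not stated in the patch):

```c
/* Total cores described by a run-length encoded socket/core layout. */
static uint32_t total_cores(const uint16_t *sockets_per_node,
			    const uint16_t *cores_per_socket,
			    const uint32_t *rep_count, uint32_t entries)
{
	uint32_t i, total = 0;

	for (i = 0; i < entries; i++)
		total += (uint32_t) sockets_per_node[i] *
			 cores_per_socket[i] * rep_count[i];
	return total;
}
```
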
 
@@ -80,15 +90,17 @@ static job_step_create_request_msg_t *_create_step_request(
 	step_req->cpu_count = step_params->cpu_count;
 	step_req->num_tasks = step_params->task_count;
 	step_req->relative = step_params->relative;
+	step_req->resv_port_cnt = step_params->resv_port_cnt;
 	step_req->exclusive  = step_params->exclusive;
 	step_req->immediate  = step_params->immediate;
 	step_req->ckpt_interval = step_params->ckpt_interval;
-	step_req->ckpt_path = xstrdup(step_params->ckpt_path);
+	step_req->ckpt_dir = xstrdup(step_params->ckpt_dir);
 	step_req->task_dist = step_params->task_dist;
 	step_req->plane_size = step_params->plane_size;
 	step_req->node_list = xstrdup(step_params->node_list);
 	step_req->network = xstrdup(step_params->network);
 	step_req->name = xstrdup(step_params->name);
+	step_req->no_kill = step_params->no_kill;
 	step_req->overcommit = step_params->overcommit ? 1 : 0;
 	step_req->mem_per_task = step_params->mem_per_task;
 
@@ -140,7 +152,6 @@ slurm_step_ctx_create (const slurm_step_ctx_params_t *step_params)
 	ctx->magic	= STEP_CTX_MAGIC;
 	ctx->job_id	= step_req->job_id;
 	ctx->user_id	= step_req->user_id;
-	ctx->no_kill	= step_params->no_kill;
 	ctx->step_req   = step_req;
 	ctx->step_resp	= step_resp;
 	ctx->verbose_level = step_params->verbose_level;
@@ -215,7 +226,6 @@ slurm_step_ctx_create_no_alloc (const slurm_step_ctx_params_t *step_params,
 	ctx->magic	= STEP_CTX_MAGIC;
 	ctx->job_id	= step_req->job_id;
 	ctx->user_id	= step_req->user_id;
-	ctx->no_kill	= step_params->no_kill;
 	ctx->step_req   = step_req;
 	ctx->step_resp	= step_resp;
 	ctx->verbose_level = step_params->verbose_level;
@@ -445,6 +455,7 @@ extern void slurm_step_ctx_params_t_init (slurm_step_ctx_params_t *ptr)
 	ptr->relative = (uint16_t)NO_VAL;
 	ptr->task_dist = SLURM_DIST_CYCLIC;
 	ptr->plane_size = (uint16_t)NO_VAL;
+	ptr->resv_port_cnt = (uint16_t)NO_VAL;
 
 	ptr->uid = getuid();
 
diff --git a/src/api/step_ctx.h b/src/api/step_ctx.h
index 74dc6e39722f0a7d8e897d28495f8d3ace1736fe..8ecd26597890712d5324ba5d26246c391ff85812 100644
--- a/src/api/step_ctx.h
+++ b/src/api/step_ctx.h
@@ -7,10 +7,11 @@
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>,
  *  Christopher J. Morrone <morrone2@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -55,7 +56,6 @@ struct slurm_step_ctx_struct {
 	struct step_launch_state *launch_state;
 	uint16_t verbose_level; /* for extra logging decisions in step
 				 * launch api */
-	bool no_kill;		/* if set, don't kill step on node DOWN */
 };
 
 #endif /* _STEP_CTX_H */
diff --git a/src/api/step_io.c b/src/api/step_io.c
index 37e673bd93ee09dfe891c30568220a53cccf8407..acc6e55503d68b6f03932ac6abe85cb2a7a0d2ee 100644
--- a/src/api/step_io.c
+++ b/src/api/step_io.c
@@ -1,14 +1,15 @@
 /****************************************************************************\
  *  step_io.c - process stdin, stdout, and stderr for parallel jobs.
- *  $Id: step_io.c 13672 2008-03-19 23:10:58Z jette $
+ *  $Id: step_io.c 17056 2009-03-26 23:35:52Z dbremer $
  *****************************************************************************
  *  Copyright (C) 2006 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Mark Grondona <grondona@llnl.gov>, et. al.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -57,6 +58,7 @@
 #include "src/common/eio.h"
 #include "src/common/io_hdr.h"
 #include "src/common/net.h"
+#include "src/common/write_labelled_message.h"
 
 #include "src/api/step_io.h"
 
@@ -490,139 +492,6 @@ create_file_write_eio_obj(int fd, uint32_t taskid, uint32_t nodeid,
 	return eio;
 }
 
-static int _write_label(int fd, int taskid, int label_width)
-{
-	int n;
-	int left = label_width + 2;
-	char buf[16];
-	void *ptr = buf;
-
-	snprintf(buf, 16, "%0*d: ", label_width, taskid);
-	while (left > 0) {
-	again:
-		if ((n = write(fd, ptr, left)) < 0) {
-			if (errno == EINTR)
-				goto again;
-			if ((errno == EAGAIN) || (errno == EWOULDBLOCK)) {
-				debug3("  got EAGAIN in _write_label");
-				goto again;
-			}
-			error("In _write_label: %m");
-			return SLURM_ERROR;
-		}
-		left -= n;
-		ptr += n;
-	}
-
-	return SLURM_SUCCESS;
-}
-
-static int _write_newline(int fd)
-{
-	int n;
-
-	debug2("Called _write_newline");
-again:
-	if ((n = write(fd, "\n", 1)) < 0) {
-		if (errno == EINTR
-		    || errno == EAGAIN
-		    || errno == EWOULDBLOCK) {
-			goto again;
-		}
-		error("In _write_newline: %m");
-		return SLURM_ERROR;
-	}
-	return SLURM_SUCCESS;
-}
-
-/*
- * Blocks until write is complete, regardless of the file
- * descriptor being in non-blocking mode.
- */
-static int _write_line(int fd, void *buf, int len)
-{
-	int n;
-	int left = len;
-	void *ptr = buf;
-
-	debug2("Called _write_line");
-	while (left > 0) {
-	again:
-		if ((n = write(fd, ptr, left)) < 0) {
-			if (errno == EINTR)
-				goto again;
-			if ((errno == EAGAIN) || (errno == EWOULDBLOCK)) {
-				debug3("  got EAGAIN in _write_line");
-				goto again;
-			}
-			return -1;
-		}
-		left -= n;
-		ptr += n;
-	}
-	
-	return len;
-}
-
-
-/*
- * Write as many lines from the message as possible.  Return
- * the number of bytes from the message that have been written,
- * or -1 on error.
- *
- * Prepend a label of the task number if label parameter was
- * specified.
- *
- * If the message ends in a partial line (line does not end
- * in a '\n'), then add a newline to the output file, but only
- * in label mode.
- */
-static int _write_msg(int fd, void *buf, int len, int taskid,
-		      bool label, int label_width)
-{
-	void *start;
-	void *end;
-	int remaining = len;
-	int written = 0;
-	int line_len;
-	int rc = -1;
-
-	while (remaining > 0) {
-		start = buf + written;
-		end = memchr(start, '\n', remaining);
-		if (label)
-			if (_write_label(fd, taskid, label_width)
-			    != SLURM_SUCCESS)
-				goto done;
-		if (end == NULL) { /* no newline found */
-			rc = _write_line(fd, start, remaining);
-			if (rc <= 0) {
-				goto done;
-			} else {
-				remaining -= rc;
-				written += rc;
-			}
-			if (label)
-				if (_write_newline(fd) != SLURM_SUCCESS)
-					goto done;
-		} else {
-			line_len = (int)(end - start) + 1;
-			rc = _write_line(fd, start, line_len);
-			if (rc <= 0) {
-				goto done;
-			} else {
-				remaining -= rc;
-				written += rc;
-			}
-		}
-
-	}
-done:
-	if (written > 0)
-		return written;
-	else
-		return rc;
-}
 
 static bool _file_writable(eio_obj_t *obj)
 {
@@ -667,11 +536,11 @@ static int _file_write(eio_obj_t *obj, List objs)
 	} else if (!info->eof) {
 		ptr = info->out_msg->data + (info->out_msg->length
 					     - info->out_remaining);
-		if ((n = _write_msg(obj->fd, ptr,
-				    info->out_remaining,
-				    info->out_msg->header.gtaskid,
-				    info->cio->label,
-				    info->cio->label_width)) < 0) {
+		if ((n = write_labelled_message(obj->fd, ptr,
+					        info->out_remaining,
+					        info->out_msg->header.gtaskid,
+					        info->cio->label,
+					        info->cio->label_width)) < 0) {
 			list_enqueue(info->cio->free_outgoing, info->out_msg);
 			info->eof = true;
 			return SLURM_ERROR;
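
The removed _write_label()/_write_newline()/_write_line() helpers were consolidated into write_labelled_message() (see the src/common/write_labelled_message.h include added above), so the client and stepd sides no longer carry duplicate copies. Their shared core, a complete write that retries on EINTR/EAGAIN even on a non-blocking descriptor, looks like this in isolation (sketch):

```c
#include <errno.h>
#include <unistd.h>

/* Write all len bytes to fd, retrying transient errors; returns len or -1. */
static ssize_t write_all(int fd, const void *buf, size_t len)
{
	const char *p = buf;
	size_t left = len;

	while (left > 0) {
		ssize_t n = write(fd, p, left);
		if (n < 0) {
			if (errno == EINTR || errno == EAGAIN ||
			    errno == EWOULDBLOCK)
				continue;	/* retry the write */
			return -1;
		}
		left -= (size_t) n;
		p += n;
	}
	return (ssize_t) len;
}
```
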
diff --git a/src/api/step_io.h b/src/api/step_io.h
index 477eb9a6485a895a8ab43d62424ceed733b6015b..2a9ca6179e02ec9537745a0a8749569d7d2ab9e4 100644
--- a/src/api/step_io.h
+++ b/src/api/step_io.h
@@ -1,14 +1,15 @@
 /*****************************************************************************\
  * src/api/step_io.h - job-step client-side I/O routines
- * $Id: step_io.h 13672 2008-03-19 23:10:58Z jette $
+ * $Id: step_io.h 16616 2009-02-20 17:00:27Z jette $
  *****************************************************************************
  *  Copyright (C) 2006 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Christopher J. Morrone <morrone2@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/api/step_launch.c b/src/api/step_launch.c
index d73b3c4726d2c52c23d6c35f8aa101ec2b4fd6ee..068fd7dd4081669a7e3d3a54ed6f23cc82c5849f 100644
--- a/src/api/step_launch.c
+++ b/src/api/step_launch.c
@@ -2,13 +2,14 @@
  *  step_launch.c - launch a parallel job step
  *****************************************************************************
  *  Copyright (C) 2006-2007 The Regents of the University of California.
- *  Copyright (C) 2008 Lawrence Livermore National Security.
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Christopher J. Morrone <morrone2@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -91,6 +92,7 @@ static int  _msg_thr_create(struct step_launch_state *sls, int num_nodes);
 static void _handle_msg(struct step_launch_state *sls, slurm_msg_t *msg);
 static bool _message_socket_readable(eio_obj_t *obj);
 static int  _message_socket_accept(eio_obj_t *obj, List objs);
+static int  _cr_notify_step_launch(slurm_step_ctx_t *ctx);
 
 static struct io_operations message_socket_ops = {
 	readable:	&_message_socket_readable,
@@ -119,17 +121,19 @@ void slurm_step_launch_params_t_init (slurm_step_launch_params_t *ptr)
 	ptr->buffered_stdio = true;
 	memcpy(&ptr->local_fds, &fds, sizeof(fds));
 	ptr->gid = getgid();
-	ptr->acctg_freq = (uint16_t) NO_VAL;
+	ptr->acctg_freq  = (uint16_t) NO_VAL;
+	ptr->max_cores   = 0xffff;
+	ptr->max_sockets = 0xffff;
+	ptr->max_threads = 0xffff;
 }
 
 /*
  * slurm_step_launch - launch a parallel job step
  * IN ctx - job step context generated by slurm_step_ctx_create
- * IN launcher_host - address used for PMI communications
  * IN callbacks - Identify functions to be called when various events occur
  * RET SLURM_SUCCESS or SLURM_ERROR (with errno set)
  */
-int slurm_step_launch (slurm_step_ctx_t *ctx, char *launcher_host,
+int slurm_step_launch (slurm_step_ctx_t *ctx,
 		       const slurm_step_launch_params_t *params,
 		       const slurm_step_launch_callbacks_t *callbacks)
 {
@@ -195,8 +199,9 @@ int slurm_step_launch (slurm_step_ctx_t *ctx, char *launcher_host,
 	} else {
 		env_array_merge(&env, (const char **)params->env);
 	}
-	env_array_for_step(&env, ctx->step_resp, launcher_host,
-			   ctx->launch_state->resp_port[0]);
+	env_array_for_step(&env, ctx->step_resp,
+			   ctx->launch_state->resp_port[0], 
+			   params->preserve_env);
 	env_array_merge(&env, (const char **)mpi_env);
 	env_array_free(mpi_env);
 
@@ -222,13 +227,10 @@ int slurm_step_launch (slurm_step_ctx_t *ctx, char *launcher_host,
 	launch.max_cores	= params->max_cores;
 	launch.max_threads	= params->max_threads;
 	launch.cpus_per_task	= params->cpus_per_task;
-	launch.ntasks_per_node	= params->ntasks_per_node;
-	launch.ntasks_per_socket= params->ntasks_per_socket;
-	launch.ntasks_per_core	= params->ntasks_per_core;
 	launch.task_dist	= params->task_dist;
-	launch.plane_size	= params->plane_size;
 	launch.pty              = params->pty;
-	launch.ckpt_path        = params->ckpt_path;
+	launch.ckpt_dir         = params->ckpt_dir;
+	launch.restart_dir      = params->restart_dir;
 	launch.acctg_freq	= params->acctg_freq;
 	launch.open_mode        = params->open_mode;
 	launch.options          = job_options_create();
@@ -251,6 +253,7 @@ int slurm_step_launch (slurm_step_ctx_t *ctx, char *launcher_host,
 		launch.efname = params->remote_error_filename;
 		launch.ifname = params->remote_input_filename;
 		launch.buffered_stdio = params->buffered_stdio ? 1 : 0;
+		launch.labelio = params->labelio ? 1 : 0;
 		ctx->launch_state->io.normal =
 			client_io_handler_create(params->local_fds,
 						 ctx->step_req->num_tasks,
@@ -345,6 +348,8 @@ int slurm_step_launch_wait_start(slurm_step_ctx_t *ctx)
 		}
 	}
 
+	_cr_notify_step_launch(ctx);
+
 	pthread_mutex_unlock(&sls->lock);
 	return SLURM_SUCCESS;
 }
@@ -422,6 +427,17 @@ void slurm_step_launch_wait_finish(slurm_step_ctx_t *ctx)
 		info("Force Terminated job step %u.%u",
 		     ctx->job_id, ctx->step_resp->job_step_id);
 
+	/* task_exit_signal != 0 when srun receives a message that a task
+	   exited with a SIGTERM or SIGKILL.  Without this test, srun might
+	   hang when a node suffers a hard power failure and TCP never
+	   indicates that the I/O connection closed.  The I/O thread could
+	   block waiting for an EOF message, even though the remote process
+	   has died.  In this case, use client_io_handler_abort to force the
+	   I/O thread to stop listening for stdout or stderr and shut down. */
+	if (task_exit_signal && !sls->user_managed_io) {
+		client_io_handler_abort(sls->io.normal);
+	}
+
 	/* Then shutdown the message handler thread */
 	eio_signal_shutdown(sls->msg_handle);
 	pthread_mutex_unlock(&sls->lock);
@@ -571,7 +587,6 @@ struct step_launch_state *step_launch_state_create(slurm_step_ctx_t *ctx)
 	sls->resp_port = NULL;
 	sls->abort = false;
 	sls->abort_action_taken = false;
-	sls->no_kill = ctx->no_kill;
 	sls->mpi_info->jobid = ctx->step_req->job_id;
 	sls->mpi_info->stepid = ctx->step_resp->job_step_id;
 	sls->mpi_info->step_layout = layout;
@@ -598,6 +613,82 @@ void step_launch_state_destroy(struct step_launch_state *sls)
 	}
 }
 
+/**********************************************************************
+ * CR functions
+ **********************************************************************/
+
+/* connect to srun_cr */
+static int _connect_srun_cr(char *addr)
+{
+	struct sockaddr_un sa;
+	unsigned int sa_len;
+	int fd, rc;
+
+	fd = socket(AF_UNIX, SOCK_STREAM, 0);
+	if (fd < 0) {
+		error("failed creating cr socket: %m");
+		return -1;
+	}
+	memset(&sa, 0, sizeof(sa));
+
+	sa.sun_family = AF_UNIX;
+	strcpy(sa.sun_path, addr);
+	sa_len = strlen(sa.sun_path) + sizeof(sa.sun_family);
+
+	while (((rc = connect(fd, (struct sockaddr *)&sa, sa_len)) < 0) &&
+	       (errno == EINTR));
+
+	if (rc < 0) {
+		debug2("failed connecting cr socket: %m");
+		close(fd);
+		return -1;
+	}
+	return fd;
+}
+
+/* send job_id, step_id, node_list to srun_cr */
+static int _cr_notify_step_launch(slurm_step_ctx_t *ctx)
+{
+	int fd, len, rc = 0;
+	char *cr_sock_addr = NULL;
+
+	cr_sock_addr = getenv("SLURM_SRUN_CR_SOCKET");
+	if (cr_sock_addr == NULL) { /* not run under srun_cr */
+		return 0;
+	}
+
+	if ((fd = _connect_srun_cr(cr_sock_addr)) < 0) {
+		debug2("failed connecting srun_cr. take it not running under "
+		       "srun_cr.");
+		return 0;
+	}
+	if (write(fd, &ctx->job_id, sizeof(uint32_t)) != sizeof(uint32_t)) {
+		error("failed writing job_id to srun_cr: %m");
+		rc = -1;
+		goto out;
+	}
+	if (write(fd, &ctx->step_resp->job_step_id, sizeof(uint32_t)) != 
+	    sizeof(uint32_t)) {
+		error("failed writing job_step_id to srun_cr: %m");
+		rc = -1;
+		goto out;
+	}
+	len = strlen(ctx->step_resp->step_layout->node_list);
+	if (write(fd, &len, sizeof(int)) != sizeof(int)) {
+		error("failed writing nodelist length to srun_cr: %m");
+		rc = -1;
+		goto out;
+	}
+	if (write(fd, ctx->step_resp->step_layout->node_list, len + 1) != 
+	    (len + 1)) {
+		error("failed writing nodelist to srun_cr: %m");
+		rc = -1;
+	}
+ out:
+	close (fd);
+	return rc;
+}
+
 /**********************************************************************
  * Message handler functions
  **********************************************************************/
@@ -856,14 +947,6 @@ _node_fail_handler(struct step_launch_state *sls, slurm_msg_t *fail_msg)
 	int node_id, num_tasks;
 
 	error("Node failure on %s", nf->nodelist);
-	if (!sls->no_kill) {
-		info("Cancelling job step %u.%u", nf->job_id, nf->step_id);
-		slurm_kill_job_step(nf->job_id, nf->step_id, SIGKILL);
-		/* In an ideal world, we close the socket to this node and
-		 * normally terminate the remaining tasks. In practice this
-		 * is very difficult. The exercise is left to the reader. */
-		exit(1);
-	}
 
 	fail_nodes = hostset_create(nf->nodelist);
 	fail_itr = hostset_iterator_create(fail_nodes);
@@ -906,6 +989,21 @@ _node_fail_handler(struct step_launch_state *sls, slurm_msg_t *fail_msg)
 	hostset_destroy(all_nodes);
 }
 
+/*
+ * FIXME: Verify that tasks on these node(s) are still alive.
+ * This message could be the result of the slurmd daemon cold-starting
+ * or a race condition when tasks are starting or terminating.
+ */
+static void
+_step_missing_handler(struct step_launch_state *sls, slurm_msg_t *missing_msg)
+{
+	srun_step_missing_msg_t *step_missing = missing_msg->data;
+
+	debug("Step %u.%u missing from node(s) %s", 
+	      step_missing->job_id, step_missing->step_id,
+	      step_missing->nodelist);
+}
+
 /*
  * The TCP connection that was used to send the task_spawn_io_msg_t message
  * will be used as the user managed IO stream.  The remote end of the TCP stream
@@ -1004,6 +1102,11 @@ _handle_msg(struct step_launch_state *sls, slurm_msg_t *msg)
 		_node_fail_handler(sls, msg);
 		slurm_free_srun_node_fail_msg(msg->data);
 		break;
+	case SRUN_STEP_MISSING:
+		debug2("received notice of missing job step");
+		_step_missing_handler(sls, msg);
+		slurm_free_srun_step_missing_msg(msg->data);
+		break;
 	case PMI_KVS_PUT_REQ:
 		debug2("PMI_KVS_PUT_REQ received");
 		rc = pmi_kvs_put((struct kvs_comm_set *) msg->data);
@@ -1030,6 +1133,38 @@ _handle_msg(struct step_launch_state *sls, slurm_msg_t *msg)
 /**********************************************************************
  * Task launch functions
  **********************************************************************/
+
+/* The slurmd normally reports task completion to the controller.  If
+ * a task launch request to the slurmd fails, that report never
+ * happens, so the step must be marked complete here; no other code
+ * path cleans this up.
+ */
+static int _fail_step_tasks(slurm_step_ctx_t *ctx, char *node, int ret_code)
+{
+	slurm_msg_t req;
+	step_complete_msg_t msg;
+	int rc = -1;
+	int nodeid = NO_VAL;
+
+	nodeid = nodelist_find(ctx->step_resp->step_layout->node_list, node);
+
+	memset(&msg, 0, sizeof(step_complete_msg_t));
+	msg.job_id = ctx->job_id;
+	msg.job_step_id = ctx->step_resp->job_step_id;
+
+	msg.range_first = msg.range_last = nodeid;
+	msg.step_rc = ret_code;
+
+	slurm_msg_t_init(&req);
+	req.msg_type = REQUEST_STEP_COMPLETE;
+	req.data = &msg;
+
+	if (slurm_send_recv_controller_rc_msg(&req, &rc) < 0)
+		return SLURM_ERROR;
+
+	return SLURM_SUCCESS;
+}
+
 static int _launch_tasks(slurm_step_ctx_t *ctx,
 			 launch_tasks_request_msg_t *launch_msg,
 			 uint32_t timeout)
@@ -1071,13 +1206,17 @@ static int _launch_tasks(slurm_step_ctx_t *ctx,
 		      rc, ret_data->err, ret_data->type);
 		if (rc != SLURM_SUCCESS) {
 			if (ret_data->err)
-				errno = ret_data->err;
+				tot_rc = ret_data->err;
 			else
-				errno = rc;
-			error("Task launch failed on node %s: %m",
+				tot_rc = rc;
+
+			_fail_step_tasks(ctx, ret_data->node_name, tot_rc);
+
+			errno = tot_rc;
+			tot_rc = SLURM_ERROR;
+			error("Task launch for %u.%u failed on node %s: %m",
+			      ctx->job_id, ctx->step_resp->job_step_id,
 			      ret_data->node_name);
-			rc = SLURM_ERROR;
-			tot_rc = rc;
 		} else {
 #if 0 /* only for debugging, might want to make this a callback */
 			errno = ret_data->err;
diff --git a/src/api/step_launch.h b/src/api/step_launch.h
index e4d99903e2100496eb40fff169617d46c6cfa4ec..53b72dcd054b7ed4169be6de623bb09d3bbf5ab6 100644
--- a/src/api/step_launch.h
+++ b/src/api/step_launch.h
@@ -1,15 +1,16 @@
 /*****************************************************************************\
  *  step_launch.h - launch a parallel job step
  *
- *  $Id: step_launch.h 15262 2008-10-01 22:58:26Z jette $
+ *  $Id: step_launch.h 16616 2009-02-20 17:00:27Z jette $
  *****************************************************************************
  *  Copyright (C) 2006 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Christopher J. Morrone <morrone2@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -58,7 +59,6 @@ struct step_launch_state {
 	bitstr_t *tasks_exited;  /* or never started correctly */
 	bool abort;
 	bool abort_action_taken;
-	bool no_kill;
 
 	/* message thread variables */
 	eio_handle_t *msg_handle;
diff --git a/src/api/submit.c b/src/api/submit.c
index c66b349611c1a2f0dcf4a7d09ad8c61d822789ae..e4bdd026653b537b6ce35ebde2362f3ab1db9fdd 100644
--- a/src/api/submit.c
+++ b/src/api/submit.c
@@ -1,14 +1,15 @@
 /*****************************************************************************\
 *  submit.c - submit a job with supplied constraints
- *  $Id: submit.c 13672 2008-03-19 23:10:58Z jette $
+ *  $Id: submit.c 16616 2009-02-20 17:00:27Z jette $
  *****************************************************************************
  *  Copyright (C) 2002 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/api/suspend.c b/src/api/suspend.c
index d447a8d97d56c2155a54a2948697adb7eb1794de..540dd50d0f3cee41f37551933ca88e5231c41c1f 100644
--- a/src/api/suspend.c
+++ b/src/api/suspend.c
@@ -1,14 +1,15 @@
 /*****************************************************************************\
  *  suspend.c - job step suspend and resume functions.
- *  $Id: suspend.c 13672 2008-03-19 23:10:58Z jette $
+ *  $Id: suspend.c 16616 2009-02-20 17:00:27Z jette $
  *****************************************************************************
  *  Copyright (C) 2005-2006 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov> et. al.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/api/topo_info.c b/src/api/topo_info.c
new file mode 100644
index 0000000000000000000000000000000000000000..4cfa670a7e421d896283571f84bdd5960bf314f5
--- /dev/null
+++ b/src/api/topo_info.c
@@ -0,0 +1,164 @@
+/*****************************************************************************\
+ *  topo_info.c - get/print the switch topology state information of slurm
+ *****************************************************************************
+ *  Copyright (C) 2009 Lawrence Livermore National Security.
+ *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+ *  Written by Morris Jette <jette1@llnl.gov>
+ *  CODE-OCEC-09-009. All rights reserved.
+ *  
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
+ *  
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission 
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and 
+ *  distribute linked combinations including the two. You must obey the GNU 
+ *  General Public License in all respects for all of the code used other than 
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this 
+ *  exception to your version of the file(s), but you are not obligated to do 
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in 
+ *  the program, then also delete it here.
+ *  
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *  
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#ifdef HAVE_CONFIG_H
+#  include "config.h"
+#endif
+
+#ifdef HAVE_SYS_SYSLOG_H
+#  include <sys/syslog.h>
+#endif
+
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <syslog.h>
+#include <netinet/in.h>
+#include <arpa/inet.h>
+#include <unistd.h>
+
+#include <slurm/slurm.h>
+
+#include "src/common/parse_time.h"
+#include "src/common/slurm_protocol_api.h"
+#include "src/common/xmalloc.h"
+#include "src/common/xstring.h"
+
+/*
+ * slurm_load_topo - issue RPC to get all SLURM switch topology configuration
+ *	information
+ * OUT resp - place to store a topology configuration pointer
+ * RET 0 or a slurm error code
+ * NOTE: free the response using slurm_free_topo_info_msg
+ */
+extern int slurm_load_topo(topo_info_response_msg_t **resp)
+{
+	int rc;
+	slurm_msg_t req_msg;
+	slurm_msg_t resp_msg;
+	
+	slurm_msg_t_init(&req_msg);
+	slurm_msg_t_init(&resp_msg);
+	req_msg.msg_type = REQUEST_TOPO_INFO;
+	req_msg.data     = NULL;
+	
+	if (slurm_send_recv_controller_msg(&req_msg, &resp_msg) < 0)
+		return SLURM_ERROR;
+		
+	switch (resp_msg.msg_type) {
+	case RESPONSE_TOPO_INFO:
+		*resp = (topo_info_response_msg_t *) resp_msg.data;
+		break;
+	case RESPONSE_SLURM_RC:
+		rc = ((return_code_msg_t *) resp_msg.data)->return_code;
+		slurm_free_return_code_msg(resp_msg.data);	
+		if (rc) 
+			slurm_seterrno_ret(rc);
+		*resp = NULL;
+		break;
+	default:
+		slurm_seterrno_ret(SLURM_UNEXPECTED_MSG_ERROR);
+		break;
+	}
+
+	return SLURM_PROTOCOL_SUCCESS;
+}
+
+/*
+ * slurm_print_topo_info_msg - output the switch topology configuration
+ *	information based upon the message loaded using slurm_load_topo
+ * IN out - file to write to
+ * IN topo_info_msg_ptr - switch topology information message pointer
+ * IN one_liner - print as a single line if not zero
+ */
+extern void slurm_print_topo_info_msg(FILE * out, 
+				      topo_info_response_msg_t *topo_info_msg_ptr, 
+				      int one_liner)
+{
+	int i;
+	topo_info_t *topo_ptr = topo_info_msg_ptr->topo_array;
+
+	if (topo_info_msg_ptr->record_count == 0) {
+		error("No topology information available");
+		return;
+	}
+
+	for (i = 0; i < topo_info_msg_ptr->record_count; i++)
+		slurm_print_topo_record(out, &topo_ptr[i], one_liner);
+}
+
+/*
+ * slurm_print_topo_record - output information about a specific Slurm topology
+ *	record based upon the message loaded using slurm_load_topo
+ * IN out - file to write to
+ * IN topo_ptr - an individual switch information record pointer
+ * IN one_liner - print as a single line if not zero
+ */
+extern void slurm_print_topo_record(FILE * out, topo_info_t *topo_ptr, 
+				    int one_liner)
+{
+	char tmp_line[512];
+	char *out_buf = NULL;
+
+	/****** Line 1 ******/
+	snprintf(tmp_line, sizeof(tmp_line),
+		"SwitchName=%s Level=%u LinkSpeed=%u ",
+		topo_ptr->name, topo_ptr->level, topo_ptr->link_speed);
+	xstrcat(out_buf, tmp_line);
+
+	if (topo_ptr->nodes && topo_ptr->nodes[0]) {
+		snprintf(tmp_line, sizeof(tmp_line), 
+			 "Nodes=%s ", topo_ptr->nodes);
+		xstrcat(out_buf, tmp_line);
+	}
+	if (topo_ptr->switches && topo_ptr->switches[0]) {
+		snprintf(tmp_line, sizeof(tmp_line), 
+			 "Switches=%s ", topo_ptr->switches);
+		xstrcat(out_buf, tmp_line);
+	}
+
+	xstrcat(out_buf, "\n");
+	fprintf(out, "%s", out_buf);
+	xfree(out_buf);
+}
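[Editor's note] A minimal client of the topology API added above — a hypothetical example program, not part of the patch. It frees the response with slurm_free_topo_info_msg() as directed by the slurm_load_topo() comment:

#include <stdio.h>
#include <slurm/slurm.h>

int main(void)
{
	topo_info_response_msg_t *topo = NULL;

	if (slurm_load_topo(&topo) != SLURM_SUCCESS) {
		slurm_perror("slurm_load_topo");
		return 1;
	}
	slurm_print_topo_info_msg(stdout, topo, 0);	/* 0 = multi-line */
	slurm_free_topo_info_msg(topo);
	return 0;
}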
diff --git a/src/api/triggers.c b/src/api/triggers.c
index 67981abd3b9a23f2e3afb244b5a250fa18ea6a45..6d03bfbaa57ba0e527d901ef4faca7effede7ac6 100644
--- a/src/api/triggers.c
+++ b/src/api/triggers.c
@@ -4,10 +4,11 @@
  *  Copyright (C) 2007 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/api/update_config.c b/src/api/update_config.c
index b317331d66194d85463f8c4854a0064fb6a2e36e..c04ffb00708f9cd38818ca3038a8408cc04b4ae4 100644
--- a/src/api/update_config.c
+++ b/src/api/update_config.c
@@ -1,14 +1,16 @@
 /****************************************************************************\
  *  update_config.c - request that slurmctld update its configuration
- *  $Id: update_config.c 13672 2008-03-19 23:10:58Z jette $
+ *  $Id: update_config.c 17334 2009-04-22 23:49:13Z da $
  *****************************************************************************
- *  Copyright (C) 2002 The Regents of the University of California.
+ *  Copyright (C) 2002-2007 The Regents of the University of California.
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov> and Kevin Tew <tew1@llnl.gov>.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -16,7 +18,7 @@
  *  any later version.
  *
  *  In addition, as a special exception, the copyright holders give permission 
- *  to link the code of portions of this program with the OpenSSL library under 
+ *  to link the code of portions of this program with the OpenSSL library under
  *  certain conditions as described in each individual source file, and 
  *  distribute linked combinations including the two. You must obey the GNU 
  *  General Public License in all respects for all of the code used other than 
@@ -43,6 +45,7 @@
 #include <errno.h>
 #include <stdio.h>
 #include <stdlib.h>
+#include <string.h>
 
 #include <slurm/slurm.h>
 
@@ -74,6 +77,17 @@ slurm_update_node ( update_node_msg_t * node_msg )
 	return _slurm_update ((void *) node_msg, REQUEST_UPDATE_NODE);
 }
 
+/*
+ * slurm_create_partition - create a new partition, only usable by user root
+ * IN part_msg - description of partition configuration
+ * RET 0 on success, otherwise return -1 and set errno to indicate the error
+ */
+int 
+slurm_create_partition ( update_part_msg_t * part_msg ) 
+{
+	return _slurm_update ((void *) part_msg, REQUEST_CREATE_PARTITION);
+}
+
 /*
  * slurm_update_partition - issue RPC to update a partition's
  *	configuration per request, only usable by user root
@@ -89,7 +103,7 @@ slurm_update_partition ( update_part_msg_t * part_msg )
 /*
  * slurm_delete_partition - issue RPC to delete a partition, only usable 
  *	by user root
- * IN part_msg - description of partition updates
+ * IN part_msg - description of partition to delete
  * RET 0 on success, otherwise return -1 and set errno to indicate the error
  */
 int 
@@ -98,6 +112,69 @@ slurm_delete_partition ( delete_part_msg_t * part_msg )
 	return _slurm_update ((void *) part_msg, REQUEST_DELETE_PARTITION);
 }
 
+/*
+ * slurm_create_reservation - create a new reservation, only usable by user root
+ * IN resv_msg - description of reservation
+ * RET name of reservation on success (caller must free the memory),
+ *	otherwise return NULL and set errno to indicate the error
+ */
+char * 
+slurm_create_reservation (resv_desc_msg_t * resv_msg ) 
+{
+	int rc;
+	char *resv_name = NULL;
+	slurm_msg_t req_msg;
+	slurm_msg_t resp_msg;
+	reservation_name_msg_t *resp;
+
+	slurm_msg_t_init(&req_msg);
+	slurm_msg_t_init(&resp_msg);
+
+	req_msg.msg_type = REQUEST_CREATE_RESERVATION;
+	req_msg.data     = resv_msg;
+
+	rc = slurm_send_recv_controller_msg(&req_msg, &resp_msg);
+	switch (resp_msg.msg_type) {
+	case RESPONSE_CREATE_RESERVATION:
+		resp = (reservation_name_msg_t *) resp_msg.data;
+		resv_name = strdup(resp->name);
+		break;
+	case RESPONSE_SLURM_RC:
+		rc = ((return_code_msg_t *) resp_msg.data)->return_code;
+		if (rc) 
+			slurm_seterrno(rc);
+		break;
+	default:
+		slurm_seterrno(SLURM_UNEXPECTED_MSG_ERROR);
+	}
+	slurm_free_msg_data(resp_msg.msg_type, resp_msg.data);
+	return resv_name;
+}
+
+/*
+ * slurm_update_reservation - modify an existing reservation, only usable by 
+ *	user root
+ * IN resv_msg - description of reservation
+ * RET 0 on success, otherwise return -1 and set errno to indicate the error
+ */
+extern int slurm_update_reservation ( resv_desc_msg_t * resv_msg )
+{
+	return _slurm_update ((void *) resv_msg, REQUEST_UPDATE_RESERVATION);
+}
+
+/*
+ * slurm_delete_reservation - issue RPC to delete a reservation, only usable 
+ *	by user root
+ * IN resv_msg - description of reservation to delete
+ * RET 0 on success, otherwise return -1 and set errno to indicate the error
+ */
+int 
+slurm_delete_reservation ( reservation_name_msg_t * resv_msg ) 
+{
+	return _slurm_update ((void *) resv_msg, REQUEST_DELETE_RESERVATION);
+}
+
 /* _slurm_update - issue RPC for all update requests */
 static int 
 _slurm_update (void *data, slurm_msg_type_t msg_type)
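[Editor's note] A sketch of a hypothetical caller for the reservation API added above. The field names are assumed from the 2.0 resv_desc_msg_t and the values are illustrative only; the name returned by slurm_create_reservation() is strdup()'d, so it is released with free():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <slurm/slurm.h>

int main(void)
{
	resv_desc_msg_t resv;
	char *name;

	/* Illustrative setup; real callers should initialize the
	 * remaining resv_desc_msg_t fields appropriately. */
	memset(&resv, 0, sizeof(resv));
	resv.start_time = time(NULL) + 60;	/* starts in one minute */
	resv.duration   = 30;			/* minutes */
	resv.node_cnt   = 2;
	resv.users      = "root";

	if ((name = slurm_create_reservation(&resv)) == NULL) {
		slurm_perror("slurm_create_reservation");
		return 1;
	}
	printf("created reservation %s\n", name);
	free(name);	/* allocated with strdup() by the API */
	return 0;
}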
diff --git a/src/common/Makefile.am b/src/common/Makefile.am
index f6127ebe20e3e9b59465d712d50760f0923ab420..50976649aa9bbe82fdd792c8a2e7e010c3901a1b 100644
--- a/src/common/Makefile.am
+++ b/src/common/Makefile.am
@@ -30,6 +30,7 @@ noinst_LTLIBRARIES = 			\
 
 libcommon_la_SOURCES = 			\
 	assoc_mgr.c assoc_mgr.h 	\
+	basil_resv_conf.c basil_resv_conf.h \
 	xmalloc.c xmalloc.h 		\
 	xassert.c xassert.h		\
 	xstring.c xstring.h		\
@@ -56,6 +57,8 @@ libcommon_la_SOURCES = 			\
 	slurm_cred.h       		\
 	slurm_cred.c			\
 	slurm_errno.c			\
+	slurm_priority.c		\
+	slurm_priority.h		\
 	slurm_protocol_api.c		\
 	slurm_protocol_api.h		\
 	slurm_protocol_pack.c		\
@@ -90,13 +93,17 @@ libcommon_la_SOURCES = 			\
 	hostlist.c hostlist.h		\
 	slurm_step_layout.c slurm_step_layout.h	\
 	checkpoint.c checkpoint.h	\
+	select_job_res.c select_job_res.h	\
 	parse_time.c parse_time.h	\
 	job_options.c job_options.h	\
 	global_defaults.c		\
 	timers.c timers.h		\
 	slurm_xlator.h			\
 	stepd_api.c stepd_api.h		\
-	proc_args.c proc_args.h		
+	write_labelled_message.c	\
+	write_labelled_message.h	\
+	proc_args.c proc_args.h		\
+	slurm_strcasestr.c slurm_strcasestr.h		
 
 EXTRA_libcommon_la_SOURCES = 	\
 	$(extra_unsetenv_src)
@@ -118,19 +125,19 @@ libcommon_la_LIBADD   = -ldl
 
 libcommon_la_LDFLAGS  = $(LIB_LDFLAGS) -module --export-dynamic
 
-# This was made so we chould export all symbols from libcommon 
+# This was made so we could export all symbols from libcommon 
 # on multiple platforms
 libcommon_o_SOURCES = 
 libcommon.o :  $(libcommon_la_OBJECTS) $(libcommon_la_DEPENDENCIES) 
-	$(libcommon_la_LINK)  $(libcommon_la_OBJECTS) 
+	$(LINK)  $(libcommon_la_OBJECTS) 
 
-# This was made so we chould export all symbols from libeio 
+# This was made so we could export all symbols from libeio 
 # on multiple platforms
 libeio_o_SOURCES = 
 libeio.o :  $(libeio_la_OBJECTS) $(libeio_la_DEPENDENCIES) 
 	$(LINK)  $(libeio_la_OBJECTS) 
 
-# This was made so we chould export all symbols from libspank 
+# This was made so we could export all symbols from libspank 
 # on multiple platforms
 libspank_o_SOURCES = 
 libspank.o :  $(libspank_la_OBJECTS) $(libspank_la_DEPENDENCIES) 
diff --git a/src/common/Makefile.in b/src/common/Makefile.in
index b614f67034a14b8121b25821fe505da0b430f1de..32ae8009d51b9746812bb8fc4710319892447f6f 100644
--- a/src/common/Makefile.in
+++ b/src/common/Makefile.in
@@ -54,14 +54,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -80,19 +84,21 @@ CONFIG_HEADER = $(top_builddir)/config.h $(top_builddir)/slurm/slurm.h
 CONFIG_CLEAN_FILES =
 LTLIBRARIES = $(noinst_LTLIBRARIES)
 libcommon_la_DEPENDENCIES =
-am__libcommon_la_SOURCES_DIST = assoc_mgr.c assoc_mgr.h xmalloc.c \
-	xmalloc.h xassert.c xassert.h xstring.c xstring.h xsignal.c \
-	xsignal.h forward.c forward.h strlcpy.c strlcpy.h list.c \
-	list.h net.c net.h fd.c fd.h log.c log.h cbuf.c cbuf.h \
-	safeopen.c safeopen.h bitstring.c bitstring.h mpi.c mpi.h \
-	pack.c pack.h parse_config.c parse_config.h parse_spec.c \
-	parse_spec.h plugin.c plugin.h plugrack.c plugrack.h \
-	print_fields.c print_fields.h read_config.c read_config.h \
-	node_select.c node_select.h env.c env.h slurm_cred.h \
-	slurm_cred.c slurm_errno.c slurm_protocol_api.c \
-	slurm_protocol_api.h slurm_protocol_pack.c \
-	slurm_protocol_pack.h slurm_protocol_util.c \
-	slurm_protocol_util.h slurm_protocol_socket_implementation.c \
+am__libcommon_la_SOURCES_DIST = assoc_mgr.c assoc_mgr.h \
+	basil_resv_conf.c basil_resv_conf.h xmalloc.c xmalloc.h \
+	xassert.c xassert.h xstring.c xstring.h xsignal.c xsignal.h \
+	forward.c forward.h strlcpy.c strlcpy.h list.c list.h net.c \
+	net.h fd.c fd.h log.c log.h cbuf.c cbuf.h safeopen.c \
+	safeopen.h bitstring.c bitstring.h mpi.c mpi.h pack.c pack.h \
+	parse_config.c parse_config.h parse_spec.c parse_spec.h \
+	plugin.c plugin.h plugrack.c plugrack.h print_fields.c \
+	print_fields.h read_config.c read_config.h node_select.c \
+	node_select.h env.c env.h slurm_cred.h slurm_cred.c \
+	slurm_errno.c slurm_priority.c slurm_priority.h \
+	slurm_protocol_api.c slurm_protocol_api.h \
+	slurm_protocol_pack.c slurm_protocol_pack.h \
+	slurm_protocol_util.c slurm_protocol_util.h \
+	slurm_protocol_socket_implementation.c \
 	slurm_protocol_socket_common.h slurm_protocol_common.h \
 	slurm_protocol_interface.h slurm_protocol_defs.c \
 	slurm_protocol_defs.h slurm_rlimits_info.h \
@@ -106,26 +112,29 @@ am__libcommon_la_SOURCES_DIST = assoc_mgr.c assoc_mgr.h xmalloc.c \
 	slurm_selecttype_info.c slurm_resource_info.c \
 	slurm_resource_info.h hostlist.c hostlist.h \
 	slurm_step_layout.c slurm_step_layout.h checkpoint.c \
-	checkpoint.h parse_time.c parse_time.h job_options.c \
-	job_options.h global_defaults.c timers.c timers.h \
-	slurm_xlator.h stepd_api.c stepd_api.h proc_args.c proc_args.h
+	checkpoint.h select_job_res.c select_job_res.h parse_time.c \
+	parse_time.h job_options.c job_options.h global_defaults.c \
+	timers.c timers.h slurm_xlator.h stepd_api.c stepd_api.h \
+	write_labelled_message.c write_labelled_message.h proc_args.c \
+	proc_args.h slurm_strcasestr.c slurm_strcasestr.h
 @HAVE_UNSETENV_FALSE@am__objects_1 = unsetenv.lo
-am_libcommon_la_OBJECTS = assoc_mgr.lo xmalloc.lo xassert.lo \
-	xstring.lo xsignal.lo forward.lo strlcpy.lo list.lo net.lo \
-	fd.lo log.lo cbuf.lo safeopen.lo bitstring.lo mpi.lo pack.lo \
-	parse_config.lo parse_spec.lo plugin.lo plugrack.lo \
+am_libcommon_la_OBJECTS = assoc_mgr.lo basil_resv_conf.lo xmalloc.lo \
+	xassert.lo xstring.lo xsignal.lo forward.lo strlcpy.lo list.lo \
+	net.lo fd.lo log.lo cbuf.lo safeopen.lo bitstring.lo mpi.lo \
+	pack.lo parse_config.lo parse_spec.lo plugin.lo plugrack.lo \
 	print_fields.lo read_config.lo node_select.lo env.lo \
-	slurm_cred.lo slurm_errno.lo slurm_protocol_api.lo \
-	slurm_protocol_pack.lo slurm_protocol_util.lo \
-	slurm_protocol_socket_implementation.lo slurm_protocol_defs.lo \
-	slurm_rlimits_info.lo slurmdbd_defs.lo uid.lo util-net.lo \
-	slurm_auth.lo jobacct_common.lo slurm_accounting_storage.lo \
-	slurm_jobacct_gather.lo slurm_jobcomp.lo switch.lo arg_desc.lo \
-	malloc.lo getopt.lo getopt1.lo $(am__objects_1) \
-	slurm_selecttype_info.lo slurm_resource_info.lo hostlist.lo \
-	slurm_step_layout.lo checkpoint.lo parse_time.lo \
-	job_options.lo global_defaults.lo timers.lo stepd_api.lo \
-	proc_args.lo
+	slurm_cred.lo slurm_errno.lo slurm_priority.lo \
+	slurm_protocol_api.lo slurm_protocol_pack.lo \
+	slurm_protocol_util.lo slurm_protocol_socket_implementation.lo \
+	slurm_protocol_defs.lo slurm_rlimits_info.lo slurmdbd_defs.lo \
+	uid.lo util-net.lo slurm_auth.lo jobacct_common.lo \
+	slurm_accounting_storage.lo slurm_jobacct_gather.lo \
+	slurm_jobcomp.lo switch.lo arg_desc.lo malloc.lo getopt.lo \
+	getopt1.lo $(am__objects_1) slurm_selecttype_info.lo \
+	slurm_resource_info.lo hostlist.lo slurm_step_layout.lo \
+	checkpoint.lo select_job_res.lo parse_time.lo job_options.lo \
+	global_defaults.lo timers.lo stepd_api.lo \
+	write_labelled_message.lo proc_args.lo slurm_strcasestr.lo
 am__EXTRA_libcommon_la_SOURCES_DIST = unsetenv.c unsetenv.h
 libcommon_la_OBJECTS = $(am_libcommon_la_OBJECTS)
 libcommon_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \
@@ -184,6 +193,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
@@ -357,6 +370,7 @@ noinst_LTLIBRARIES = \
 
 libcommon_la_SOURCES = \
 	assoc_mgr.c assoc_mgr.h 	\
+	basil_resv_conf.c basil_resv_conf.h \
 	xmalloc.c xmalloc.h 		\
 	xassert.c xassert.h		\
 	xstring.c xstring.h		\
@@ -383,6 +397,8 @@ libcommon_la_SOURCES = \
 	slurm_cred.h       		\
 	slurm_cred.c			\
 	slurm_errno.c			\
+	slurm_priority.c		\
+	slurm_priority.h		\
 	slurm_protocol_api.c		\
 	slurm_protocol_api.h		\
 	slurm_protocol_pack.c		\
@@ -417,13 +433,17 @@ libcommon_la_SOURCES = \
 	hostlist.c hostlist.h		\
 	slurm_step_layout.c slurm_step_layout.h	\
 	checkpoint.c checkpoint.h	\
+	select_job_res.c select_job_res.h	\
 	parse_time.c parse_time.h	\
 	job_options.c job_options.h	\
 	global_defaults.c		\
 	timers.c timers.h		\
 	slurm_xlator.h			\
 	stepd_api.c stepd_api.h		\
-	proc_args.c proc_args.h		
+	write_labelled_message.c	\
+	write_labelled_message.h	\
+	proc_args.c proc_args.h		\
+	slurm_strcasestr.c slurm_strcasestr.h		
 
 EXTRA_libcommon_la_SOURCES = \
 	$(extra_unsetenv_src)
@@ -444,15 +464,15 @@ libspank_la_SOURCES = \
 libcommon_la_LIBADD = -ldl 
 libcommon_la_LDFLAGS = $(LIB_LDFLAGS) -module --export-dynamic
 
-# This was made so we chould export all symbols from libcommon 
+# This was made so we could export all symbols from libcommon 
 # on multiple platforms
 libcommon_o_SOURCES = 
 
-# This was made so we chould export all symbols from libeio 
+# This was made so we could export all symbols from libeio 
 # on multiple platforms
 libeio_o_SOURCES = 
 
-# This was made so we chould export all symbols from libspank 
+# This was made so we could export all symbols from libspank 
 # on multiple platforms
 libspank_o_SOURCES = 
 all: all-am
@@ -521,6 +541,7 @@ distclean-compile:
 
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/arg_desc.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/assoc_mgr.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/basil_resv_conf.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/bitstring.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/cbuf.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/checkpoint.Plo@am__quote@
@@ -554,12 +575,14 @@ distclean-compile:
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/proc_args.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/read_config.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/safeopen.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/select_job_res.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/slurm_accounting_storage.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/slurm_auth.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/slurm_cred.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/slurm_errno.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/slurm_jobacct_gather.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/slurm_jobcomp.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/slurm_priority.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/slurm_protocol_api.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/slurm_protocol_defs.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/slurm_protocol_pack.Plo@am__quote@
@@ -569,6 +592,7 @@ distclean-compile:
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/slurm_rlimits_info.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/slurm_selecttype_info.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/slurm_step_layout.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/slurm_strcasestr.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/slurmdbd_defs.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/stepd_api.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/strlcpy.Plo@am__quote@
@@ -577,6 +601,7 @@ distclean-compile:
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/uid.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/unsetenv.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/util-net.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/write_labelled_message.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/xassert.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/xmalloc.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/xsignal.Plo@am__quote@
@@ -786,7 +811,7 @@ uninstall-am:
 	tags uninstall uninstall-am
 
 libcommon.o :  $(libcommon_la_OBJECTS) $(libcommon_la_DEPENDENCIES) 
-	$(libcommon_la_LINK)  $(libcommon_la_OBJECTS) 
+	$(LINK)  $(libcommon_la_OBJECTS) 
 libeio.o :  $(libeio_la_OBJECTS) $(libeio_la_DEPENDENCIES) 
 	$(LINK)  $(libeio_la_OBJECTS) 
 libspank.o :  $(libspank_la_OBJECTS) $(libspank_la_DEPENDENCIES) 
diff --git a/src/common/arg_desc.c b/src/common/arg_desc.c
index 72b82dfebd8a917e9f0ef62da3c554dedb83051d..79bb8714b6b24e30c8c1d747ba91475ea801d819 100644
--- a/src/common/arg_desc.c
+++ b/src/common/arg_desc.c
@@ -3,10 +3,11 @@
  *****************************************************************************
  *  Copyright (C) 2005 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/common/arg_desc.h b/src/common/arg_desc.h
index f365582efb6e8fd21b75634c6b000182febb81b7..331c3fb6ed8f7ef20ce21ca8d6e1e24c32f95b1a 100644
--- a/src/common/arg_desc.h
+++ b/src/common/arg_desc.h
@@ -3,10 +3,11 @@
  *****************************************************************************
  *  Copyright (C) 2005 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/common/assoc_mgr.c b/src/common/assoc_mgr.c
index 3d281c4794e207e400a80bfb7a97cba0d4d254d0..befdea1a7ae054a082adf4cf74869cb0e9ae935c 100644
--- a/src/common/assoc_mgr.c
+++ b/src/common/assoc_mgr.c
@@ -7,7 +7,8 @@
  *  Written by Danny Auble <da@llnl.gov>
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -43,21 +44,31 @@
 
 #include "src/common/uid.h"
 #include "src/common/xstring.h"
+#include "src/common/slurm_priority.h"
 #include "src/slurmdbd/read_config.h"
 
-static List local_association_list = NULL;
-static List local_qos_list = NULL;
-static List local_user_list = NULL;
-static List assoc_mgr_wckey_list = NULL;
-static char *local_cluster_name = NULL;
+#define ASSOC_USAGE_VERSION 1
+
+acct_association_rec_t *assoc_mgr_root_assoc = NULL;
+uint32_t qos_max_priority = 0;
+
+List assoc_mgr_association_list = NULL;
+List assoc_mgr_qos_list = NULL;
+List assoc_mgr_user_list = NULL;
+List assoc_mgr_wckey_list = NULL;
+
+static char *assoc_mgr_cluster_name = NULL;
+static int setup_childern = 0;
 
 void (*remove_assoc_notify) (acct_association_rec_t *rec) = NULL;
 
 pthread_mutex_t assoc_mgr_association_lock = PTHREAD_MUTEX_INITIALIZER;
-static pthread_mutex_t local_file_lock = PTHREAD_MUTEX_INITIALIZER;
-static pthread_mutex_t local_qos_lock = PTHREAD_MUTEX_INITIALIZER;
-static pthread_mutex_t local_user_lock = PTHREAD_MUTEX_INITIALIZER;
-static pthread_mutex_t assoc_mgr_wckey_lock = PTHREAD_MUTEX_INITIALIZER;
+
+pthread_mutex_t assoc_mgr_qos_lock = PTHREAD_MUTEX_INITIALIZER;
+pthread_mutex_t assoc_mgr_user_lock = PTHREAD_MUTEX_INITIALIZER;
+pthread_mutex_t assoc_mgr_file_lock = PTHREAD_MUTEX_INITIALIZER;
+pthread_mutex_t assoc_mgr_wckey_lock = PTHREAD_MUTEX_INITIALIZER;
+
 
 /* 
  * Comparator used for sorting assocs largest cpu to smallest cpu
@@ -74,20 +85,32 @@ static int _sort_assoc_dec(acct_association_rec_t *assoc_a,
 	return -1;
 }
 
+/* the caller must verify that assoc is not NULL before calling this */
+static void _normalize_assoc_shares(acct_association_rec_t *assoc)
+{
+	acct_association_rec_t *assoc2 = assoc;
+
+	assoc2->shares_norm = 1.0;
+	while(assoc->parent_assoc_ptr) {
+		assoc2->shares_norm *=
+			(double)assoc->shares_raw / (double)assoc->level_shares;
+		assoc = assoc->parent_assoc_ptr;
+	}
+}
+
 static int _addto_used_info(acct_association_rec_t *assoc1,
 			    acct_association_rec_t *assoc2)
 {
 	if(!assoc1 || !assoc2)
 		return SLURM_ERROR;
 
-	assoc1->grp_used_cpu_mins += assoc2->grp_used_cpu_mins;
 	assoc1->grp_used_cpus += assoc2->grp_used_cpus;
 	assoc1->grp_used_nodes += assoc2->grp_used_nodes;
 	assoc1->grp_used_wall += assoc2->grp_used_wall;
 	
 	assoc1->used_jobs += assoc2->used_jobs;
 	assoc1->used_submit_jobs += assoc2->used_submit_jobs;
-	assoc1->used_shares += assoc2->used_shares;
+	assoc1->usage_raw += assoc2->usage_raw;
 
 	return SLURM_SUCCESS;
 }
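[Editor's note] _normalize_assoc_shares() above multiplies shares_raw / level_shares along the path from an association up to the root. A self-contained toy illustration (simplified structures, not the acct_association_rec_t tree): with an account holding 40 of 100 sibling shares and a user under it holding 10 of 50, the user's normalized share is (10/50) * (40/100) = 0.08. The QOS normalization added further down in this file (norm_priority = priority / qos_max_priority) follows the same idea with a single level.

#include <stdio.h>

struct node {
	double shares_raw;	/* this association's raw shares */
	double level_shares;	/* total shares among its siblings */
	struct node *parent;
};

/* Mirrors _normalize_assoc_shares(): product of ratios up to the root. */
static double normalize(const struct node *n)
{
	double norm = 1.0;

	for (; n->parent; n = n->parent)
		norm *= n->shares_raw / n->level_shares;
	return norm;
}

int main(void)
{
	struct node root = { 1, 1, NULL };
	struct node acct = { 40, 100, &root };
	struct node user = { 10, 50, &acct };

	printf("%.2f\n", normalize(&user));	/* prints 0.08 */
	return 0;
}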
@@ -97,14 +120,13 @@ static int _clear_used_info(acct_association_rec_t *assoc)
 	if(!assoc)
 		return SLURM_ERROR;
 
-	assoc->grp_used_cpu_mins = 0;
 	assoc->grp_used_cpus = 0;
 	assoc->grp_used_nodes = 0;
-	assoc->grp_used_wall = 0;
 	
 	assoc->used_jobs  = 0;
 	assoc->used_submit_jobs = 0;
-	/* do not reset used_shares if you need to reset it do it
+	/* do not reset usage_raw or grp_used_wall.
+	 * If you need to reset them, do it
 	 * elsewhere, since sometimes we call this and do not want
 	 * shares reset */
 
@@ -243,13 +265,22 @@ static int _set_assoc_parent_and_user(acct_association_rec_t *assoc,
 			}
 			list_iterator_destroy(itr);
 		}
+		if(assoc->parent_assoc_ptr && setup_childern) {
+			if(!assoc->parent_assoc_ptr->childern_list) 
+				assoc->parent_assoc_ptr->childern_list = 
+					list_create(NULL);
+			list_append(assoc->parent_assoc_ptr->childern_list,
+				    assoc);
+		}	
+			
 		if(assoc == assoc->parent_assoc_ptr) {
 			assoc->parent_assoc_ptr = NULL;
 			error("association %u was pointing to "
 			      "itself as it's parent");
 		}
-	}
-
+	} else 
+		assoc_mgr_root_assoc = assoc;
+		
 	if(assoc->user) {
 		uid_t pw_uid = uid_from_string(assoc->user);
 		if(pw_uid == (uid_t) -1) 
@@ -263,7 +294,7 @@ static int _set_assoc_parent_and_user(acct_association_rec_t *assoc,
 
 	return SLURM_SUCCESS;
 }
-
+	
 static int _post_association_list(List assoc_list)
 {
 	acct_association_rec_t *assoc = NULL;
@@ -280,11 +311,35 @@ static int _post_association_list(List assoc_list)
 		_set_assoc_parent_and_user(assoc, assoc_list, reset);
 		reset = 0;
 	}
+
+	if(setup_childern) {
+		acct_association_rec_t *assoc2 = NULL;
+		ListIterator itr2 = NULL;
+		/* Now set the shares on each level */
+		list_iterator_reset(itr);
+		while((assoc = list_next(itr))) {
+			int count = 0;
+			if(!assoc->childern_list
+			   || !list_count(assoc->childern_list))
+				continue;
+			itr2 = list_iterator_create(assoc->childern_list);
+			while((assoc2 = list_next(itr2))) 
+				count += assoc2->shares_raw;
+			list_iterator_reset(itr2);
+			while((assoc2 = list_next(itr2))) 
+				assoc2->level_shares = count;
+			list_iterator_destroy(itr2);
+		}	
+		/* Now normalize the static shares */
+		list_iterator_reset(itr);
+		while((assoc = list_next(itr))) 
+			_normalize_assoc_shares(assoc);
+	}
 	list_iterator_destroy(itr);
 	//END_TIMER2("load_associations");
 	return SLURM_SUCCESS;
 }
-	
+
 static int _post_user_list(List user_list)
 {
 	acct_user_rec_t *user = NULL;
@@ -324,43 +379,42 @@ static int _post_wckey_list(List wckey_list)
 	list_iterator_destroy(itr);
 	return SLURM_SUCCESS;
 }
-
-static int _get_local_association_list(void *db_conn, int enforce)
+static int _get_assoc_mgr_association_list(void *db_conn, int enforce)
 {
 	acct_association_cond_t assoc_q;
 	uid_t uid = getuid();
 
 //	DEF_TIMERS;
 	slurm_mutex_lock(&assoc_mgr_association_lock);
-	if(local_association_list)
-		list_destroy(local_association_list);
+	if(assoc_mgr_association_list)
+		list_destroy(assoc_mgr_association_list);
 
 	memset(&assoc_q, 0, sizeof(acct_association_cond_t));
-	if(local_cluster_name) {
+	if(assoc_mgr_cluster_name) {
 		assoc_q.cluster_list = list_create(NULL);
-		list_append(assoc_q.cluster_list, local_cluster_name);
+		list_append(assoc_q.cluster_list, assoc_mgr_cluster_name);
 	} else if((enforce & ACCOUNTING_ENFORCE_ASSOCS) && !slurmdbd_conf) {
-		error("_get_local_association_list: "
+		error("_get_assoc_mgr_association_list: "
 		      "no cluster name here going to get "
 		      "all associations.");
 	}
 
 //	START_TIMER;
-	local_association_list =
+	assoc_mgr_association_list =
 		acct_storage_g_get_associations(db_conn, uid, &assoc_q);
 //	END_TIMER2("get_associations");
 
 	if(assoc_q.cluster_list)
 		list_destroy(assoc_q.cluster_list);
 	
-	if(!local_association_list) {
+	if(!assoc_mgr_association_list) {
 		/* create list so we don't keep calling this if there
 		   isn't anything there */
-		local_association_list = 
+		assoc_mgr_association_list = 
 			list_create(destroy_acct_association_rec);
 		slurm_mutex_unlock(&assoc_mgr_association_lock);
 		if(enforce & ACCOUNTING_ENFORCE_ASSOCS) {
-			error("_get_local_association_list: "
+			error("_get_assoc_mgr_association_list: "
 			      "no list was made.");
 			return SLURM_ERROR;
 		} else {
@@ -370,37 +424,54 @@ static int _get_local_association_list(void *db_conn, int enforce)
 		}
 	} 
 
-	_post_association_list(local_association_list);
+	_post_association_list(assoc_mgr_association_list);
 
 	slurm_mutex_unlock(&assoc_mgr_association_lock);
 
 	return SLURM_SUCCESS;
 }
 
-static int _get_local_qos_list(void *db_conn, int enforce)
+static int _get_assoc_mgr_qos_list(void *db_conn, int enforce)
 {
 	uid_t uid = getuid();
 
-	slurm_mutex_lock(&local_qos_lock);
-	if(local_qos_list)
-		list_destroy(local_qos_list);
-	local_qos_list = acct_storage_g_get_qos(db_conn, uid, NULL);
+	slurm_mutex_lock(&assoc_mgr_qos_lock);
+	if(assoc_mgr_qos_list)
+		list_destroy(assoc_mgr_qos_list);
+	assoc_mgr_qos_list = acct_storage_g_get_qos(db_conn, uid, NULL);
 
-	if(!local_qos_list) {
-		slurm_mutex_unlock(&local_qos_lock);
+	if(!assoc_mgr_qos_list) {
+		slurm_mutex_unlock(&assoc_mgr_qos_lock);
 		if(enforce & ACCOUNTING_ENFORCE_ASSOCS) {
-			error("_get_local_qos_list: no list was made.");
+			error("_get_assoc_mgr_qos_list: no list was made.");
 			return SLURM_ERROR;
 		} else {
 			return SLURM_SUCCESS;
 		}		
+	} else {
+		ListIterator itr = list_iterator_create(assoc_mgr_qos_list);
+		acct_qos_rec_t *qos = NULL;
+		while((qos = list_next(itr))) {
+			if(qos->priority > qos_max_priority) 
+				qos_max_priority = qos->priority;
+		}
+
+		if(qos_max_priority) {
+			list_iterator_reset(itr);
+			
+			while((qos = list_next(itr))) {
+				qos->norm_priority = (double)qos->priority 
+					/ (double)qos_max_priority;
+			}
+		}
+		list_iterator_destroy(itr);
 	}
 
-	slurm_mutex_unlock(&local_qos_lock);
+	slurm_mutex_unlock(&assoc_mgr_qos_lock);
 	return SLURM_SUCCESS;
 }
 
-static int _get_local_user_list(void *db_conn, int enforce)
+static int _get_assoc_mgr_user_list(void *db_conn, int enforce)
 {
 	acct_user_cond_t user_q;
 	uid_t uid = getuid();
@@ -408,15 +479,15 @@ static int _get_local_user_list(void *db_conn, int enforce)
 	memset(&user_q, 0, sizeof(acct_user_cond_t));
 	user_q.with_coords = 1;
 	
-	slurm_mutex_lock(&local_user_lock);
-	if(local_user_list)
-		list_destroy(local_user_list);
-	local_user_list = acct_storage_g_get_users(db_conn, uid, &user_q);
+	slurm_mutex_lock(&assoc_mgr_user_lock);
+	if(assoc_mgr_user_list)
+		list_destroy(assoc_mgr_user_list);
+	assoc_mgr_user_list = acct_storage_g_get_users(db_conn, uid, &user_q);
 
-	if(!local_user_list) {
-		slurm_mutex_unlock(&local_user_lock);
+	if(!assoc_mgr_user_list) {
+		slurm_mutex_unlock(&assoc_mgr_user_lock);
 		if(enforce & ACCOUNTING_ENFORCE_ASSOCS) {
-			error("_get_local_user_list: "
+			error("_get_assoc_mgr_user_list: "
 			      "no list was made.");
 			return SLURM_ERROR;
 		} else {
@@ -424,12 +495,13 @@ static int _get_local_user_list(void *db_conn, int enforce)
 		}		
 	} 
 
-	_post_user_list(local_user_list);
+	_post_user_list(assoc_mgr_user_list);
 	
-	slurm_mutex_unlock(&local_user_lock);
+	slurm_mutex_unlock(&assoc_mgr_user_lock);
 	return SLURM_SUCCESS;
 }
 
+
 static int _get_local_wckey_list(void *db_conn, int enforce)
 {
 	acct_wckey_cond_t wckey_q;
@@ -441,9 +513,9 @@ static int _get_local_wckey_list(void *db_conn, int enforce)
 		list_destroy(assoc_mgr_wckey_list);
 
 	memset(&wckey_q, 0, sizeof(acct_wckey_cond_t));
-	if(local_cluster_name) {
+	if(assoc_mgr_cluster_name) {
 		wckey_q.cluster_list = list_create(NULL);
-		list_append(wckey_q.cluster_list, local_cluster_name);
+		list_append(wckey_q.cluster_list, assoc_mgr_cluster_name);
 	} else if((enforce & ACCOUNTING_ENFORCE_WCKEYS) && !slurmdbd_conf) {
 		error("_get_local_wckey_list: "
 		      "no cluster name here going to get "
@@ -481,48 +553,48 @@ static int _get_local_wckey_list(void *db_conn, int enforce)
 	return SLURM_SUCCESS;
 }
 
-static int _refresh_local_association_list(void *db_conn, int enforce)
+static int _refresh_assoc_mgr_association_list(void *db_conn, int enforce)
 {
 	acct_association_cond_t assoc_q;
 	List current_assocs = NULL;
 	uid_t uid = getuid();
 	ListIterator curr_itr = NULL;
-	ListIterator local_itr = NULL;
+	ListIterator assoc_mgr_itr = NULL;
 	acct_association_rec_t *curr_assoc = NULL, *assoc = NULL;
 //	DEF_TIMERS;
 
 	memset(&assoc_q, 0, sizeof(acct_association_cond_t));
-	if(local_cluster_name) {
+	if(assoc_mgr_cluster_name) {
 		assoc_q.cluster_list = list_create(NULL);
-		list_append(assoc_q.cluster_list, local_cluster_name);
+		list_append(assoc_q.cluster_list, assoc_mgr_cluster_name);
 	} else if((enforce & ACCOUNTING_ENFORCE_ASSOCS) && !slurmdbd_conf) {
-		error("_refresh_local_association_list: "
+		error("_refresh_assoc_mgr_association_list: "
 		      "no cluster name here going to get "
 		      "all associations.");
 	}
 
 	slurm_mutex_lock(&assoc_mgr_association_lock);
 
-	current_assocs = local_association_list;
+	current_assocs = assoc_mgr_association_list;
 
 //	START_TIMER;
-	local_association_list = 
+	assoc_mgr_association_list = 
 		acct_storage_g_get_associations(db_conn, uid, &assoc_q);
 //	END_TIMER2("get_associations");
 
 	if(assoc_q.cluster_list)
 		list_destroy(assoc_q.cluster_list);
 	
-	if(!local_association_list) {
-		local_association_list = current_assocs;
+	if(!assoc_mgr_association_list) {
+		assoc_mgr_association_list = current_assocs;
 		slurm_mutex_unlock(&assoc_mgr_association_lock);
 		
-		error("_refresh_local_association_list: "
+		error("_refresh_assoc_mgr_association_list: "
 		      "no new list given back keeping cached one.");
 		return SLURM_ERROR;
 	}
- 
-	_post_association_list(local_association_list);
+
+	_post_association_list(assoc_mgr_association_list);
 	
 	if(!current_assocs) {
 		slurm_mutex_unlock(&assoc_mgr_association_lock);
@@ -530,29 +602,29 @@ static int _refresh_local_association_list(void *db_conn, int enforce)
 	}
 	
 	curr_itr = list_iterator_create(current_assocs);
-	local_itr = list_iterator_create(local_association_list);
+	assoc_mgr_itr = list_iterator_create(assoc_mgr_association_list);
 	
 	/* Add the used limits.  We only look at the user associations
 	 * and walk up to the parents, since a parent may have moved */
 	while((curr_assoc = list_next(curr_itr))) {
 		if(!curr_assoc->user)
 			continue;
-		while((assoc = list_next(local_itr))) {
+		while((assoc = list_next(assoc_mgr_itr))) {
 			if(assoc->id == curr_assoc->id) 
 				break;
 		}
-		
+
 		while(assoc) {
 			_addto_used_info(assoc, curr_assoc);
 			/* get the parent last since this pointer is
 			   different than the one we are updating from */
 			assoc = assoc->parent_assoc_ptr;
 		}
-		list_iterator_reset(local_itr);			
+		list_iterator_reset(assoc_mgr_itr);			
 	}
 	
 	list_iterator_destroy(curr_itr);
-	list_iterator_destroy(local_itr);
+	list_iterator_destroy(assoc_mgr_itr);
 		
 	slurm_mutex_unlock(&assoc_mgr_association_lock);
 
@@ -565,7 +637,7 @@ static int _refresh_local_association_list(void *db_conn, int enforce)
 /* This only gets a new list if available dropping the old one if
  * needed
  */
-static int _refresh_local_qos_list(void *db_conn, int enforce)
+static int _refresh_assoc_mgr_qos_list(void *db_conn, int enforce)
 {
 	List current_qos = NULL;
 	uid_t uid = getuid();
@@ -573,18 +645,18 @@ static int _refresh_local_qos_list(void *db_conn, int enforce)
 	current_qos = acct_storage_g_get_qos(db_conn, uid, NULL);
 
 	if(!current_qos) {
-		error("_refresh_local_qos_list: "
+		error("_refresh_assoc_mgr_qos_list: "
 		      "no new list given back keeping cached one.");
 		return SLURM_ERROR;
 	}
 
-	slurm_mutex_lock(&local_qos_lock);
-	if(local_qos_list)
-		list_destroy(local_qos_list);
+	slurm_mutex_lock(&assoc_mgr_qos_lock);
+	if(assoc_mgr_qos_list)
+		list_destroy(assoc_mgr_qos_list);
 
-	local_qos_list = current_qos;
+	assoc_mgr_qos_list = current_qos;
 
-	slurm_mutex_unlock(&local_qos_lock);
+	slurm_mutex_unlock(&assoc_mgr_qos_lock);
 
 	return SLURM_SUCCESS;
 }
@@ -592,7 +664,7 @@ static int _refresh_local_qos_list(void *db_conn, int enforce)
 /* This only gets a new list if available dropping the old one if
  * needed 
  */
-static int _refresh_local_user_list(void *db_conn, int enforce)
+static int _refresh_assoc_mgr_user_list(void *db_conn, int enforce)
 {
 	List current_users = NULL;
 	acct_user_cond_t user_q;
@@ -604,20 +676,20 @@ static int _refresh_local_user_list(void *db_conn, int enforce)
 	current_users = acct_storage_g_get_users(db_conn, uid, &user_q);
 
 	if(!current_users) {
-		error("_refresh_local_user_list: "
+		error("_refresh_assoc_mgr_user_list: "
 		      "no new list given back keeping cached one.");
 		return SLURM_ERROR;
 	}
 	_post_user_list(current_users);
 
-	slurm_mutex_lock(&local_user_lock);
+	slurm_mutex_lock(&assoc_mgr_user_lock);
 
-	if(local_user_list) 
-		list_destroy(local_user_list);
+	if(assoc_mgr_user_list) 
+		list_destroy(assoc_mgr_user_list);
 
-	local_user_list = current_users;
+	assoc_mgr_user_list = current_users;
 	
-	slurm_mutex_unlock(&local_user_lock);
+	slurm_mutex_unlock(&assoc_mgr_user_lock);
 
 	return SLURM_SUCCESS;
 }
@@ -625,18 +697,18 @@ static int _refresh_local_user_list(void *db_conn, int enforce)
 /* This only gets a new list if available dropping the old one if
  * needed
  */
-static int _refresh_local_wckey_list(void *db_conn, int enforce)
+static int _refresh_assoc_wckey_list(void *db_conn, int enforce)
 {
 	acct_wckey_cond_t wckey_q;
 	List current_wckeys = NULL;
 	uid_t uid = getuid();
 
 	memset(&wckey_q, 0, sizeof(acct_wckey_cond_t));
-	if(local_cluster_name) {
+	if(assoc_mgr_cluster_name) {
 		wckey_q.cluster_list = list_create(NULL);
-		list_append(wckey_q.cluster_list, local_cluster_name);
+		list_append(wckey_q.cluster_list, assoc_mgr_cluster_name);
 	} else if((enforce & ACCOUNTING_ENFORCE_WCKEYS) && !slurmdbd_conf) {
-		error("_refresh_local_wckey_list: "
+		error("_refresh_assoc_wckey_list: "
 		      "no cluster name here going to get "
 		      "all wckeys.");
 	}
@@ -644,7 +716,7 @@ static int _refresh_local_wckey_list(void *db_conn, int enforce)
 	current_wckeys = acct_storage_g_get_wckeys(db_conn, uid, &wckey_q);
 
 	if(!current_wckeys) {
-		error("_refresh_local_wckey_list: "
+		error("_refresh_assoc_wckey_list: "
 		      "no new list given back keeping cached one.");
 		return SLURM_ERROR;
 	}
@@ -665,6 +737,16 @@ extern int assoc_mgr_init(void *db_conn, assoc_init_args_t *args)
 {
 	static uint16_t enforce = 0;
 	static uint16_t cache_level = ASSOC_MGR_CACHE_ALL;
+	static uint16_t checked_prio = 0;
+
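+	/* Descriptive note (added): the priority/multifactor plugin walks
+	 * per-association children lists to normalize shares, so their
+	 * setup is enabled when that plugin is configured; this is what
+	 * the setup_childern flag drives in the update code below. */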
+	if(!checked_prio) {
+		char *prio = slurm_get_priority_type();
+		if(prio && !strcmp(prio, "priority/multifactor")) 
+			setup_childern = 1;
+		
+		xfree(prio);
+		checked_prio = 1;
+	}
 
 	if(args) {
 		enforce = args->enforce;
@@ -681,27 +763,37 @@ extern int assoc_mgr_init(void *db_conn, assoc_init_args_t *args)
 		return SLURM_SUCCESS;
 	}
 
-	if((!local_cluster_name) && !slurmdbd_conf) {
-		xfree(local_cluster_name);
-		local_cluster_name = slurm_get_cluster_name();
+	if((!assoc_mgr_cluster_name) && !slurmdbd_conf) {
+		xfree(assoc_mgr_cluster_name);
+		assoc_mgr_cluster_name = slurm_get_cluster_name();
 	}
 
 	/* check if we can't talk to the db yet */
 	if(errno == ESLURM_ACCESS_DENIED)
 		return SLURM_ERROR;
 	
-	if((!local_association_list) && (cache_level & ASSOC_MGR_CACHE_ASSOC)) 
-		if(_get_local_association_list(db_conn, enforce) == SLURM_ERROR)
+	if((!assoc_mgr_association_list)
+	   && (cache_level & ASSOC_MGR_CACHE_ASSOC)) 
+		if(_get_assoc_mgr_association_list(db_conn, enforce)
+		   == SLURM_ERROR)
 			return SLURM_ERROR;
-
-	if((!local_qos_list) && (cache_level & ASSOC_MGR_CACHE_QOS))
-		if(_get_local_qos_list(db_conn, enforce) == SLURM_ERROR)
+		
+	if((!assoc_mgr_qos_list) && (cache_level & ASSOC_MGR_CACHE_QOS))
+		if(_get_assoc_mgr_qos_list(db_conn, enforce) == SLURM_ERROR)
 			return SLURM_ERROR;
 
-	if((!local_user_list) && (cache_level & ASSOC_MGR_CACHE_USER))
-		if(_get_local_user_list(db_conn, enforce) == SLURM_ERROR)
+	if((!assoc_mgr_user_list) && (cache_level & ASSOC_MGR_CACHE_USER))
+		if(_get_assoc_mgr_user_list(db_conn, enforce) == SLURM_ERROR)
 			return SLURM_ERROR;
-
+	if(assoc_mgr_association_list && !setup_childern) {
+		acct_association_rec_t *assoc = NULL;
+		ListIterator itr =
+			list_iterator_create(assoc_mgr_association_list);
+		while((assoc = list_next(itr))) {
+			log_assoc_rec(assoc, assoc_mgr_qos_list);
+		}
+		list_iterator_destroy(itr);
+	}
 	if((!assoc_mgr_wckey_list) && (cache_level & ASSOC_MGR_CACHE_WCKEY))
 		if(_get_local_wckey_list(db_conn, enforce) == SLURM_ERROR)
 			return SLURM_ERROR;
@@ -714,23 +806,70 @@ extern int assoc_mgr_fini(char *state_save_location)
 	if(state_save_location)
 		dump_assoc_mgr_state(state_save_location);
 
-	if(local_association_list) 
-		list_destroy(local_association_list);
-	if(local_qos_list)
-		list_destroy(local_qos_list);
-	if(local_user_list)
-		list_destroy(local_user_list);
+	if(assoc_mgr_association_list) 
+		list_destroy(assoc_mgr_association_list);
+	if(assoc_mgr_qos_list)
+		list_destroy(assoc_mgr_qos_list);
+	if(assoc_mgr_user_list)
+		list_destroy(assoc_mgr_user_list);
 	if(assoc_mgr_wckey_list)
 		list_destroy(assoc_mgr_wckey_list);
-	xfree(local_cluster_name);
-	local_association_list = NULL;
-	local_qos_list = NULL;
-	local_user_list = NULL;
+	xfree(assoc_mgr_cluster_name);
+	assoc_mgr_association_list = NULL;
+	assoc_mgr_qos_list = NULL;
+	assoc_mgr_user_list = NULL;
 	assoc_mgr_wckey_list = NULL;
 
 	return SLURM_SUCCESS;
 }
 
+extern int assoc_mgr_get_user_assocs(void *db_conn,
+				     acct_association_rec_t *assoc,
+				     int enforce, 
+				     List assoc_list)
+{
+	ListIterator itr = NULL;
+	acct_association_rec_t *found_assoc = NULL;
+	int set = 0;
+
+	xassert(assoc);
+	xassert(assoc->uid != (uint32_t)NO_VAL);
+	xassert(assoc_list);
+
+	if(!assoc_mgr_association_list) {
+		if(_get_assoc_mgr_association_list(db_conn, enforce) 
+		   == SLURM_ERROR)
+			return SLURM_ERROR;
+	}
+
+	if((!assoc_mgr_association_list
+	    || !list_count(assoc_mgr_association_list))
+	   && !(enforce & ACCOUNTING_ENFORCE_ASSOCS)) 
+		return SLURM_SUCCESS;
+
+	slurm_mutex_lock(&assoc_mgr_association_lock);
+	itr = list_iterator_create(assoc_mgr_association_list);
+	while((found_assoc = list_next(itr))) {
+		if(assoc->uid != found_assoc->uid) {
+			debug4("not the right user %u != %u",
+			       assoc->uid, found_assoc->uid);
+			continue;
+		}
+
+		list_append(assoc_list, found_assoc);
+		set = 1;
+	}
+	list_iterator_destroy(itr);
+	slurm_mutex_unlock(&assoc_mgr_association_lock);
+
+	if(set)
+		return SLURM_SUCCESS;
+	else {
+		debug("user %u does not have any associations", assoc->uid);
+		return SLURM_ERROR;
+	}
+}
+
 extern int assoc_mgr_fill_in_assoc(void *db_conn, acct_association_rec_t *assoc,
 				   int enforce, 
 				   acct_association_rec_t **assoc_pptr)
@@ -741,11 +880,13 @@ extern int assoc_mgr_fill_in_assoc(void *db_conn, acct_association_rec_t *assoc,
 
 	if (assoc_pptr)
 		*assoc_pptr = NULL;
-	if(!local_association_list) {
-		if(_get_local_association_list(db_conn, enforce) == SLURM_ERROR)
+	if(!assoc_mgr_association_list) {
+		if(_get_assoc_mgr_association_list(db_conn, enforce) 
+		   == SLURM_ERROR)
 			return SLURM_ERROR;
 	}
-	if((!local_association_list || !list_count(local_association_list))
+	if((!assoc_mgr_association_list
+	    || !list_count(assoc_mgr_association_list))
 	   && !(enforce & ACCOUNTING_ENFORCE_ASSOCS)) 
 		return SLURM_SUCCESS;
 
@@ -765,7 +906,8 @@ extern int assoc_mgr_fill_in_assoc(void *db_conn, acct_association_rec_t *assoc,
 			}
 			memset(&user, 0, sizeof(acct_user_rec_t));
 			user.uid = assoc->uid;
-			if(assoc_mgr_fill_in_user(db_conn, &user, enforce) 
+			if(assoc_mgr_fill_in_user(db_conn, &user,
+						  enforce, NULL) 
 			   == SLURM_ERROR) {
 				if(enforce & ACCOUNTING_ENFORCE_ASSOCS) 
 					return SLURM_ERROR;
@@ -778,14 +920,14 @@ extern int assoc_mgr_fill_in_assoc(void *db_conn, acct_association_rec_t *assoc,
 		}		
 		
 		if(!assoc->cluster)
-			assoc->cluster = local_cluster_name;
+			assoc->cluster = assoc_mgr_cluster_name;
 	}
 /* 	info("looking for assoc of user=%s(%u), acct=%s, " */
 /* 	     "cluster=%s, partition=%s", */
 /* 	     assoc->user, assoc->uid, assoc->acct, */
 /* 	     assoc->cluster, assoc->partition); */
 	slurm_mutex_lock(&assoc_mgr_association_lock);
-	itr = list_iterator_create(local_association_list);
+	itr = list_iterator_create(assoc_mgr_association_list);
 	while((found_assoc = list_next(itr))) {
 		if(assoc->id) {
 			if(assoc->id == found_assoc->id) {
@@ -813,7 +955,7 @@ extern int assoc_mgr_fill_in_assoc(void *db_conn, acct_association_rec_t *assoc,
 			}
 
 			/* only check for on the slurmdbd */
-			if(!local_cluster_name && found_assoc->cluster
+			if(!assoc_mgr_cluster_name && found_assoc->cluster
 			   && strcasecmp(assoc->cluster,
 					 found_assoc->cluster)) {
 				debug4("not the right cluster");
@@ -856,7 +998,7 @@ extern int assoc_mgr_fill_in_assoc(void *db_conn, acct_association_rec_t *assoc,
 	if(!assoc->partition)
 		assoc->partition = ret_assoc->partition;
 
-	assoc->fairshare       = ret_assoc->fairshare;
+	assoc->shares_raw       = ret_assoc->shares_raw;
 
 	assoc->grp_cpu_mins   = ret_assoc->grp_cpu_mins;
 	assoc->grp_cpus        = ret_assoc->grp_cpus;
@@ -877,7 +1019,6 @@ extern int assoc_mgr_fill_in_assoc(void *db_conn, acct_association_rec_t *assoc,
 		assoc->parent_acct       = xstrdup(ret_assoc->parent_acct);
 	} else 
 		assoc->parent_acct       = ret_assoc->parent_acct;
-
 	assoc->parent_assoc_ptr          = ret_assoc->parent_assoc_ptr;
 	assoc->parent_id                 = ret_assoc->parent_id;
 
@@ -887,21 +1028,24 @@ extern int assoc_mgr_fill_in_assoc(void *db_conn, acct_association_rec_t *assoc,
 }
 
 extern int assoc_mgr_fill_in_user(void *db_conn, acct_user_rec_t *user,
-				  int enforce)
+				  int enforce,
+				  acct_user_rec_t **user_pptr)
 {
 	ListIterator itr = NULL;
 	acct_user_rec_t * found_user = NULL;
 
-	if(!local_user_list) 
-		if(_get_local_user_list(db_conn, enforce) == SLURM_ERROR)
+	if(user_pptr)
+		*user_pptr = NULL;
+	if(!assoc_mgr_user_list) 
+		if(_get_assoc_mgr_user_list(db_conn, enforce) == SLURM_ERROR) 
 			return SLURM_ERROR;
 
-	if((!local_user_list || !list_count(local_user_list)) 
+	if((!assoc_mgr_user_list || !list_count(assoc_mgr_user_list))
 	   && !(enforce & ACCOUNTING_ENFORCE_ASSOCS)) 
 		return SLURM_SUCCESS;
 
-	slurm_mutex_lock(&local_user_lock);
-	itr = list_iterator_create(local_user_list);
+	slurm_mutex_lock(&assoc_mgr_user_lock);
+	itr = list_iterator_create(assoc_mgr_user_list);
 	while((found_user = list_next(itr))) {
 		if(user->uid != NO_VAL) {
 			if(user->uid == found_user->uid)
@@ -912,18 +1056,119 @@ extern int assoc_mgr_fill_in_user(void *db_conn, acct_user_rec_t *user,
 	}
 	list_iterator_destroy(itr);
 
-	if(found_user) {
-		/* This needs to be here just incase we don't have a
-		   list since it gets checked outside here and needs
-		   to exist. */
-		if(!found_user->coord_accts)
-			found_user->coord_accts = 
-				list_create(destroy_acct_coord_rec);
-		memcpy(user, found_user, sizeof(acct_user_rec_t));		
-		slurm_mutex_unlock(&local_user_lock);
+	if(!found_user) {
+		slurm_mutex_unlock(&assoc_mgr_user_lock);
+		if(enforce) 
+			return SLURM_ERROR;
+		else
+			return SLURM_SUCCESS;
+	}
+
+	debug3("found correct user");	
+	if(user_pptr)
+		*user_pptr = found_user;
+
+	/* create coord_accts just in case the list does not exist */
+	if(!found_user->coord_accts)
+		found_user->coord_accts = list_create(destroy_acct_coord_rec);
+
+	user->admin_level = found_user->admin_level;
+	if(!user->assoc_list)
+		user->assoc_list = found_user->assoc_list;
+	if(!user->coord_accts)
+		user->coord_accts = found_user->coord_accts;
+	if(!user->default_acct)
+		user->default_acct = found_user->default_acct;
+	if(!user->default_wckey)
+		user->default_wckey = found_user->default_wckey;
+	if(!user->name)
+		user->name = found_user->name;
+
+	slurm_mutex_unlock(&assoc_mgr_user_lock);
+	return SLURM_SUCCESS;
+
+}
+
+extern int assoc_mgr_fill_in_qos(void *db_conn, acct_qos_rec_t *qos,
+				 int enforce,
+				 acct_qos_rec_t **qos_pptr)
+{
+	ListIterator itr = NULL;
+	acct_qos_rec_t * found_qos = NULL;
+
+	if(qos_pptr)
+		*qos_pptr = NULL;
+	if(!assoc_mgr_qos_list) 
+		if(_get_assoc_mgr_qos_list(db_conn, enforce) == SLURM_ERROR)
+			return SLURM_ERROR;
+
+	if((!assoc_mgr_qos_list 
+	    || !list_count(assoc_mgr_qos_list)) && !enforce) 
 		return SLURM_SUCCESS;
+
+	slurm_mutex_lock(&assoc_mgr_qos_lock);
+	itr = list_iterator_create(assoc_mgr_qos_list);
+	while((found_qos = list_next(itr))) {
+		if(qos->id == found_qos->id) 
+			break;
+		else if(qos->name && !strcasecmp(qos->name, found_qos->name))
+			break;
+	}
+	list_iterator_destroy(itr);
+	
+	if(!found_qos) {
+		slurm_mutex_unlock(&assoc_mgr_qos_lock);
+		if(enforce) 
+			return SLURM_ERROR;
+		else
+			return SLURM_SUCCESS;
 	}
-	slurm_mutex_unlock(&local_user_lock);
+
+	debug3("found correct qos");
+	if (qos_pptr)
+		*qos_pptr = found_qos;
+
+	if(!qos->description)
+		qos->description = found_qos->description;
+
+	qos->id = found_qos->id;
+
+	if(!qos->job_flags)
+		qos->job_flags = found_qos->job_flags;
+
+	if(!qos->job_list)
+		qos->job_list = found_qos->job_list;
+
+	qos->grp_cpu_mins    = found_qos->grp_cpu_mins;
+	qos->grp_cpus        = found_qos->grp_cpus;
+	qos->grp_jobs        = found_qos->grp_jobs;
+	qos->grp_nodes       = found_qos->grp_nodes;
+	qos->grp_submit_jobs = found_qos->grp_submit_jobs;
+	qos->grp_wall        = found_qos->grp_wall;
+
+	qos->max_cpu_mins_pu = found_qos->max_cpu_mins_pu;
+	qos->max_cpus_pu     = found_qos->max_cpus_pu;
+	qos->max_jobs_pu     = found_qos->max_jobs_pu;
+	qos->max_nodes_pu    = found_qos->max_nodes_pu;
+	qos->max_submit_jobs_pu = found_qos->max_submit_jobs_pu;
+	qos->max_wall_pu     = found_qos->max_wall_pu;
+
+	if(!qos->name) 
+		qos->name = found_qos->name;
+
+	qos->norm_priority = found_qos->norm_priority;
+
+	if(!qos->preemptee_list)
+		qos->preemptee_list = found_qos->preemptee_list;
+	if(!qos->preemptor_list)
+		qos->preemptor_list = found_qos->preemptor_list;
+
+	qos->priority = found_qos->priority;
+
+	if(!qos->user_limit_list)
+		qos->user_limit_list = found_qos->user_limit_list;
+
+	slurm_mutex_unlock(&assoc_mgr_qos_lock);
-	return SLURM_ERROR;
+	return SLURM_SUCCESS;
 }
 
@@ -962,7 +1207,8 @@ extern int assoc_mgr_fill_in_wckey(void *db_conn, acct_wckey_rec_t *wckey,
 			memset(&user, 0, sizeof(acct_user_rec_t));
 			user.uid = wckey->uid;
 			user.name = wckey->user;
-			if(assoc_mgr_fill_in_user(db_conn, &user, enforce) 
+			if(assoc_mgr_fill_in_user(db_conn, &user,
+						  enforce, NULL) 
 			   == SLURM_ERROR) {
 				if(enforce & ACCOUNTING_ENFORCE_WCKEYS) 
 					return SLURM_ERROR;
@@ -986,7 +1232,7 @@ extern int assoc_mgr_fill_in_wckey(void *db_conn, acct_wckey_rec_t *wckey,
 			
 		
 		if(!wckey->cluster)
-			wckey->cluster = local_cluster_name;
+			wckey->cluster = assoc_mgr_cluster_name;
 	}
 /* 	info("looking for wckey of user=%s(%u), name=%s, " */
 /* 	     "cluster=%s", */
@@ -1021,7 +1267,7 @@ extern int assoc_mgr_fill_in_wckey(void *db_conn, acct_wckey_rec_t *wckey,
 			}
 
 			/* only check for on the slurmdbd */
-			if(!local_cluster_name) {
+			if(!assoc_mgr_cluster_name) {
 				if(!wckey->cluster) {
 					error("No cluster name was given "
 					      "to check against, "
@@ -1076,21 +1322,21 @@ extern acct_admin_level_t assoc_mgr_get_admin_level(void *db_conn,
 	ListIterator itr = NULL;
 	acct_user_rec_t * found_user = NULL;
 
-	if(!local_user_list) 
-		if(_get_local_user_list(db_conn, 0) == SLURM_ERROR)
+	if(!assoc_mgr_user_list) 
+		if(_get_assoc_mgr_user_list(db_conn, 0) == SLURM_ERROR)
 			return ACCT_ADMIN_NOTSET;
 
-	if(!local_user_list) 
+	if(!assoc_mgr_user_list) 
 		return ACCT_ADMIN_NOTSET;
 
-	slurm_mutex_lock(&local_user_lock);
-	itr = list_iterator_create(local_user_list);
+	slurm_mutex_lock(&assoc_mgr_user_lock);
+	itr = list_iterator_create(assoc_mgr_user_list);
 	while((found_user = list_next(itr))) {
 		if(uid == found_user->uid) 
 			break;
 	}
 	list_iterator_destroy(itr);
-	slurm_mutex_unlock(&local_user_lock);
+	slurm_mutex_unlock(&assoc_mgr_user_lock);
 		
 	if(found_user) 
 		return found_user->admin_level;
@@ -1106,15 +1352,15 @@ extern int assoc_mgr_is_user_acct_coord(void *db_conn,
 	acct_coord_rec_t *acct = NULL;
 	acct_user_rec_t * found_user = NULL;
 
-	if(!local_user_list) 
-		if(_get_local_user_list(db_conn, 0) == SLURM_ERROR)
+	if(!assoc_mgr_user_list) 
+		if(_get_assoc_mgr_user_list(db_conn, 0) == SLURM_ERROR)
 			return ACCT_ADMIN_NOTSET;
 
-	if(!local_user_list) 
+	if(!assoc_mgr_user_list) 
 		return ACCT_ADMIN_NOTSET;
 
-	slurm_mutex_lock(&local_user_lock);
-	itr = list_iterator_create(local_user_list);
+	slurm_mutex_lock(&assoc_mgr_user_lock);
+	itr = list_iterator_create(assoc_mgr_user_list);
 	while((found_user = list_next(itr))) {
 		if(uid == found_user->uid) 
 			break;
@@ -1122,7 +1368,7 @@ extern int assoc_mgr_is_user_acct_coord(void *db_conn,
 	list_iterator_destroy(itr);
 		
 	if(!found_user || !found_user->coord_accts) {
-		slurm_mutex_unlock(&local_user_lock);
+		slurm_mutex_unlock(&assoc_mgr_user_lock);
 		return 0;
 	}
 	itr = list_iterator_create(found_user->coord_accts);
@@ -1133,15 +1379,166 @@ extern int assoc_mgr_is_user_acct_coord(void *db_conn,
 	list_iterator_destroy(itr);
 	
 	if(acct) {
-		slurm_mutex_unlock(&local_user_lock);
+		slurm_mutex_unlock(&assoc_mgr_user_lock);
 		return 1;
 	}
-	slurm_mutex_unlock(&local_user_lock);
+	slurm_mutex_unlock(&assoc_mgr_user_lock);
 
 	return 0;	
 }
 
-extern int assoc_mgr_update_local_assocs(acct_update_object_t *update)
+extern List assoc_mgr_get_shares(void *db_conn,
+				 uid_t uid, List acct_list, List user_list)
+{
+	ListIterator itr = NULL;
+	ListIterator user_itr = NULL;
+	ListIterator acct_itr = NULL;
+	acct_association_rec_t *assoc = NULL;
+	association_shares_object_t *share = NULL;
+	List ret_list = NULL;
+	char *tmp_char = NULL;
+	acct_user_rec_t user;
+	int is_admin=1;
+	uint16_t private_data = slurm_get_private_data();
+
+	if(!assoc_mgr_association_list
+	   || !list_count(assoc_mgr_association_list))
+		return NULL;
+
+	memset(&user, 0, sizeof(acct_user_rec_t));
+	user.uid = uid;
+	
+	if(user_list && list_count(user_list)) 
+		user_itr = list_iterator_create(user_list);
+
+	if(acct_list && list_count(acct_list)) 
+		acct_itr = list_iterator_create(acct_list);
+
+	if (private_data & PRIVATE_DATA_USAGE) {
+		uint32_t slurm_uid = slurm_get_slurm_user_id();
+		is_admin = 0;
+		/* Check permissions of the requesting user.
+		 */
+		if((uid == slurm_uid || uid == 0)
+		   || assoc_mgr_get_admin_level(db_conn, uid) 
+		   >= ACCT_ADMIN_OPERATOR) 
+			is_admin = 1;	
+		else {
+			assoc_mgr_fill_in_user(db_conn, &user, 1, NULL);
+		}
+	}
+
+	ret_list = list_create(slurm_destroy_association_shares_object);
+
+	slurm_mutex_lock(&assoc_mgr_association_lock);
+	itr = list_iterator_create(assoc_mgr_association_list);
+	while((assoc = list_next(itr))) {		
+		if(user_itr && assoc->user) {
+			while((tmp_char = list_next(user_itr))) {
+				if(!strcasecmp(tmp_char, assoc->user))
+					break;
+			}
+			list_iterator_reset(user_itr);
+			/* not correct user */
+			if(!tmp_char) 
+				continue;
+		}
+
+		if(acct_itr) {
+			while((tmp_char = list_next(acct_itr))) {
+				if(!strcasecmp(tmp_char, assoc->acct))
+					break;
+			}
+			list_iterator_reset(acct_itr);
+			/* not correct account */
+			if(!tmp_char) 
+				continue;
+		}
+
+		if (private_data & PRIVATE_DATA_USAGE) {
+			if(!is_admin) {
+				ListIterator itr = NULL;
+				acct_coord_rec_t *coord = NULL;
+
+				if(assoc->user && 
+				   !strcmp(assoc->user, user.name)) 
+					goto is_user;
+				
+				if(!user.coord_accts) {
+					debug4("This user isn't a coord.");
+					goto bad_user;
+				}
+
+				if(!assoc->acct) {
+					debug("No account name given "
+					      "in association.");
+					goto bad_user;				
+				}
+				
+				itr = list_iterator_create(user.coord_accts);
+				while((coord = list_next(itr))) {
+					if(!strcasecmp(coord->name, 
+						       assoc->acct))
+						break;
+				}
+				list_iterator_destroy(itr);
+				
+				if(coord) 
+					goto is_user;
+				
+			bad_user:
+				continue;
+			}
+		}
+	is_user:
+
+		share = xmalloc(sizeof(association_shares_object_t));
+		list_append(ret_list, share);
+
+		share->assoc_id = assoc->id;
+		share->cluster = xstrdup(assoc->cluster);
+
+		if(assoc == assoc_mgr_root_assoc) 
+			share->shares_raw = NO_VAL;
+		else 
+			share->shares_raw = assoc->shares_raw;
+
+		share->shares_norm = assoc->shares_norm;
+		share->usage_raw = (uint64_t)assoc->usage_raw;
+
+		if(assoc->user) {
+			/* We only calculate user effective usage when
+			 * we need it
+			 */
+			if(assoc->usage_efctv == (long double)NO_VAL) 
+				priority_g_set_assoc_usage(assoc);
+			
+			share->name = xstrdup(assoc->user);
+			share->parent = xstrdup(assoc->acct);
+			share->user = 1;
+		} else {
+			share->name = xstrdup(assoc->acct);
+			if(!assoc->parent_acct && assoc->parent_assoc_ptr)
+				share->parent = 
+					xstrdup(assoc->parent_assoc_ptr->acct);
+			else
+				share->parent = xstrdup(assoc->parent_acct);
+		}
+		share->usage_norm = (double)assoc->usage_norm;
+		share->usage_efctv = (double)assoc->usage_efctv;
+	}
+	list_iterator_destroy(itr);
+	slurm_mutex_unlock(&assoc_mgr_association_lock);
+	
+	if(user_itr) 
+		list_iterator_destroy(user_itr);
+	if(acct_itr) 
+		list_iterator_destroy(acct_itr);
+		
+	return ret_list;
+}
+
+extern int assoc_mgr_update_assocs(acct_update_object_t *update)
 {
 	acct_association_rec_t * rec = NULL;
 	acct_association_rec_t * object = NULL;
@@ -1150,15 +1547,16 @@ extern int assoc_mgr_update_local_assocs(acct_update_object_t *update)
 	int parents_changed = 0;
 	List remove_list = NULL;
 
-	if(!local_association_list)
+	if(!assoc_mgr_association_list)
 		return SLURM_SUCCESS;
 
 	slurm_mutex_lock(&assoc_mgr_association_lock);
-	itr = list_iterator_create(local_association_list);
+	itr = list_iterator_create(assoc_mgr_association_list);
 	while((object = list_pop(update->objects))) {
-		if(object->cluster && local_cluster_name) {
+		if(object->cluster && assoc_mgr_cluster_name) {
 			/* only update the local clusters assocs */
-			if(strcasecmp(object->cluster, local_cluster_name)) {
+			if(strcasecmp(object->cluster, 
+				      assoc_mgr_cluster_name)) {
 				destroy_acct_association_rec(object);	
 				continue;
 			}
@@ -1197,7 +1595,7 @@ extern int assoc_mgr_update_local_assocs(acct_update_object_t *update)
 				}
 
 				/* only check for on the slurmdbd */
-				if(!local_cluster_name && object->cluster
+				if(!assoc_mgr_cluster_name && object->cluster
 				   && (!rec->cluster
 				       || strcasecmp(object->cluster,
 						     rec->cluster))) {
@@ -1215,9 +1613,17 @@ extern int assoc_mgr_update_local_assocs(acct_update_object_t *update)
 				break;
 			}
 
-			if(object->fairshare != NO_VAL) 
-				rec->fairshare = object->fairshare;
-			
+			if(object->shares_raw != NO_VAL) {
+				rec->shares_raw = object->shares_raw;
+				if(setup_childern) {
+					/* we need to update the shares on
+					   each sibling and child
+					   association now 
+					*/
+					parents_changed = 1;
+				}
+			}
+
 			if(object->grp_cpu_mins != NO_VAL) 
 				rec->grp_cpu_mins = object->grp_cpu_mins;
 			if(object->grp_cpus != NO_VAL) 
@@ -1254,7 +1660,6 @@ extern int assoc_mgr_update_local_assocs(acct_update_object_t *update)
 				// after all new parents have been set we will
 				// reset the parent pointers below
 				parents_changed = 1;
-				
 			}
 
 			if(object->qos_list) {
@@ -1266,11 +1671,12 @@ extern int assoc_mgr_update_local_assocs(acct_update_object_t *update)
 					object->qos_list = NULL;
 				}
 			}
-			if(!slurmdbd_conf) {
+
+			if(!slurmdbd_conf && !parents_changed) {
 				debug("updating assoc %u", rec->id);
-				slurm_mutex_lock(&local_qos_lock);
-				log_assoc_rec(rec, local_qos_list);
-				slurm_mutex_unlock(&local_qos_lock);
+				slurm_mutex_lock(&assoc_mgr_qos_lock);
+				log_assoc_rec(rec, assoc_mgr_qos_list);
+				slurm_mutex_unlock(&assoc_mgr_qos_lock);
 			}
 			break;
 		case ACCT_ADD_ASSOC:
@@ -1278,7 +1684,7 @@ extern int assoc_mgr_update_local_assocs(acct_update_object_t *update)
 				//rc = SLURM_ERROR;
 				break;
 			}
-			list_append(local_association_list, object);
+			list_append(assoc_mgr_association_list, object);
 			object = NULL;
 			parents_changed = 1; // set since we need to
 					     // set the parent
@@ -1288,6 +1694,12 @@ extern int assoc_mgr_update_local_assocs(acct_update_object_t *update)
 				//rc = SLURM_ERROR;
 				break;
 			}
+
+			if(setup_childern)
+				parents_changed = 1; /* set since we need to
+							set the shares
+							of surrounding children
+						     */
 			if(remove_assoc_notify) {
 				/* since there are some deadlock
 				   issues while inside our lock here
@@ -1300,7 +1712,6 @@ extern int assoc_mgr_update_local_assocs(acct_update_object_t *update)
 				list_append(remove_list, rec);
 			} else
 				list_delete_item(itr);
-
 			break;
 		default:
 			break;
@@ -1314,31 +1725,64 @@ extern int assoc_mgr_update_local_assocs(acct_update_object_t *update)
 	 */
 	if(parents_changed) {
 		int reset = 1;
-		list_sort(local_association_list, 
+		list_sort(assoc_mgr_association_list, 
 			  (ListCmpF)_sort_assoc_dec);
 
 		list_iterator_reset(itr);
+		/* flush the children lists */
+		if(setup_childern) {
+			while((object = list_next(itr))) {
+				if(object->childern_list)
+					list_flush(object->childern_list);
+			}
+			list_iterator_reset(itr);
+		}
 		while((object = list_next(itr))) {
+			/* The root never changes so just continue
+			   here. */
+			if (object == assoc_mgr_root_assoc)
+				continue;
+
 			/* reset the limits because since a parent
 			   changed we could have different usage
 			*/
 			if(!object->user) {
 				_clear_used_info(object);
-				object->used_shares = 0;
+				object->usage_raw = 0;
+				object->grp_used_wall = 0;
 			}
 			_set_assoc_parent_and_user(
-				object, local_association_list, reset);
+				object, assoc_mgr_association_list, reset);
 			reset = 0;
 		}
 		/* Now that we have set up the parents correctly we
 		   can update the used limits
 		*/
 		list_iterator_reset(itr);
-		while((object = list_next(itr))) {			
+		while((object = list_next(itr))) {
+			if(setup_childern) {
+				int count = 0;
+				ListIterator itr2 = NULL;
+				if(!object->childern_list
+				   || !list_count(object->childern_list))
+					goto is_user;
+				itr2 = list_iterator_create(
+					object->childern_list);
+				while((rec = list_next(itr2))) 
+					count += rec->shares_raw;
+				list_iterator_reset(itr2);
+				while((rec = list_next(itr2))) 
+					rec->level_shares = count;
+				list_iterator_destroy(itr2);
+			}
+		is_user:
 			if(!object->user)
 				continue;
 
 			rec = object;
+			/* walk up from this user association,
+			   charging its usage to every parent
+			*/
 			while(object->parent_assoc_ptr) {
 				/* we need to get the parent first
 				   here since we start at the child
@@ -1348,6 +1792,16 @@ extern int assoc_mgr_update_local_assocs(acct_update_object_t *update)
 				_addto_used_info(object, rec);
 			}
 		}
+		if(setup_childern) {
+			/* Now normalize the static shares */
+			slurm_mutex_lock(&assoc_mgr_qos_lock);
+			list_iterator_reset(itr);
+			while((object = list_next(itr))) {
+				_normalize_assoc_shares(object);
+				log_assoc_rec(object, assoc_mgr_qos_list);
+			}
+			slurm_mutex_unlock(&assoc_mgr_qos_lock);
+		}
 	}
 
 	list_iterator_destroy(itr);
@@ -1368,7 +1822,7 @@ extern int assoc_mgr_update_local_assocs(acct_update_object_t *update)
 	return rc;	
 }
 
-extern int assoc_mgr_update_local_wckeys(acct_update_object_t *update)
+extern int assoc_mgr_update_wckeys(acct_update_object_t *update)
 {
 	acct_wckey_rec_t * rec = NULL;
 	acct_wckey_rec_t * object = NULL;
@@ -1382,9 +1836,9 @@ extern int assoc_mgr_update_local_wckeys(acct_update_object_t *update)
 	slurm_mutex_lock(&assoc_mgr_wckey_lock);
 	itr = list_iterator_create(assoc_mgr_wckey_list);
 	while((object = list_pop(update->objects))) {
-		if(object->cluster && local_cluster_name) {
+		if(object->cluster && assoc_mgr_cluster_name) {
 			/* only update the local clusters assocs */
-			if(strcasecmp(object->cluster, local_cluster_name)) {
+			if(strcasecmp(object->cluster, assoc_mgr_cluster_name)) {
 				destroy_acct_wckey_rec(object);	
 				continue;
 			}
@@ -1411,7 +1865,7 @@ extern int assoc_mgr_update_local_wckeys(acct_update_object_t *update)
 				}
 				
 				/* only check for on the slurmdbd */
-				if(!local_cluster_name && object->cluster
+				if(!assoc_mgr_cluster_name && object->cluster
 				   && (!rec->cluster
 				       || strcasecmp(object->cluster,
 						     rec->cluster))) {
@@ -1465,7 +1919,7 @@ extern int assoc_mgr_update_local_wckeys(acct_update_object_t *update)
 	return rc;	
 }
 
-extern int assoc_mgr_update_local_users(acct_update_object_t *update)
+extern int assoc_mgr_update_users(acct_update_object_t *update)
 {
 	acct_user_rec_t * rec = NULL;
 	acct_user_rec_t * object = NULL;
@@ -1474,11 +1928,11 @@ extern int assoc_mgr_update_local_users(acct_update_object_t *update)
 	int rc = SLURM_SUCCESS;
 	uid_t pw_uid;
 
-	if(!local_user_list)
+	if(!assoc_mgr_user_list)
 		return SLURM_SUCCESS;
 
-	slurm_mutex_lock(&local_user_lock);
-	itr = list_iterator_create(local_user_list);
+	slurm_mutex_lock(&assoc_mgr_user_lock);
+	itr = list_iterator_create(assoc_mgr_user_list);
 	while((object = list_pop(update->objects))) {
 		list_iterator_reset(itr);
 		while((rec = list_next(itr))) {
@@ -1522,7 +1976,7 @@ extern int assoc_mgr_update_local_users(acct_update_object_t *update)
 				object->uid = NO_VAL;
 			} else
 				object->uid = pw_uid;
-			list_append(local_user_list, object);
+			list_append(assoc_mgr_user_list, object);
 			object = NULL;
 			break;
 		case ACCT_REMOVE_USER:
@@ -1557,12 +2011,12 @@ extern int assoc_mgr_update_local_users(acct_update_object_t *update)
 		destroy_acct_user_rec(object);			
 	}
 	list_iterator_destroy(itr);
-	slurm_mutex_unlock(&local_user_lock);
+	slurm_mutex_unlock(&assoc_mgr_user_lock);
 
 	return rc;	
 }
 
-extern int assoc_mgr_update_local_qos(acct_update_object_t *update)
+extern int assoc_mgr_update_qos(acct_update_object_t *update)
 {
 	acct_qos_rec_t *rec = NULL;
 	acct_qos_rec_t *object = NULL;
@@ -1574,11 +2028,11 @@ extern int assoc_mgr_update_local_qos(acct_update_object_t *update)
 	acct_association_rec_t *assoc = NULL;
 	int rc = SLURM_SUCCESS;
 
-	if(!local_qos_list)
+	if(!assoc_mgr_qos_list)
 		return SLURM_SUCCESS;
 
-	slurm_mutex_lock(&local_qos_lock);
-	itr = list_iterator_create(local_qos_list);
+	slurm_mutex_lock(&assoc_mgr_qos_lock);
+	itr = list_iterator_create(assoc_mgr_qos_list);
 	while((object = list_pop(update->objects))) {
 		list_iterator_reset(itr);
 		while((rec = list_next(itr))) {
@@ -1594,7 +2048,7 @@ extern int assoc_mgr_update_local_qos(acct_update_object_t *update)
 				//rc = SLURM_ERROR;
 				break;
 			}
-			list_append(local_qos_list, object);
+			list_append(assoc_mgr_qos_list, object);
 			object = NULL;			
 			break;
 		case ACCT_MODIFY_QOS:
@@ -1607,7 +2061,7 @@ extern int assoc_mgr_update_local_qos(acct_update_object_t *update)
 			tmp_char = xstrdup_printf("%d", object->id);
 			slurm_mutex_lock(&assoc_mgr_association_lock);
 			assoc_itr = list_iterator_create(
-				local_association_list);
+				assoc_mgr_association_list);
 			while((assoc = list_next(assoc_itr))) {
 				if(!assoc->qos_list
 				   || !list_count(assoc->qos_list))
@@ -1637,7 +2091,7 @@ extern int assoc_mgr_update_local_qos(acct_update_object_t *update)
 		destroy_acct_qos_rec(object);			
 	}
 	list_iterator_destroy(itr);
-	slurm_mutex_unlock(&local_qos_lock);
+	slurm_mutex_unlock(&assoc_mgr_qos_lock);
 
 	return rc;	
 }
@@ -1649,16 +2103,18 @@ extern int assoc_mgr_validate_assoc_id(void *db_conn,
 	ListIterator itr = NULL;
 	acct_association_rec_t * found_assoc = NULL;
 
-	if(!local_association_list) 
-		if(_get_local_association_list(db_conn, enforce) == SLURM_ERROR)
+	if(!assoc_mgr_association_list) 
+		if(_get_assoc_mgr_association_list(db_conn, enforce) 
+		   == SLURM_ERROR)
 			return SLURM_ERROR;
 
-	if((!local_association_list || !list_count(local_association_list))
+	if((!assoc_mgr_association_list
+	    || !list_count(assoc_mgr_association_list))
 	   && !(enforce & ACCOUNTING_ENFORCE_ASSOCS)) 
 		return SLURM_SUCCESS;
 	
 	slurm_mutex_lock(&assoc_mgr_association_lock);
-	itr = list_iterator_create(local_association_list);
+	itr = list_iterator_create(assoc_mgr_association_list);
 	while((found_assoc = list_next(itr))) {
 		if(assoc_id == found_assoc->id) 
 			break;
@@ -1677,11 +2133,11 @@ extern void assoc_mgr_clear_used_info(void)
 	ListIterator itr = NULL;
 	acct_association_rec_t * found_assoc = NULL;
 
-	if (!local_association_list)
+	if (!assoc_mgr_association_list)
 		return;
 
 	slurm_mutex_lock(&assoc_mgr_association_lock);
-	itr = list_iterator_create(local_association_list);
+	itr = list_iterator_create(assoc_mgr_association_list);
 	while((found_assoc = list_next(itr))) {
 		_clear_used_info(found_assoc);
 	}
@@ -1703,10 +2159,10 @@ extern int dump_assoc_mgr_state(char *state_save_location)
 	pack16(SLURMDBD_VERSION, buffer);
 	pack_time(time(NULL), buffer);
 
-	if(local_association_list) {
+	if(assoc_mgr_association_list) {
 		memset(&msg, 0, sizeof(dbd_list_msg_t));
 		slurm_mutex_lock(&assoc_mgr_association_lock);
-		msg.my_list = local_association_list;
+		msg.my_list = assoc_mgr_association_list;
 		/* let us know what to unpack */
 		pack16(DBD_ADD_ASSOCS, buffer);
 		slurmdbd_pack_list_msg(SLURMDBD_VERSION, 
@@ -1714,26 +2170,26 @@ extern int dump_assoc_mgr_state(char *state_save_location)
 		slurm_mutex_unlock(&assoc_mgr_association_lock);
 	}
 	
-	if(local_user_list) {
+	if(assoc_mgr_user_list) {
 		memset(&msg, 0, sizeof(dbd_list_msg_t));
-		slurm_mutex_lock(&local_user_lock);
-		msg.my_list = local_user_list;
+		slurm_mutex_lock(&assoc_mgr_user_lock);
+		msg.my_list = assoc_mgr_user_list;
 		/* let us know what to unpack */
 		pack16(DBD_ADD_USERS, buffer);
 		slurmdbd_pack_list_msg(SLURMDBD_VERSION, 
 				       DBD_ADD_USERS, &msg, buffer);
-		slurm_mutex_unlock(&local_user_lock);
+		slurm_mutex_unlock(&assoc_mgr_user_lock);
 	}
 
-	if(local_qos_list) {		
+	if(assoc_mgr_qos_list) {		
 		memset(&msg, 0, sizeof(dbd_list_msg_t));
-		slurm_mutex_lock(&local_qos_lock);
-		msg.my_list = local_qos_list;
+		slurm_mutex_lock(&assoc_mgr_qos_lock);
+		msg.my_list = assoc_mgr_qos_list;
 		/* let us know what to unpack */
 		pack16(DBD_ADD_QOS, buffer);
 		slurmdbd_pack_list_msg(SLURMDBD_VERSION, 
 				       DBD_ADD_QOS, &msg, buffer);	
-		slurm_mutex_unlock(&local_qos_lock);
+		slurm_mutex_unlock(&assoc_mgr_qos_lock);
 	}
 
 	if(assoc_mgr_wckey_list) {		
@@ -1748,14 +2204,78 @@ extern int dump_assoc_mgr_state(char *state_save_location)
 	}
 
 	/* write the buffer to file */
-	old_file = xstrdup(state_save_location);
-	xstrcat(old_file, "/assoc_mgr_state.old");
-	reg_file = xstrdup(state_save_location);
-	xstrcat(reg_file, "/assoc_mgr_state");
-	new_file = xstrdup(state_save_location);
-	xstrcat(new_file, "/assoc_mgr_state.new");
+	reg_file = xstrdup_printf("%s/assoc_mgr_state", state_save_location);
+	old_file = xstrdup_printf("%s.old", reg_file);
+	new_file = xstrdup_printf("%s.new", reg_file);
+	
+	slurm_mutex_lock(&assoc_mgr_file_lock);
+	log_fd = creat(new_file, 0600);
+	if (log_fd < 0) {
+		error("Can't save state, create file %s error %m",
+		      new_file);
+		error_code = errno;
+	} else {
+		int pos = 0, nwrite = get_buf_offset(buffer), amount;
+		char *data = (char *)get_buf_data(buffer);
+		high_buffer_size = MAX(nwrite, high_buffer_size);
+		while (nwrite > 0) {
+			amount = write(log_fd, &data[pos], nwrite);
+			if (amount < 0) {
+				if (errno == EINTR)
+					continue;
+				error("Error writing file %s, %m", new_file);
+				error_code = errno;
+				break;
+			}
+			nwrite -= amount;
+			pos    += amount;
+		}
+		fsync(log_fd);
+		close(log_fd);
+	}
+	if (error_code)
+		(void) unlink(new_file);
+	else {			/* file shuffle */
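+		/* Keep the previous state as *.old, then swap the freshly
+		 * written *.new into place via link()/unlink(). */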
+		(void) unlink(old_file);
+		(void) link(reg_file, old_file);
+		(void) unlink(reg_file);
+		(void) link(new_file, reg_file);
+		(void) unlink(new_file);
+	}
+	xfree(old_file);
+	xfree(reg_file);
+	xfree(new_file);
+
+	free_buf(buffer);
+	/* now make a file for assoc_usage */
+
+	buffer = init_buf(high_buffer_size);
+	/* write header: version, time */
+	pack16(ASSOC_USAGE_VERSION, buffer);
+	pack_time(time(NULL), buffer);
+
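+	/* Per user association the record is: 32-bit id, 64-bit raw
+	 * usage (fractional part dropped), 32-bit grp_used_wall;
+	 * load_assoc_usage() unpacks the same sequence. */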
+	if(assoc_mgr_association_list) {
+		ListIterator itr = NULL;
+		acct_association_rec_t *assoc = NULL;
+		slurm_mutex_lock(&assoc_mgr_association_lock);
+		itr = list_iterator_create(assoc_mgr_association_list);
+		while((assoc = list_next(itr))) {
+			if(!assoc->user)
+				continue;
+			
+			pack32(assoc->id, buffer);
+			/* we only care about the integer part here,
+			   so anything under 1 is dropped
+			*/
+			pack64((uint64_t)assoc->usage_raw, buffer);
+			pack32(assoc->grp_used_wall, buffer);
+		}
+		list_iterator_destroy(itr);
+		slurm_mutex_unlock(&assoc_mgr_association_lock);
+	}
+
+	reg_file = xstrdup_printf("%s/assoc_usage", state_save_location);
+	old_file = xstrdup_printf("%s.old", reg_file);
+	new_file = xstrdup_printf("%s.new", reg_file);
 	
-	slurm_mutex_lock(&local_file_lock);
 	log_fd = creat(new_file, 0600);
-	if (log_fd == 0) {
+	if (log_fd < 0) {
 		error("Can't save state, create file %s error %m",
@@ -1790,7 +2310,7 @@ extern int dump_assoc_mgr_state(char *state_save_location)
 	xfree(old_file);
 	xfree(reg_file);
 	xfree(new_file);
-	slurm_mutex_unlock(&local_file_lock);
+	slurm_mutex_unlock(&assoc_mgr_file_lock);
 	
 	free_buf(buffer);
 	END_TIMER2("dump_assoc_mgr_state");
@@ -1798,6 +2318,110 @@ extern int dump_assoc_mgr_state(char *state_save_location)
 
 }
 
+extern int load_assoc_usage(char *state_save_location)
+{
+	int data_allocated, data_read = 0, error_code = SLURM_SUCCESS;
+	uint32_t data_size = 0;
+	uint16_t ver = 0;
+	int state_fd;
+	char *data = NULL, *state_file;
+	Buf buffer;
+	time_t buf_time;
+	ListIterator itr = NULL;
+
+	if(!assoc_mgr_association_list)
+		return SLURM_SUCCESS;
+
+	/* read the file */
+	state_file = xstrdup(state_save_location);
+	xstrcat(state_file, "/assoc_usage");
+	//info("looking at the %s file", state_file);
+	slurm_mutex_lock(&assoc_mgr_file_lock);
+	state_fd = open(state_file, O_RDONLY);
+	if (state_fd < 0) {
+		debug2("No Assoc usage file (%s) to recover", state_file);
+		error_code = ENOENT;
+	} else {
+		data_allocated = BUF_SIZE;
+		data = xmalloc(data_allocated);
+		while (1) {
+			data_read = read(state_fd, &data[data_size],
+					 BUF_SIZE);
+			if (data_read < 0) {
+				if (errno == EINTR)
+					continue;
+				else {
+					error("Read error on %s: %m", 
+					      state_file);
+					break;
+				}
+			} else if (data_read == 0)	/* eof */
+				break;
+			data_size      += data_read;
+			data_allocated += data_read;
+			xrealloc(data, data_allocated);
+		}
+		close(state_fd);
+	}
+	xfree(state_file);
+	slurm_mutex_unlock(&assoc_mgr_file_lock);
+
+	buffer = create_buf(data, data_size);
+
+	safe_unpack16(&ver, buffer);
+	debug3("Version in assoc_usage header is %u", ver);
+	if (ver != ASSOC_USAGE_VERSION) {
+		error("***********************************************");
+		error("Cannot recover assoc_usage state, incompatible version, "
+		      "got %u need %u", ver, ASSOC_USAGE_VERSION);
+		error("***********************************************");
+		free_buf(buffer);
+		return EFAULT;
+	}
+
+	safe_unpack_time(&buf_time, buffer);
+	
+	slurm_mutex_lock(&assoc_mgr_association_lock);
+	itr = list_iterator_create(assoc_mgr_association_list);
+	while (remaining_buf(buffer) > 0) {
+		uint32_t assoc_id = 0;
+		uint32_t grp_used_wall = 0;
+		uint64_t usage_raw = 0;
+		acct_association_rec_t *assoc = NULL;
+
+		safe_unpack32(&assoc_id, buffer);
+		safe_unpack64(&usage_raw, buffer);
+		safe_unpack32(&grp_used_wall, buffer);
+		while((assoc = list_next(itr))) 
+			if(assoc->id == assoc_id)
+				break;
+		
+		while(assoc) {
+			assoc->grp_used_wall += grp_used_wall;
+			assoc->usage_raw += (long double)usage_raw;
+
+			assoc = assoc->parent_assoc_ptr;
+			if(assoc == assoc_mgr_root_assoc)
+				break;
+		}
+		list_iterator_reset(itr);
+	}
+	list_iterator_destroy(itr);
+	slurm_mutex_unlock(&assoc_mgr_association_lock);
+			
+	free_buf(buffer);
+	return SLURM_SUCCESS;
+
+unpack_error:
+	if(buffer)
+		free_buf(buffer);
+	if(itr) {
+		list_iterator_destroy(itr);
+		slurm_mutex_unlock(&assoc_mgr_association_lock);
+	}
+	return SLURM_ERROR;
+}
+
 extern int load_assoc_mgr_state(char *state_save_location)
 {
 	int data_allocated, data_read = 0, error_code = SLURM_SUCCESS;
@@ -1814,10 +2438,10 @@ extern int load_assoc_mgr_state(char *state_save_location)
 	state_file = xstrdup(state_save_location);
 	xstrcat(state_file, "/assoc_mgr_state");
 	//info("looking at the %s file", state_file);
-	slurm_mutex_lock(&local_file_lock);
+	slurm_mutex_lock(&assoc_mgr_file_lock);
 	state_fd = open(state_file, O_RDONLY);
 	if (state_fd < 0) {
-		info("No job state file (%s) to recover", state_file);
+		debug2("No association state file (%s) to recover", state_file);
 		error_code = ENOENT;
 	} else {
 		data_allocated = BUF_SIZE;
@@ -1842,7 +2466,7 @@ extern int load_assoc_mgr_state(char *state_save_location)
 		close(state_fd);
 	}
 	xfree(state_file);
-	slurm_mutex_unlock(&local_file_lock);
+	slurm_mutex_unlock(&assoc_mgr_file_lock);
 
 	buffer = create_buf(data, data_size);
 
@@ -1850,7 +2474,9 @@ extern int load_assoc_mgr_state(char *state_save_location)
 	debug3("Version in assoc_mgr_state header is %u", ver);
 	if (ver > SLURMDBD_VERSION || ver < SLURMDBD_VERSION_MIN) {
 		error("***********************************************");
-		error("Can not recover assoc_mgr state, incompatable version, got %u need > %u <= %u", ver, SLURMDBD_VERSION_MIN, SLURMDBD_VERSION);
+		error("Cannot recover assoc_mgr state, incompatible version, "
+		      "got %u need > %u <= %u", ver,
+		      SLURMDBD_VERSION_MIN, SLURMDBD_VERSION);
 		error("***********************************************");
 		free_buf(buffer);
 		return EFAULT;
@@ -1870,12 +2496,13 @@ extern int load_assoc_mgr_state(char *state_save_location)
 				break;
 			}
 			slurm_mutex_lock(&assoc_mgr_association_lock);
-			if(local_association_list)
-				list_destroy(local_association_list);
-			local_association_list = msg->my_list;
-			_post_association_list(local_association_list);
+			if(assoc_mgr_association_list)
+				list_destroy(assoc_mgr_association_list);
+			assoc_mgr_association_list = msg->my_list;
+			_post_association_list(assoc_mgr_association_list);
+
 			debug("Recovered %u associations", 
-			      list_count(local_association_list));
+			      list_count(assoc_mgr_association_list));
 			slurm_mutex_unlock(&assoc_mgr_association_lock);
 			msg->my_list = NULL;
 			slurmdbd_free_list_msg(SLURMDBD_VERSION, msg);
@@ -1889,14 +2516,14 @@ extern int load_assoc_mgr_state(char *state_save_location)
 				error("No users retrieved");
 				break;
 			}
-			slurm_mutex_lock(&local_user_lock);
-			if(local_user_list)
-				list_destroy(local_user_list);
-			local_user_list = msg->my_list;
-			_post_user_list(local_user_list);
+			slurm_mutex_lock(&assoc_mgr_user_lock);
+			if(assoc_mgr_user_list)
+				list_destroy(assoc_mgr_user_list);
+			assoc_mgr_user_list = msg->my_list;
+			_post_user_list(assoc_mgr_user_list);
 			debug("Recovered %u users", 
-			      list_count(local_user_list));
-			slurm_mutex_unlock(&local_user_lock);
+			      list_count(assoc_mgr_user_list));
+			slurm_mutex_unlock(&assoc_mgr_user_lock);
 			msg->my_list = NULL;
 			slurmdbd_free_list_msg(SLURMDBD_VERSION, msg);
 			break;
@@ -1909,13 +2536,13 @@ extern int load_assoc_mgr_state(char *state_save_location)
 				error("No qos retrieved");
 				break;
 			}
-			slurm_mutex_lock(&local_qos_lock);
-			if(local_qos_list)
-				list_destroy(local_qos_list);
-			local_qos_list = msg->my_list;
+			slurm_mutex_lock(&assoc_mgr_qos_lock);
+			if(assoc_mgr_qos_list)
+				list_destroy(assoc_mgr_qos_list);
+			assoc_mgr_qos_list = msg->my_list;
 			debug("Recovered %u qos", 
-			      list_count(local_qos_list));
-			slurm_mutex_unlock(&local_qos_lock);
+			      list_count(assoc_mgr_qos_list));
+			slurm_mutex_unlock(&assoc_mgr_qos_lock);
 			msg->my_list = NULL;
 			slurmdbd_free_list_msg(SLURMDBD_VERSION, msg);	
 			break;
@@ -1970,25 +2597,27 @@ extern int assoc_mgr_refresh_lists(void *db_conn, assoc_init_args_t *args)
 		return SLURM_SUCCESS;
 	}
 
-	if(cache_level & ASSOC_MGR_CACHE_ASSOC) 
-		if(_refresh_local_association_list(db_conn, enforce)
+	if(cache_level & ASSOC_MGR_CACHE_ASSOC) {
+		if(_refresh_assoc_mgr_association_list(db_conn, enforce)
 		   == SLURM_ERROR)
 			return SLURM_ERROR;
-
+	}
 	if(cache_level & ASSOC_MGR_CACHE_QOS)
-		if(_refresh_local_qos_list(db_conn, enforce) == SLURM_ERROR)
+		if(_refresh_assoc_mgr_qos_list(db_conn, enforce) == SLURM_ERROR)
 			return SLURM_ERROR;
 
 	if(cache_level & ASSOC_MGR_CACHE_USER)
-		if(_refresh_local_user_list(db_conn, enforce) == SLURM_ERROR)
+		if(_refresh_assoc_mgr_user_list(db_conn, enforce)
+		   == SLURM_ERROR)
 			return SLURM_ERROR;
 
 	if(cache_level & ASSOC_MGR_CACHE_WCKEY)
-		if(_refresh_local_wckey_list(db_conn, enforce) == SLURM_ERROR)
+		if(_refresh_assoc_wckey_list(db_conn, enforce) == SLURM_ERROR)
 			return SLURM_ERROR;
 
 	running_cache = 0;
-	
-	return SLURM_SUCCESS;
+
+	return SLURM_SUCCESS;	
 }
 
+
diff --git a/src/common/assoc_mgr.h b/src/common/assoc_mgr.h
index 6c8414d4a583d411dba03b8cef4d681f77f31d66..a2c3e9dc15e1439dc2bd32378d1a6ad6fef2899f 100644
--- a/src/common/assoc_mgr.h
+++ b/src/common/assoc_mgr.h
@@ -5,10 +5,11 @@
  *  Copyright (C) 2008 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Danny Auble <da@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -62,7 +63,35 @@ typedef struct {
  	void (*remove_assoc_notify) (acct_association_rec_t *rec);
 } assoc_init_args_t;
 
+extern List assoc_mgr_association_list;
+extern List assoc_mgr_qos_list;
+extern List assoc_mgr_user_list;
+extern List assoc_mgr_wckey_list;
+
+extern acct_association_rec_t *assoc_mgr_root_assoc;
 extern pthread_mutex_t assoc_mgr_association_lock;
+extern pthread_mutex_t assoc_mgr_qos_lock;
+extern pthread_mutex_t assoc_mgr_user_lock;
+extern pthread_mutex_t assoc_mgr_file_lock;
+extern pthread_mutex_t assoc_mgr_wckey_lock;
+
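+/* Code that walks the lists above directly must hold the matching
+ * lock for the whole traversal, as assoc_mgr.c itself does. */
+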
+/* 
+ * get info from the storage 
+ * IN:  assoc - acct_association_rec_t with the uid set to that of
+ *		    the user whose associations are requested.
+ * IN: enforce - return an error if no such association exists
+ * IN/OUT: assoc_list - contains a list of assoc_rec ptrs to
+ *                      associations this user has in the list.  This
+ *                      list should be created with list_create(NULL)
+ *                      since we are putting pointers to memory used elsewhere.
+ * RET: SLURM_SUCCESS on success, else SLURM_ERROR
+ */
+extern int assoc_mgr_get_user_assocs(void *db_conn,
+				     acct_association_rec_t *assoc,
+				     int enforce, 
+				     List assoc_list);
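+/*
+ * A minimal usage sketch (hypothetical caller; db_conn and my_uid are
+ * assumed to come from the surrounding code):
+ *
+ *	acct_association_rec_t assoc;
+ *	List assoc_list = list_create(NULL);
+ *	memset(&assoc, 0, sizeof(acct_association_rec_t));
+ *	assoc.uid = my_uid;
+ *	if (assoc_mgr_get_user_assocs(db_conn, &assoc,
+ *				      ACCOUNTING_ENFORCE_ASSOCS,
+ *				      assoc_list) != SLURM_SUCCESS)
+ *		error("uid %u has no associations", my_uid);
+ *	list_destroy(assoc_list);	(entries are cache-owned)
+ */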
 
 /* 
  * get info from the storage 
@@ -73,6 +102,7 @@ extern pthread_mutex_t assoc_mgr_association_lock;
  * IN: enforce - return an error if no such association exists
  * IN/OUT: assoc_pptr - if non-NULL then return a pointer to the 
  *			acct_association record in cache on success
+ *                      DO NOT FREE.
  * RET: SLURM_SUCCESS on success, else SLURM_ERROR
  */
 extern int assoc_mgr_fill_in_assoc(void *db_conn,
@@ -85,11 +115,26 @@ extern int assoc_mgr_fill_in_assoc(void *db_conn,
  * IN/OUT:  user - acct_user_rec_t with the name set of the user.
  *                 "default_account" will be filled in on
  *                 successful return DO NOT FREE.
+ * IN/OUT: user_pptr - if non-NULL then return a pointer to the 
+ *		       acct_user record in cache on success
+ *                     DO NOT FREE.
  * RET: SLURM_SUCCESS on success SLURM_ERROR else
  */
 extern int assoc_mgr_fill_in_user(void *db_conn, acct_user_rec_t *user,
-				  int enforce);
+				  int enforce,
+				  acct_user_rec_t **user_pptr);
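+/*
+ * Example (a sketch; db_conn and uid are assumed from the caller):
+ *
+ *	acct_user_rec_t user;
+ *	acct_user_rec_t *user_ptr = NULL;
+ *	memset(&user, 0, sizeof(acct_user_rec_t));
+ *	user.uid = uid;
+ *	if (assoc_mgr_fill_in_user(db_conn, &user, 0, &user_ptr)
+ *	    == SLURM_SUCCESS && user_ptr)
+ *		debug("default account: %s", user_ptr->default_acct);
+ */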
 
+/* 
+ * get info from the storage 
+ * IN/OUT:  qos - acct_qos_rec_t with the id (or name) set of the qos.
+ * IN/OUT:  qos_pptr - if non-NULL then return a pointer to the 
+ *		       acct_qos record in cache on success
+ *                     DO NOT FREE.
+ * RET: SLURM_SUCCESS on success SLURM_ERROR else
+ */
+extern int assoc_mgr_fill_in_qos(void *db_conn, acct_qos_rec_t *qos,
+				 int enforce,
+				 acct_qos_rec_t **qos_pptr);
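+/*
+ * Example (a sketch; qos_id is assumed known to the cache):
+ *
+ *	acct_qos_rec_t qos;
+ *	memset(&qos, 0, sizeof(acct_qos_rec_t));
+ *	qos.id = qos_id;
+ *	if (assoc_mgr_fill_in_qos(db_conn, &qos, 0, NULL) == SLURM_SUCCESS)
+ *		debug("qos %u priority %u", qos.id, qos.priority);
+ */
+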
 /* 
  * get info from the storage 
  * IN/OUT:  wckey - acct_wckey_rec_t with the name, cluster and user
@@ -125,33 +170,43 @@ extern int assoc_mgr_is_user_acct_coord(void *db_conn, uint32_t uid,
 extern int assoc_mgr_init(void *db_conn, assoc_init_args_t *args);
 extern int assoc_mgr_fini(char *state_save_location);
 
+/*
+ * get the share information from the association list in the form of
+ * a list containing association_shares_object_t's
+ * IN: uid: uid_t of user issuing the request
+ * IN: acct_list: char * list of accounts you want (NULL for all)
+ * IN: user_list: char * list of user names you want (NULL for all)
+ * RET: List of association_shares_object_t's; free with list_destroy()
+ */
+extern List assoc_mgr_get_shares(
+	void *db_conn, uid_t uid, List acct_list, List user_list);
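+/*
+ * Example (a sketch; pass NULL lists to get every association):
+ *
+ *	List shares = assoc_mgr_get_shares(db_conn, uid, NULL, NULL);
+ *	if (shares) {
+ *		info("%d share records", list_count(shares));
+ *		list_destroy(shares);
+ *	}
+ */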
+
 /* 
- * update associations in local cache 
+ * update associations in cache 
  * IN:  acct_update_object_t *object
  * RET: SLURM_SUCCESS on success (or not found) SLURM_ERROR else
  */
-extern int assoc_mgr_update_local_assocs(acct_update_object_t *update);
+extern int assoc_mgr_update_assocs(acct_update_object_t *update);
 
 /* 
- * update wckeys in local cache 
+ * update wckeys in cache 
  * IN:  acct_update_object_t *object
  * RET: SLURM_SUCCESS on success (or not found) SLURM_ERROR else
  */
-extern int assoc_mgr_update_local_wckeys(acct_update_object_t *update);
+extern int assoc_mgr_update_wckeys(acct_update_object_t *update);
 
 /* 
- * update qos in local cache 
+ * update qos in cache 
  * IN:  acct_update_object_t *object
  * RET: SLURM_SUCCESS on success (or not found) SLURM_ERROR else
  */
-extern int assoc_mgr_update_local_qos(acct_update_object_t *update);
+extern int assoc_mgr_update_qos(acct_update_object_t *update);
 
 /* 
- * update users in local cache 
+ * update users in cache 
  * IN:  acct_update_object_t *object
  * RET: SLURM_SUCCESS on success (or not found) SLURM_ERROR else
  */
-extern int assoc_mgr_update_local_users(acct_update_object_t *update);
+extern int assoc_mgr_update_users(acct_update_object_t *update);
 
 /* 
  * validate that an association ID is still valid 
@@ -177,6 +232,12 @@ extern void assoc_mgr_clear_used_info(void);
  */
 extern int dump_assoc_mgr_state(char *state_save_location);
 
+/*
+ * Read in the usage for associations if the database
+ * is up when starting.
+ */
+extern int load_assoc_usage(char *state_save_location);
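+/*
+ * Typically called once at startup after the association cache is
+ * populated (it is a no-op when no cache exists), e.g.:
+ *
+ *	(void) load_assoc_usage(state_save_location);
+ */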
+
 /*
  * Read in the information of the association mgr if the database
  * isn't up when starting.
diff --git a/src/common/basil_resv_conf.c b/src/common/basil_resv_conf.c
new file mode 100644
index 0000000000000000000000000000000000000000..7f185cd7b15ed861bbcd36d5a5f95814f7f75ae4
--- /dev/null
+++ b/src/common/basil_resv_conf.c
@@ -0,0 +1,85 @@
+/*****************************************************************************\
+ *  basil_resv_conf.c - user interface to BASIL for confirming a resource
+ *	reservation. BASIL is Cray's Batch Application Scheduler Interface 
+ *	Layer.
+ *****************************************************************************
+ *  Copyright (C) 2009 Lawrence Livermore National Security.
+ *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+ *  Written by Morris Jette <jette1@llnl.gov>
+ *  CODE-OCEC-09-009. All rights reserved.
+ *  
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
+ *  
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission 
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and 
+ *  distribute linked combinations including the two. You must obey the GNU 
+ *  General Public License in all respects for all of the code used other than 
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this 
+ *  exception to your version of the file(s), but you are not obligated to do 
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in 
+ *  the program, then also delete it here.
+ *  
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *  
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#if HAVE_CONFIG_H
+#  include "config.h"
+#  if HAVE_INTTYPES_H
+#    include <inttypes.h>
+#  else
+#    if HAVE_STDINT_H
+#      include <stdint.h>
+#    endif
+#  endif		/* HAVE_INTTYPES_H */
+#endif
+
+#include <slurm/slurm_errno.h>
+
+#include "src/common/log.h"
+
+#define BASIL_DEBUG 1
+
+/*
+ * basil_resv_conf - confirm a previously created BASIL resource reservation.
+ *	This must be called from the same container from which the user 
+ *	application is to run. The container is normally a Linux Process
+ *	Group or SGI Process Aggregate (see http://oss.sgi.com/projects/pagg).
+ * IN reservation_id - ID of the reservation to confirm
+ * IN job_id - SLURM job ID
+ * RET 0 or error code
+ */
+extern int basil_resv_conf(char *reservation_id, uint32_t job_id)
+{
+	int error_code = SLURM_SUCCESS;
+#ifdef HAVE_CRAY_XT
+#ifdef APBASIL_LOC
+	/* Issue the BASIL CONFIRM request */
+	if (request_failure) {
+		error("basil confirm of %s error: %s", reservation_id, "TBD");
+		return SLURM_ERROR;
+	}
+	debug("basil confirm of reservation %s by job %u complete", 
+	      reservation_id, job_id);
+#else
+	debug("basil confirm of reservation %s by job %u skipped; "
+	      "apbasil not configured", reservation_id, job_id);
+#endif	/* APBASIL_LOC */
+#endif	/* HAVE_CRAY_XT */
+	return error_code;
+}
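A minimal caller sketch for the new basil_resv_conf() API, assuming the
reservation ID has already been fetched from the job's select_jobinfo;
the helper name and the launch step are illustrative, not part of this
patch:

    /* Sketch: confirm the job's BASIL reservation before launching
     * its tasks.  Per the API comment, this must run inside the job's
     * process container (process group or SGI pagg). */
    #include <stdint.h>
    #include "src/common/basil_resv_conf.h"

    static int confirm_then_launch(char *resv_id, uint32_t job_id)
    {
            if (basil_resv_conf(resv_id, job_id) != 0)
                    return -1;      /* confirm failed; do not launch */
            /* ... fork/exec the user application here ... */
            return 0;
    }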
diff --git a/src/common/basil_resv_conf.h b/src/common/basil_resv_conf.h
new file mode 100644
index 0000000000000000000000000000000000000000..ab72578f9a4e0c6e9c689457a77a882105428c0c
--- /dev/null
+++ b/src/common/basil_resv_conf.h
@@ -0,0 +1,66 @@
+/*****************************************************************************\
+ *  basil_resv_conf.h - user interface to BASIL for confirming a resource
+ *	reservation. BASIL is Cray's Batch Application Scheduler Interface 
+ *	Layer.
+ *****************************************************************************
+ *  Copyright (C) 2009 Lawrence Livermore National Security.
+ *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+ *  Written by Morris Jette <jette1@llnl.gov>
+ *  CODE-OCEC-09-009. All rights reserved.
+ *  
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
+ *  
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission 
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and 
+ *  distribute linked combinations including the two. You must obey the GNU 
+ *  General Public License in all respects for all of the code used other than 
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this 
+ *  exception to your version of the file(s), but you are not obligated to do 
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in 
+ *  the program, then also delete it here.
+ *  
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *  
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#ifndef _HAVE_BASIL_RESV_CONF_H
+#define _HAVE_BASIL_RESV_CONF_H
+
+#if HAVE_CONFIG_H
+#  include "config.h"
+#  if HAVE_INTTYPES_H
+#    include <inttypes.h>
+#  else
+#    if HAVE_STDINT_H
+#      include <stdint.h>
+#    endif
+#  endif		/* HAVE_INTTYPES_H */
+#endif
+
+/*
+ * basil_resv_conf - confirm a previously created BASIL resource reservation.
+ *	This must be called from the same container in which the user
+ *	application is to run. The container is normally a Linux Process
+ *	Group or SGI Process Aggregate (see http://oss.sgi.com/projects/pagg).
+ * IN reservation_id - ID of the reservation to confirm
+ * IN job_id - SLURM job ID
+ * RET 0 or error code
+ */
+extern int basil_resv_conf(char *reservation_id, uint32_t job_id);
+
+#endif	/* !_HAVE_BASIL_RESV_CONF_H */
diff --git a/src/common/bitstring.c b/src/common/bitstring.c
index a1ecb9473eb745a9c3727ecb87cca5a9eac57700..e99faa631688bff430080071ea91a3bcc9ee74d3 100644
--- a/src/common/bitstring.c
+++ b/src/common/bitstring.c
@@ -7,10 +7,11 @@
  *  Copyright (C) 2002 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Jim Garlick <garlick@llnl.gov>, Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -654,12 +655,21 @@ int
 bit_set_count(bitstr_t *b)
 {
 	int count = 0;
-	bitoff_t bit;
+	bitoff_t bit, bit_cnt;
+	int word_size = sizeof(bitstr_t) * 8;
 
 	_assert_bitstr_valid(b);
 
-	for (bit = 0; bit < _bitstr_bits(b); bit += sizeof(bitstr_t)*8)
+	bit_cnt = _bitstr_bits(b);
+	for (bit = 0; bit < bit_cnt; bit += word_size) {
+		if ((bit + word_size - 1) >= bit_cnt)
+			break;
 		count += hweight(b[_bit_word(bit)]);
+	}
+	for ( ; bit < bit_cnt; bit++) {
+		if (bit_test(b, bit))
+			count++;
+	}
 
 	return count;
 }
@@ -671,14 +681,23 @@ extern int
 bit_overlap(bitstr_t *b1, bitstr_t *b2)
 {
 	int count = 0;
-	bitoff_t bit;
-	
+	bitoff_t bit, bit_cnt;
+	int word_size = sizeof(bitstr_t) * 8;
+
 	_assert_bitstr_valid(b1);
 	_assert_bitstr_valid(b2);
 	assert(_bitstr_bits(b1) == _bitstr_bits(b2));
 
-	for (bit = 0; bit < _bitstr_bits(b1); bit += sizeof(bitstr_t)*8) 
+	bit_cnt = _bitstr_bits(b1);
+	for (bit = 0; bit < bit_cnt; bit += word_size) {
+		if ((bit + word_size - 1) >= bit_cnt)
+			break;
 		count += hweight(b1[_bit_word(bit)] & b2[_bit_word(bit)]);
+	}
+	for ( ; bit < bit_cnt; bit++) {
+		if (bit_test(b1, bit) && bit_test(b2, bit))
+			count++;
+	}
 
 	return count;
 }
@@ -824,6 +843,7 @@ bitstr_t *
 bit_pick_cnt(bitstr_t *b, bitoff_t nbits) {
 	bitoff_t bit = 0, new_bits, count = 0;
 	bitstr_t *new;
+	int word_size = sizeof(bitstr_t) * 8;
 
 	_assert_bitstr_valid(b);
 
@@ -838,15 +858,16 @@ bit_pick_cnt(bitstr_t *b, bitoff_t nbits) {
 		int word = _bit_word(bit);
 
 		if (b[word] == 0) {
-			bit += sizeof(bitstr_t)*8;
+			bit += word_size;
 			continue;
 		}
 
 		new_bits = hweight(b[word]);
-		if ((count + new_bits) <= nbits) {
+		if (((count + new_bits) <= nbits) && 
+		    ((bit + word_size - 1) < _bitstr_bits(b))) {
 			new[word] = b[word];
 			count += new_bits;
-			bit += sizeof(bitstr_t)*8;
+			bit += word_size;
 			continue;
 		}
 		while ((bit < _bitstr_bits(b)) && (count < nbits)) {
@@ -930,6 +951,8 @@ bit_unfmt(bitstr_t *b, char *str)
 	int *intvec, *p, rc = 0; 
 
 	_assert_bitstr_valid(b);
+	if (str[0] == '\0')	/* no bits set */
+		return rc;
 	intvec = bitfmt2int(str);
 	if (intvec == NULL) 
 		return -1;
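The bit_set_count()/bit_overlap() rework above is the usual
full-words-then-tail pattern: whole machine words are counted with a
population count, and the final partial word is walked bit by bit so
that bits beyond the bitstring's logical length are never included. A
self-contained illustration of the same pattern, using a plain uint64_t
array and a portable popcount in place of bitstr_t and hweight():

    #include <stdint.h>

    /* Portable popcount; stands in for SLURM's hweight(). */
    static int popcount64(uint64_t w)
    {
            int n = 0;
            while (w) {
                    w &= w - 1;     /* clear the lowest set bit */
                    n++;
            }
            return n;
    }

    /* Count the set bits among the first nbits of b[]: full words
     * first, then the trailing partial word one bit at a time. */
    static int set_count(const uint64_t *b, int nbits)
    {
            const int word_size = 64;
            int count = 0, bit = 0;

            for ( ; (bit + word_size - 1) < nbits; bit += word_size)
                    count += popcount64(b[bit / word_size]);
            for ( ; bit < nbits; bit++) {
                    if (b[bit / word_size] &
                        ((uint64_t)1 << (bit % word_size)))
                            count++;
            }
            return count;
    }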
diff --git a/src/common/bitstring.h b/src/common/bitstring.h
index e937b5725c846a74561c344c7847019d81bb3afb..b34d7cb2b2d0e65faea1f52b9420127921fd2e7e 100644
--- a/src/common/bitstring.h
+++ b/src/common/bitstring.h
@@ -8,10 +8,11 @@
  *  Copyright (C) 2002 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Jim Garlick <garlick@llnl.gov>, Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/common/checkpoint.c b/src/common/checkpoint.c
index aae9f9f84c5bd9691309722438f6152971528917..ee2a606fc3ed70de941abc1fff9c92e11c2f1483 100644
--- a/src/common/checkpoint.c
+++ b/src/common/checkpoint.c
@@ -1,14 +1,16 @@
 /*****************************************************************************\
  *  checkpoint.c - implementation-independent checkpoint functions
- *  $Id: checkpoint.c 17005 2009-03-24 21:57:43Z da $
+ *  $Id: checkpoint.c 17008 2009-03-24 23:28:13Z da $
  *****************************************************************************
- *  Copyright (C) 2004 The Regents of the University of California.
+ *  Copyright (C) 2004-2007 The Regents of the University of California.
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.com>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -57,18 +59,22 @@
  * at the end of the structure.
  */
 typedef struct slurm_checkpoint_ops {
-	int     (*ckpt_op) (uint16_t op, uint16_t data, 
-			struct step_record * step_ptr, time_t * event_time,
-			 uint32_t *error_code, char **error_msg);
+	int     (*ckpt_op) (uint32_t job_id, uint32_t step_id, 
+			    struct step_record *step_ptr, uint16_t op,
+			    uint16_t data, char *image_dir, time_t *event_time,
+			    uint32_t *error_code, char **error_msg);
 	int	(*ckpt_comp) (struct step_record * step_ptr, time_t event_time,
-			 uint32_t error_code, char *error_msg);
+			      uint32_t error_code, char *error_msg);
 	int	(*ckpt_task_comp) (struct step_record * step_ptr, uint32_t task_id,
-			 time_t event_time, uint32_t error_code, char *error_msg);
+				   time_t event_time, uint32_t error_code, char *error_msg);
 
 	int	(*ckpt_alloc_jobinfo) (check_jobinfo_t *jobinfo);
 	int	(*ckpt_free_jobinfo) (check_jobinfo_t jobinfo);
 	int	(*ckpt_pack_jobinfo) (check_jobinfo_t jobinfo, Buf buffer);
 	int	(*ckpt_unpack_jobinfo) (check_jobinfo_t jobinfo, Buf buffer);
+	int     (*ckpt_stepd_prefork) (void *slurmd_job);
+	int     (*ckpt_signal_tasks) (void *slurmd_job, char *image_dir);
+	int     (*ckpt_restart_task) (void *slurmd_job, char *image_dir, int gtid);
 } slurm_checkpoint_ops_t;
 
 /*
@@ -155,7 +161,10 @@ _slurm_checkpoint_get_ops( slurm_checkpoint_context_t c )
 		"slurm_ckpt_alloc_job",
 		"slurm_ckpt_free_job",
 		"slurm_ckpt_pack_job",
-		"slurm_ckpt_unpack_job"
+		"slurm_ckpt_unpack_job",
+		"slurm_ckpt_stepd_prefork",
+		"slurm_ckpt_signal_tasks",
+		"slurm_ckpt_restart_task"
 	};
         int n_syms = sizeof( syms ) / sizeof( char * );
 
@@ -256,17 +265,20 @@ checkpoint_fini(void)
 
 /* perform some checkpoint operation */
 extern int
-checkpoint_op(uint16_t op, uint16_t data, void * step_ptr,
-		time_t * event_time, uint32_t *error_code, char **error_msg)
+checkpoint_op(uint32_t job_id, uint32_t step_id, 
+	      void *step_ptr, uint16_t op,
+	      uint16_t data, char *image_dir, time_t *event_time,
+	      uint32_t *error_code, char **error_msg)
 {
 	int retval = SLURM_SUCCESS;
 
 	slurm_mutex_lock( &context_lock );
-	if ( g_context )
-		retval = (*(g_context->ops.ckpt_op))(op, data, 
-			(struct step_record *) step_ptr, event_time, 
-			error_code, error_msg);
-	else {
+	if ( g_context ) {
+		retval = (*(g_context->ops.ckpt_op))(job_id, step_id, 
+					(struct step_record *) step_ptr,
+					op, data, image_dir, 
+					event_time, error_code, error_msg);
+	} else {
 		error ("slurm_checkpoint plugin context not initialized");
 		retval = ENOENT;
 	}
@@ -378,3 +390,84 @@ extern int  checkpoint_unpack_jobinfo  (check_jobinfo_t jobinfo, Buf buffer)
 	slurm_mutex_unlock( &context_lock );
 	return retval;
 }
+
+extern int checkpoint_stepd_prefork (void *job)
+{
+        int retval = SLURM_SUCCESS;
+
+        slurm_mutex_lock( &context_lock );
+        if ( g_context )
+                retval = (*(g_context->ops.ckpt_stepd_prefork))(job);
+        else {
+                error ("slurm_checkpoint plugin context not initialized");
+                retval = ENOENT;
+        }
+        slurm_mutex_unlock( &context_lock );
+        return retval;
+}
+
+extern int checkpoint_signal_tasks (void *job, char *image_dir)
+{
+        int retval = SLURM_SUCCESS;
+
+        slurm_mutex_lock( &context_lock );
+        if ( g_context )
+                retval = (*(g_context->ops.ckpt_signal_tasks))(job, image_dir);
+        else {
+                error ("slurm_checkpoint plugin context not initialized");
+                retval = ENOENT;
+        }
+        slurm_mutex_unlock( &context_lock );
+        return retval;
+}
+
+
+extern int checkpoint_restart_task (void *job, char *image_dir, int gtid)
+{
+        int retval = SLURM_SUCCESS;
+
+        slurm_mutex_lock( &context_lock );
+        if ( g_context ) {
+                retval = (*(g_context->ops.ckpt_restart_task))(job, image_dir, 
+							       gtid);
+        } else {
+                error ("slurm_checkpoint plugin context not initialized");
+                retval = ENOENT;
+        }
+        slurm_mutex_unlock( &context_lock );
+        return retval;
+}
+
+extern int checkpoint_tasks (uint32_t job_id, uint32_t step_id, 
+			     time_t begin_time, char *image_dir, 
+			     uint16_t wait, char *nodelist)
+{
+	int rc = SLURM_SUCCESS, temp_rc;
+	checkpoint_tasks_msg_t ckpt_req;
+	slurm_msg_t req_msg;
+	List ret_list;
+        ret_data_info_t *ret_data_info = NULL;
+
+	slurm_msg_t_init(&req_msg);
+	ckpt_req.job_id		= job_id;
+	ckpt_req.job_step_id 	= step_id;
+	ckpt_req.timestamp	= begin_time;
+	ckpt_req.image_dir	= image_dir;
+	req_msg.msg_type	= REQUEST_CHECKPOINT_TASKS;
+	req_msg.data		= &ckpt_req;
+
+	if ((ret_list = slurm_send_recv_msgs(nodelist, &req_msg, (wait*1000),
+					     false))) {
+		while((ret_data_info = list_pop(ret_list))) {
+                        temp_rc = slurm_get_return_code(ret_data_info->type,
+                                                        ret_data_info->data);
+                        if(temp_rc)
+                                rc = temp_rc;
+                }
+	} else {
+                error("slurm_checkpoint_tasks: no list was returned");
+                rc = SLURM_ERROR;
+	}
+	slurm_seterrno(rc);
+	return rc;
+}
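The three names appended to syms[] above must keep the same order as
the new function pointers in slurm_checkpoint_ops_t, because the plugin
loader resolves the symbol list positionally into the ops struct. A
reduced model of that pattern; the resolve callback stands in for the
real plugin symbol lookup:

    #include <stddef.h>

    typedef int (*op_fn_t)(void);

    typedef struct {                /* field order mirrors syms[] */
            op_fn_t stepd_prefork;
            op_fn_t signal_tasks;
            op_fn_t restart_task;
    } ops_t;

    static const char *syms[] = {
            "slurm_ckpt_stepd_prefork",
            "slurm_ckpt_signal_tasks",
            "slurm_ckpt_restart_task",
    };

    /* Resolve each name into the matching struct slot; treating the
     * ops struct as an array of function pointers is how SLURM's
     * plugin loader fills these tables. */
    static int load_ops(ops_t *ops, op_fn_t (*resolve)(const char *))
    {
            op_fn_t *slot = (op_fn_t *) ops;
            size_t i, n = sizeof(syms) / sizeof(syms[0]);

            for (i = 0; i < n; i++) {
                    if ((slot[i] = resolve(syms[i])) == NULL)
                            return -1;      /* incomplete plugin */
            }
            return 0;
    }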
diff --git a/src/common/checkpoint.h b/src/common/checkpoint.h
index 0a5c8b92c22cb202f02e14266734bf5ec968d40d..ce367c2fe0730ae58e170c9338779d1a9370f981 100644
--- a/src/common/checkpoint.h
+++ b/src/common/checkpoint.h
@@ -1,14 +1,16 @@
 /*****************************************************************************\
  *  checkpoint.h - implementation-independent checkpoint API definitions. 
- *  $Id: checkpoint.h 13672 2008-03-19 23:10:58Z jette $
+ *  $Id: checkpoint.h 16867 2009-03-12 16:35:42Z jette $
  *****************************************************************************
- *  Copyright (C) 2004 The Regents of the University of California.
+ *  Copyright (C) 2004-2007 The Regents of the University of California.
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.com>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -36,8 +38,8 @@
  *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
 \*****************************************************************************/
 
-#ifndef __CHECKPOINT_H__
-#define __CHECKPOINT_H__
+#ifndef _HAVE_SLURM_CHECKPOINT_H__
+#define _HAVE_SLURM_CHECKPOINT_H__
 
 #include "slurm/slurm.h"
 #include "src/common/macros.h"
@@ -70,9 +72,11 @@ extern int checkpoint_init(char *checkpoint_type);
 /* shutdown checkpoint plugin */
 extern int checkpoint_fini(void);
 
-/* perform many checkpoint operation */
-extern int checkpoint_op(uint16_t op, uint16_t data, void * step_ptr, 
-		time_t * event_time, uint32_t *error_code, char **error_msg);
+/* perform a checkpoint operation on a job or step */
+extern int checkpoint_op(uint32_t job_id, uint32_t step_id, 
+			 void *step_ptr, uint16_t op,
+			 uint16_t data, char *image_dir, time_t *event_time,
+			 uint32_t *error_code, char **error_msg);
 
 /* note checkpoint completion */
 extern int checkpoint_comp(void * step_ptr, time_t event_time, uint32_t error_code,
@@ -95,5 +99,18 @@ extern int checkpoint_free_jobinfo(check_jobinfo_t jobinfo);
 extern int  checkpoint_pack_jobinfo  (check_jobinfo_t jobinfo, Buf buffer);
 extern int  checkpoint_unpack_jobinfo  (check_jobinfo_t jobinfo, Buf buffer);
 
-#endif /*__CHECKPOINT_H__*/
+/* create the necessary threads before forking the tasks */
+extern int checkpoint_stepd_prefork (void *slurmd_job);
+
+/* send the checkpoint request to the tasks */
+extern int checkpoint_signal_tasks (void *slurmd_job, char *image_dir);
+
+/* restart the requested job task */
+extern int checkpoint_restart_task(void *slurmd_job, char *image_dir, int gtid);
+
+/* send checkpoint request to specified job/step */
+extern int checkpoint_tasks (uint32_t job_id, uint32_t step_id, 
+			     time_t begin_time, char *image_dir, 
+			     uint16_t wait, char *nodelist);
+#endif /*_HAVE_SLURM_CHECKPOINT_H__*/
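For reference, a hedged example of a call through the widened
checkpoint_op() signature; the opcode, image directory, and step
pointer are placeholders (real callers pass a CHECK_* value from
slurm.h and the job's checkpoint directory):

    #include <time.h>
    #include <stdint.h>
    #include "src/common/checkpoint.h"

    static int ckpt_step(uint32_t job_id, uint32_t step_id,
                         void *step_ptr, uint16_t op, char *image_dir)
    {
            time_t event_time = 0;
            uint32_t err_code = 0;
            char *err_msg = NULL;

            /* The data argument's meaning is opcode-specific;
             * 0 is only a placeholder here. */
            return checkpoint_op(job_id, step_id, step_ptr, op, 0,
                                 image_dir, &event_time, &err_code,
                                 &err_msg);
    }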
 
diff --git a/src/common/daemonize.c b/src/common/daemonize.c
index c396d84311101af9ecd512ca251e7c8e917f68b8..4182100bf1e1879e53da218ccd78a69d960a14e2 100644
--- a/src/common/daemonize.c
+++ b/src/common/daemonize.c
@@ -1,14 +1,15 @@
 /*****************************************************************************\
  *  daemonize.c - daemonization routine
- *  $Id: daemonize.c 13672 2008-03-19 23:10:58Z jette $
+ *  $Id: daemonize.c 16616 2009-02-20 17:00:27Z jette $
  *****************************************************************************
  *  Copyright (C) 2002 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Mark A. Grondona <mgrondona@llnl.gov>.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/common/daemonize.h b/src/common/daemonize.h
index 93c57a1c0ebc75b512942bec415a8728c75723df..39eeff6e9f99886cdb2685d6d70b598df4e97884 100644
--- a/src/common/daemonize.h
+++ b/src/common/daemonize.h
@@ -1,14 +1,15 @@
 /*****************************************************************************\
  * src/slurmd/daemonize.h - function definition for making a daemon
- * $Id: daemonize.h 13672 2008-03-19 23:10:58Z jette $
+ * $Id: daemonize.h 16616 2009-02-20 17:00:27Z jette $
  *****************************************************************************
  *  Copyright (C) 2002 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Mark Grondona <mgrondona@llnl.gov>.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/common/eio.c b/src/common/eio.c
index 3f7ce2ca85289c919f5f9272861db60fdc2c2522..8f5485e09c68661d2ad09f8a837cb26acf1c023b 100644
--- a/src/common/eio.c
+++ b/src/common/eio.c
@@ -4,10 +4,11 @@
  *  Copyright (C) 2002 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Mark Grondona <mgrondona@llnl.gov>.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/common/eio.h b/src/common/eio.h
index 1351ad56093b7c542dfedb36adaa3f4c20aea0a5..53479bd8dd3965de6eb25c1b69f8c846a6bddb6b 100644
--- a/src/common/eio.h
+++ b/src/common/eio.h
@@ -4,10 +4,11 @@
  *  Copyright (C) 2002 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Mark Grondona <mgrondona@llnl.gov>.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/common/env.c b/src/common/env.c
index 2b00701c959fce7b5f6546222b775b61bacb0f98..78498e26510c8724493a2dc745c375efaa172285 100644
--- a/src/common/env.c
+++ b/src/common/env.c
@@ -2,13 +2,14 @@
  *  src/common/env.c - add an environment variable to environment vector
  *****************************************************************************
  *  Copyright (C) 2002-2007 The Regents of the University of California.
- *  Copyright (C) 2008 Lawrence Livermore National Security.
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Mark Grondona <mgrondona@llnl.gov>, Danny Auble <da@llnl.gov>.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -146,6 +147,48 @@ static bool _discard_env(char *name, char *value)
 	return false;
 }
 
+static void _set_distribution(task_dist_states_t distribution,
+			      char **dist, char **lllp_dist)
+{
+	if (((int)distribution >= 0)
+	    &&  (distribution != SLURM_DIST_UNKNOWN)) {
+		switch(distribution) {
+		case SLURM_DIST_CYCLIC:
+			*dist      = "cyclic";
+			break;
+		case SLURM_DIST_BLOCK:
+			*dist      = "block";
+			break;
+		case SLURM_DIST_PLANE:
+			*dist      = "plane";
+			*lllp_dist = "plane";
+			break;
+		case SLURM_DIST_ARBITRARY:
+			*dist      = "arbitrary";
+			break;
+		case SLURM_DIST_CYCLIC_CYCLIC:
+			*dist      = "cyclic";
+			*lllp_dist = "cyclic";
+			break;
+		case SLURM_DIST_CYCLIC_BLOCK:
+			*dist      = "cyclic";
+			*lllp_dist = "block";
+			break;
+		case SLURM_DIST_BLOCK_CYCLIC:
+			*dist      = "block";
+			*lllp_dist = "cyclic";
+			break;
+		case SLURM_DIST_BLOCK_BLOCK:
+			*dist      = "block";
+			*lllp_dist = "block";
+			break;
+		default:
+			error("unknown dist, type %d", distribution);
+			break;
+		}
+	}
+}
+
 /*
  * Return the number of elements in the environment `env'
  */
@@ -265,8 +308,7 @@ char *getenvp(char **env, const char *name)
 int setup_env(env_t *env)
 {
 	int rc = SLURM_SUCCESS;
-	char *dist = NULL;
-	char *lllp_dist = NULL;
+	char *dist = NULL, *lllp_dist = NULL;
 	char addrbuf[INET_ADDRSTRLEN];
 
 	if (env == NULL)
@@ -319,74 +361,44 @@ int setup_env(env_t *env)
 		rc = SLURM_FAILURE;
 	} 
 
-	if (((int)env->distribution >= 0)
-	&&  (env->distribution != SLURM_DIST_UNKNOWN)) {
-		switch(env->distribution) {
-		case SLURM_DIST_CYCLIC:
-			dist      = "cyclic";
-			lllp_dist = "";
-			break;
-		case SLURM_DIST_BLOCK:
-			dist      = "block";
-			lllp_dist = "";
-			break;
-		case SLURM_DIST_PLANE:
-			dist      = "plane";
-			lllp_dist = "plane";
-			break;
-		case SLURM_DIST_ARBITRARY:
-			dist      = "arbitrary";
-			lllp_dist = "";
-			break;
-		case SLURM_DIST_CYCLIC_CYCLIC:
-			dist      = "cyclic";
-			lllp_dist = "cyclic";
-			break;
-		case SLURM_DIST_CYCLIC_BLOCK:
-			dist      = "cyclic";
-			lllp_dist = "block";
-			break;
-		case SLURM_DIST_BLOCK_CYCLIC:
-			dist      = "block";
-			lllp_dist = "cyclic";
-			break;
-		case SLURM_DIST_BLOCK_BLOCK:
-			dist      = "block";
-			lllp_dist = "block";
-			break;
-		default:
-			error("unknown dist, type %d", env->distribution);
-			dist      = "unknown";
-			lllp_dist = "unknown";
-			break;
-		}
-
+	_set_distribution(env->distribution, &dist, &lllp_dist);
+	if(dist) 
 		if (setenvf(&env->env, "SLURM_DISTRIBUTION", "%s", dist)) {
 			error("Can't set SLURM_DISTRIBUTION env variable");
 			rc = SLURM_FAILURE;
 		}
 
-		if (setenvf(&env->env, "SLURM_DIST_PLANESIZE", "%d", 
+	if(env->distribution == SLURM_DIST_PLANE) 
+		if (setenvf(&env->env, "SLURM_DIST_PLANESIZE", "%u", 
 			    env->plane_size)) {
-			error("Can't set SLURM_DIST_PLANESIZE env variable");
+			error("Can't set SLURM_DIST_PLANESIZE "
+			      "env variable");
 			rc = SLURM_FAILURE;
 		}
-
+	
+	if(lllp_dist)
 		if (setenvf(&env->env, "SLURM_DIST_LLLP", "%s", lllp_dist)) {
 			error("Can't set SLURM_DIST_LLLP env variable");
 			rc = SLURM_FAILURE;
 		}
-	}
+	
 	
 	if (env->cpu_bind_type) {
 		char *str_verbose, *str_bind_type, *str_bind_list;
 		char *str_bind;
 		int len;
 
-		unsetenvp(env->env, "SLURM_CPU_BIND_VERBOSE");
-		unsetenvp(env->env, "SLURM_CPU_BIND_TYPE");
-		unsetenvp(env->env, "SLURM_CPU_BIND_LIST");
-		unsetenvp(env->env, "SLURM_CPU_BIND");
+		if (env->batch_flag) {
+			unsetenvp(env->env, "SBATCH_CPU_BIND_VERBOSE");
+			unsetenvp(env->env, "SBATCH_CPU_BIND_TYPE");
+			unsetenvp(env->env, "SBATCH_CPU_BIND_LIST");
+			unsetenvp(env->env, "SBATCH_CPU_BIND");
+		} else {
+			unsetenvp(env->env, "SLURM_CPU_BIND_VERBOSE");
+			unsetenvp(env->env, "SLURM_CPU_BIND_TYPE");
+			unsetenvp(env->env, "SLURM_CPU_BIND_LIST");
+			unsetenvp(env->env, "SLURM_CPU_BIND");
+		}
 
 		str_verbose = xstrdup ("");
 		if (env->cpu_bind_type & CPU_BIND_VERBOSE) {
@@ -394,11 +406,6 @@ int setup_env(env_t *env)
 		} else {
 			xstrcat(str_verbose, "quiet");
 		}
-		if (setenvf(&env->env, "SLURM_CPU_BIND_VERBOSE", str_verbose)) {
-			error("Unable to set SLURM_CPU_BIND_VERBOSE");
-			rc = SLURM_FAILURE;
-		}
-
 		str_bind_type = xstrdup ("");
 		if (env->cpu_bind_type & CPU_BIND_TO_THREADS) {
 			xstrcat(str_bind_type, "threads,");
@@ -406,6 +413,8 @@ int setup_env(env_t *env)
 			xstrcat(str_bind_type, "cores,");
 		} else if (env->cpu_bind_type & CPU_BIND_TO_SOCKETS) {
 			xstrcat(str_bind_type, "sockets,");
+		} else if (env->cpu_bind_type & CPU_BIND_TO_LDOMS) {
+			xstrcat(str_bind_type, "ldoms,");
 		}
 		if (env->cpu_bind_type & CPU_BIND_NONE) {
 			xstrcat(str_bind_type, "none");
@@ -415,6 +424,12 @@ int setup_env(env_t *env)
 			xstrcat(str_bind_type, "map_cpu:");
 		} else if (env->cpu_bind_type & CPU_BIND_MASK) {
 			xstrcat(str_bind_type, "mask_cpu:");
+		} else if (env->cpu_bind_type & CPU_BIND_LDRANK) {
+			xstrcat(str_bind_type, "rank_ldom");
+		} else if (env->cpu_bind_type & CPU_BIND_LDMAP) {
+			xstrcat(str_bind_type, "map_ldom:");
+		} else if (env->cpu_bind_type & CPU_BIND_LDMASK) {
+			xstrcat(str_bind_type, "mask_ldom:");
 		}
 		len = strlen(str_bind_type);
 		if (len) {		/* remove a possible trailing ',' */
@@ -422,31 +437,57 @@ int setup_env(env_t *env)
 			    	str_bind_type[len-1] = '\0';
 			}
 		}
-		if (setenvf(&env->env, "SLURM_CPU_BIND_TYPE", str_bind_type)) {
-			error("Unable to set SLURM_CPU_BIND_TYPE");
-			rc = SLURM_FAILURE;
-		}
-
 		str_bind_list = xstrdup ("");
 		if (env->cpu_bind) {
 			xstrcat(str_bind_list, env->cpu_bind);
 		}
-		if (setenvf(&env->env, "SLURM_CPU_BIND_LIST", str_bind_list)) {
-			error("Unable to set SLURM_CPU_BIND_LIST");
-			rc = SLURM_FAILURE;
-		}
-
 		str_bind = xstrdup ("");
 		xstrcat(str_bind, str_verbose);
-		if (str_bind[0]) {		/* add ',' if str_verbose */
+		if (str_bind[0] && str_bind_type && str_bind_type[0])
 			xstrcatchar(str_bind, ',');
-		}
 		xstrcat(str_bind, str_bind_type);
 		xstrcat(str_bind, str_bind_list);
 
-		if (setenvf(&env->env, "SLURM_CPU_BIND", str_bind)) {
-			error("Unable to set SLURM_CPU_BIND");
-			rc = SLURM_FAILURE;
+		if (env->batch_flag) {
+			if (setenvf(&env->env, "SBATCH_CPU_BIND_VERBOSE",
+				    str_verbose)) {
+				error("Unable to set SBATCH_CPU_BIND_VERBOSE");
+				rc = SLURM_FAILURE;
+			}
+			if (setenvf(&env->env, "SBATCH_CPU_BIND_TYPE",
+				    str_bind_type)) {
+				error("Unable to set SBATCH_CPU_BIND_TYPE");
+				rc = SLURM_FAILURE;
+			}
+			if (setenvf(&env->env, "SBATCH_CPU_BIND_LIST",
+				    str_bind_list)) {
+				error("Unable to set SBATCH_CPU_BIND_LIST");
+				rc = SLURM_FAILURE;
+			}
+			if (setenvf(&env->env, "SBATCH_CPU_BIND", str_bind)) {
+				error("Unable to set SBATCH_CPU_BIND");
+				rc = SLURM_FAILURE;
+			}
+		} else {
+			if (setenvf(&env->env, "SLURM_CPU_BIND_VERBOSE",
+				    str_verbose)) {
+				error("Unable to set SLURM_CPU_BIND_VERBOSE");
+				rc = SLURM_FAILURE;
+			}
+			if (setenvf(&env->env, "SLURM_CPU_BIND_TYPE",
+				    str_bind_type)) {
+				error("Unable to set SLURM_CPU_BIND_TYPE");
+				rc = SLURM_FAILURE;
+			}
+			if (setenvf(&env->env, "SLURM_CPU_BIND_LIST",
+				    str_bind_list)) {
+				error("Unable to set SLURM_CPU_BIND_LIST");
+				rc = SLURM_FAILURE;
+			}
+			if (setenvf(&env->env, "SLURM_CPU_BIND", str_bind)) {
+				error("Unable to set SLURM_CPU_BIND");
+				rc = SLURM_FAILURE;
+			}
 		}
 	}
 
@@ -454,10 +495,17 @@ int setup_env(env_t *env)
 		char *str_verbose, *str_bind_type, *str_bind_list;
 		char *str_bind;
 
-		unsetenvp(env->env, "SLURM_MEM_BIND_VERBOSE");
-		unsetenvp(env->env, "SLURM_MEM_BIND_TYPE");
-		unsetenvp(env->env, "SLURM_MEM_BIND_LIST");
-		unsetenvp(env->env, "SLURM_MEM_BIND");
+		if (env->batch_flag) {
+			unsetenvp(env->env, "SBATCH_MEM_BIND_VERBOSE");
+			unsetenvp(env->env, "SBATCH_MEM_BIND_TYPE");
+			unsetenvp(env->env, "SBATCH_MEM_BIND_LIST");
+			unsetenvp(env->env, "SBATCH_MEM_BIND");
+		} else {
+			unsetenvp(env->env, "SLURM_MEM_BIND_VERBOSE");
+			unsetenvp(env->env, "SLURM_MEM_BIND_TYPE");
+			unsetenvp(env->env, "SLURM_MEM_BIND_LIST");
+			unsetenvp(env->env, "SLURM_MEM_BIND");
+		}
 
 		str_verbose = xstrdup ("");
 		if (env->mem_bind_type & MEM_BIND_VERBOSE) {
@@ -465,11 +513,6 @@ int setup_env(env_t *env)
 		} else {
 			xstrcat(str_verbose, "quiet");
 		}
-		if (setenvf(&env->env, "SLURM_MEM_BIND_VERBOSE", str_verbose)) {
-			error("Unable to set SLURM_MEM_BIND_VERBOSE");
-			rc = SLURM_FAILURE;
-		}
- 
 		str_bind_type = xstrdup ("");
 		if (env->mem_bind_type & MEM_BIND_NONE) {
 			xstrcat(str_bind_type, "none");
@@ -482,20 +525,10 @@ int setup_env(env_t *env)
 		} else if (env->mem_bind_type & MEM_BIND_LOCAL) {
 			xstrcat(str_bind_type, "local");
 		}
-		if (setenvf(&env->env, "SLURM_MEM_BIND_TYPE", str_bind_type)) {
-			error("Unable to set SLURM_MEM_BIND_TYPE");
-			rc = SLURM_FAILURE;
-		}
-
 		str_bind_list = xstrdup ("");
 		if (env->mem_bind) {
 			xstrcat(str_bind_list, env->mem_bind);
 		}
-		if (setenvf(&env->env, "SLURM_MEM_BIND_LIST", str_bind_list)) {
-			error("Unable to set SLURM_MEM_BIND_LIST");
-			rc = SLURM_FAILURE;
-		}
-
 		str_bind = xstrdup ("");
 		xstrcat(str_bind, str_verbose);
 		if (str_bind[0]) {		/* add ',' if str_verbose */
@@ -504,9 +537,46 @@ int setup_env(env_t *env)
 		xstrcat(str_bind, str_bind_type);
 		xstrcat(str_bind, str_bind_list);
 
-		if (setenvf(&env->env, "SLURM_MEM_BIND", str_bind)) {
-			error("Unable to set SLURM_MEM_BIND");
-			rc = SLURM_FAILURE;
+		if (env->batch_flag) {
+			if (setenvf(&env->env, "SBATCH_MEM_BIND_VERBOSE",
+				    str_verbose)) {
+				error("Unable to set SBATCH_MEM_BIND_VERBOSE");
+				rc = SLURM_FAILURE;
+			}
+			if (setenvf(&env->env, "SBATCH_MEM_BIND_TYPE",
+				    str_bind_type)) {
+				error("Unable to set SBATCH_MEM_BIND_TYPE");
+				rc = SLURM_FAILURE;
+			}
+			if (setenvf(&env->env, "SBATCH_MEM_BIND_LIST",
+				    str_bind_list)) {
+				error("Unable to set SBATCH_MEM_BIND_LIST");
+				rc = SLURM_FAILURE;
+			}
+			if (setenvf(&env->env, "SBATCH_MEM_BIND", str_bind)) {
+				error("Unable to set SBATCH_MEM_BIND");
+				rc = SLURM_FAILURE;
+			}
+		} else {
+			if (setenvf(&env->env, "SLURM_MEM_BIND_VERBOSE",
+				    str_verbose)) {
+				error("Unable to set SLURM_MEM_BIND_VERBOSE");
+				rc = SLURM_FAILURE;
+			}
+			if (setenvf(&env->env, "SLURM_MEM_BIND_TYPE",
+				    str_bind_type)) {
+				error("Unable to set SLURM_MEM_BIND_TYPE");
+				rc = SLURM_FAILURE;
+			}
+			if (setenvf(&env->env, "SLURM_MEM_BIND_LIST",
+				    str_bind_list)) {
+				error("Unable to set SLURM_MEM_BIND_LIST");
+				rc = SLURM_FAILURE;
+			}
+			if (setenvf(&env->env, "SLURM_MEM_BIND", str_bind)) {
+				error("Unable to set SLURM_MEM_BIND");
+				rc = SLURM_FAILURE;
+			}
 		}
 	}
 
@@ -528,8 +598,8 @@ int setup_env(env_t *env)
 		rc = SLURM_FAILURE;
 	}
 
-#ifdef HAVE_BG
 	if(env->select_jobinfo) {
+#ifdef HAVE_BG
 		char *bgl_part_id = NULL;
 		select_g_get_jobinfo(env->select_jobinfo, 
 				     SELECT_DATA_BLOCK_ID, &bgl_part_id);
@@ -560,13 +630,36 @@ int setup_env(env_t *env)
 			error("Can't set MPIRUN_PARTITION "
 			      "environment variable");
 		
-	}
 #endif
 
-	if (env->jobid >= 0
-	    && setenvf(&env->env, "SLURM_JOBID", "%d", env->jobid)) {
-		error("Unable to set SLURM_JOBID environment");
-		rc = SLURM_FAILURE;
+#ifdef HAVE_CRAY_XT
+		char *resv_id = NULL;
+		select_g_get_jobinfo(env->select_jobinfo, 
+				     SELECT_DATA_RESV_ID, &resv_id);
+		if (resv_id) {
+			if(setenvf(&env->env, 
+				   "BASIL_RESVERATION_ID", "%s", resv_id))
+				rc = SLURM_FAILURE;
+		} else 
+			rc = SLURM_FAILURE;
+		
+		if(rc == SLURM_FAILURE)
+			error("Can't set BASIL_RESVERATION_ID "
+			      "environment variable");
+		xfree(resv_id);
+#endif
+	}
+
+	if (env->jobid >= 0) {
+		if (setenvf(&env->env, "SLURM_JOB_ID", "%d", env->jobid)) {
+			error("Unable to set SLURM_JOB_ID environment");
+			rc = SLURM_FAILURE;
+		}
+		/* and for backwards compatibility... */
+		if (setenvf(&env->env, "SLURM_JOBID", "%d", env->jobid)) {
+			error("Unable to set SLURM_JOBID environment");
+			rc = SLURM_FAILURE;
+		}
 	}
 	
 	if (env->nodeid >= 0
@@ -618,13 +711,7 @@ int setup_env(env_t *env)
 		error ("Can't set SLURM_SRUN_COMM_PORT env variable");
 		rc = SLURM_FAILURE;
 	}
-	if (env->comm_hostname
-	    && setenvf (&env->env, "SLURM_SRUN_COMM_HOST", "%s", 
-			env->comm_hostname)) {
-		error ("Can't set SLURM_SRUN_COMM_HOST env variable");
-		rc = SLURM_FAILURE;
-	}
-		
+
 	if (env->cli) {
 		
 		slurm_print_slurm_addr (env->cli, addrbuf, INET_ADDRSTRLEN);
@@ -683,11 +770,19 @@ int setup_env(env_t *env)
 		error("Can't set SLURM_PTY_WIN_ROW env variable");
 		rc = SLURM_FAILURE;
 	}
-	if (env->ckpt_path 
-        && setenvf(&env->env, "SLURM_CHECKPOINT_PATH", "%s", env->ckpt_path)) {
-		error("Can't set SLURM_CHECKPOINT_PATH env variable");
+	if (env->ckpt_dir 
+	&& setenvf(&env->env, "SLURM_CHECKPOINT_IMAGE_DIR", "%s", 
+		   env->ckpt_dir)) {
+		error("Can't set SLURM_CHECKPOINT_IMAGE_DIR env variable");
+		rc = SLURM_FAILURE;
+	}
+
+	if (env->restart_cnt &&
+	    setenvf(&env->env, "SLURM_RESTART_COUNT", "%u", env->restart_cnt)) {
+		error("Can't set SLURM_RESTART_COUNT env variable");
 		rc = SLURM_FAILURE;
 	}
+
 	return rc;
 }
 
@@ -743,8 +838,8 @@ static char *_uint16_array_to_str(int array_len, const uint16_t *array)
 /*
  * The cpus-per-node representation in SLURM (and perhaps tasks-per-node
  * in the future) is stored in a compressed format comprised of two
- * equal-length arrays of uint32_t, and an integer holding the array length.
- * In one array an element represents a count (number of cpus, number of tasks,
+ * equal-length arrays, and an integer holding the array length.  In one
+ * array an element represents a count (number of cpus, number of tasks,
  * etc.), and the corresponding element in the other array contains the
  * number of times the count is repeated sequentially in the uncompressed
  * something-per-node array.
@@ -753,7 +848,7 @@ static char *_uint16_array_to_str(int array_len, const uint16_t *array)
  * array.  Free with xfree().
  */
 extern char *uint32_compressed_to_str(uint32_t array_len,
-				      const uint32_t *array,
+				      const uint16_t *array,
 				      const uint32_t *array_reps)
 {
 	int i;
@@ -799,7 +894,11 @@ void
 env_array_for_job(char ***dest, const resource_allocation_response_msg_t *alloc,
 		  const job_desc_msg_t *desc)
 {
+#ifdef HAVE_CRAY_XT
+	char *resv_id = NULL;
+#endif
 	char *tmp = NULL;
+	char *dist = NULL, *lllp_dist = NULL;
 	slurm_step_layout_t *step_layout = NULL;
 	uint32_t num_tasks = desc->num_tasks;
 
@@ -809,7 +908,20 @@ env_array_for_job(char ***dest, const resource_allocation_response_msg_t *alloc,
 	env_array_overwrite_fmt(dest, "SLURM_JOB_NODELIST", "%s",
 				alloc->node_list);
 
-	tmp = uint32_compressed_to_str((uint32_t)alloc->num_cpu_groups,
+	_set_distribution(desc->task_dist, &dist, &lllp_dist);
+	if(dist) 
+		env_array_overwrite_fmt(dest, "SLURM_DISTRIBUTION", "%s",
+					dist);
+	
+	if(desc->task_dist == SLURM_DIST_PLANE) 
+		env_array_overwrite_fmt(dest, "SLURM_DIST_PLANESIZE",
+					"%u", desc->plane_size);
+	
+	if(lllp_dist)
+		env_array_overwrite_fmt(dest, "SLURM_DIST_LLLP", "%s", 
+					lllp_dist);
+
+	tmp = uint32_compressed_to_str(alloc->num_cpu_groups,
 					alloc->cpus_per_node,
 					alloc->cpu_count_reps);
 	env_array_overwrite_fmt(dest, "SLURM_JOB_CPUS_PER_NODE", "%s", tmp);
@@ -842,6 +954,16 @@ env_array_for_job(char ***dest, const resource_allocation_response_msg_t *alloc,
 		xfree(tmp);
 	}
 #endif
+
+#ifdef HAVE_CRAY_XT
+	select_g_get_jobinfo(alloc->select_jobinfo, SELECT_DATA_RESV_ID,
+			     &resv_id);
+	if (resv_id) {
+		env_array_overwrite_fmt(dest, "BASIL_RESERVATION_ID", "%s",
+					resv_id);
+	}
+#endif
+
 	/* OBSOLETE, but needed by MPI, do not remove */
 	env_array_overwrite_fmt(dest, "SLURM_JOBID", "%u", alloc->job_id);
 	env_array_overwrite_fmt(dest, "SLURM_NNODES", "%u", alloc->node_cnt);
@@ -908,21 +1030,16 @@ extern void
 env_array_for_batch_job(char ***dest, const batch_job_launch_msg_t *batch,
 			const char *node_name)
 {
-	char *tmp = getenvp(batch->environment, "SLURM_CPUS_PER_TASK");
+	char *tmp = NULL;
 	uint32_t num_nodes = 0;
 	uint32_t num_cpus = 0;
 	int i;
 	slurm_step_layout_t *step_layout = NULL;
-	int cpus_per_task = 1;
 	uint32_t num_tasks = batch->nprocs;
+	uint16_t cpus_per_task;
 
-	if(tmp) 
-		cpus_per_task = atoi(tmp);
-	
 	/* There is no explicit node count in the batch structure,
-	   so we need to calculate the node count. We also need to
-	   figure out the explicit cpu count so we can figure out the
-	   cpus_per_task. */
+	 * so we need to calculate the node count. */
 	for (i = 0; i < batch->num_cpu_groups; i++) {
 		num_nodes += batch->cpu_count_reps[i];
 		num_cpus += batch->cpu_count_reps[i] * batch->cpus_per_node[i];
@@ -931,7 +1048,8 @@ env_array_for_batch_job(char ***dest, const batch_job_launch_msg_t *batch,
 	env_array_overwrite_fmt(dest, "SLURM_JOB_ID", "%u", batch->job_id);
 	env_array_overwrite_fmt(dest, "SLURM_JOB_NUM_NODES", "%u", num_nodes);
 	env_array_overwrite_fmt(dest, "SLURM_JOB_NODELIST", "%s", batch->nodes);
-	tmp = uint32_compressed_to_str((uint32_t)batch->num_cpu_groups,
+
+	tmp = uint32_compressed_to_str(batch->num_cpu_groups,
 					batch->cpus_per_node,
 					batch->cpu_count_reps);
 	env_array_overwrite_fmt(dest, "SLURM_JOB_CPUS_PER_NODE", "%s", tmp);
@@ -952,17 +1070,25 @@ env_array_for_batch_job(char ***dest, const batch_job_launch_msg_t *batch,
 	if(num_tasks) 
 		env_array_overwrite_fmt(dest, "SLURM_NPROCS", "%u", 
 					num_tasks);
-	else 
-		num_tasks = num_cpus / cpus_per_task;
+
+	if((batch->cpus_per_task != 0) &&
+	   (batch->cpus_per_task != (uint16_t) NO_VAL))
+		cpus_per_task = batch->cpus_per_task;
+	else
+		cpus_per_task = 1;	/* default value */
+	if (cpus_per_task > 1) {
+		env_array_overwrite_fmt(dest, "SLURM_CPUS_PER_TASK", "%u",
+					cpus_per_task);
+	}
+	num_tasks = num_cpus / cpus_per_task;
 	
 	step_layout = slurm_step_layout_create(batch->nodes,
 					       batch->cpus_per_node,
 					       batch->cpu_count_reps,
 					       num_nodes,
 					       num_tasks,
-					       (uint16_t)cpus_per_task,
-					       (uint16_t)
-					       SLURM_DIST_BLOCK,
+					       cpus_per_task,
+					       (uint16_t)SLURM_DIST_BLOCK,
 					       (uint16_t)NO_VAL);
 	tmp = _uint16_array_to_str(step_layout->node_cnt,
 				   step_layout->tasks);
@@ -976,16 +1102,17 @@ env_array_for_batch_job(char ***dest, const batch_job_launch_msg_t *batch,
  * overwriting any environment variables of the same name.  If the address
  * pointed to by "dest" is NULL, memory will automatically be xmalloc'ed.
  * The array is terminated by a NULL pointer, and thus is suitable for
- * use by execle() and other env_array_* functions.
+ * use by execle() and other env_array_* functions.  If preserve_env is
+ * true, the variables SLURM_NNODES and SLURM_NPROCS remain unchanged.
  *
  * Sets variables:
  *	SLURM_STEP_ID
  *	SLURM_STEP_NUM_NODES
  *	SLURM_STEP_NUM_TASKS
  *	SLURM_STEP_TASKS_PER_NODE
- *	SLURM_STEP_LAUNCHER_HOSTNAME
  *	SLURM_STEP_LAUNCHER_PORT
  *	SLURM_STEP_LAUNCHER_IPADDR
+ *	SLURM_STEP_RESV_PORTS
  *
  * Sets OBSOLETE variables:
  *	SLURM_STEPID
@@ -1000,8 +1127,8 @@ env_array_for_batch_job(char ***dest, const batch_job_launch_msg_t *batch,
 void
 env_array_for_step(char ***dest, 
 		   const job_step_create_response_msg_t *step,
-		   const char *launcher_hostname,
-		   uint16_t launcher_port)
+		   uint16_t launcher_port,
+		   bool preserve_env)
 {
 	char *tmp;
 
@@ -1015,17 +1142,21 @@ env_array_for_step(char ***dest,
 	env_array_overwrite_fmt(dest, "SLURM_STEP_NUM_TASKS",
 				"%u", step->step_layout->task_cnt);
 	env_array_overwrite_fmt(dest, "SLURM_STEP_TASKS_PER_NODE", "%s", tmp);
-	env_array_overwrite_fmt(dest, "SLURM_STEP_LAUNCHER_HOSTNAME",
-				"%s", launcher_hostname);
 	env_array_overwrite_fmt(dest, "SLURM_STEP_LAUNCHER_PORT",
 				"%hu", launcher_port);
+	if (step->resv_ports) {
+		env_array_overwrite_fmt(dest, "SLURM_STEP_RESV_PORTS",
+					"%s", step->resv_ports);
+	}
 
 	/* OBSOLETE, but needed by MPI, do not remove */
 	env_array_overwrite_fmt(dest, "SLURM_STEPID", "%u", step->job_step_id);
-	env_array_overwrite_fmt(dest, "SLURM_NNODES",
-				"%hu", step->step_layout->node_cnt);
-	env_array_overwrite_fmt(dest, "SLURM_NPROCS",
-				"%u", step->step_layout->task_cnt);
+	if (!preserve_env) {
+		env_array_overwrite_fmt(dest, "SLURM_NNODES",
+					"%hu", step->step_layout->node_cnt);
+		env_array_overwrite_fmt(dest, "SLURM_NPROCS",
+					"%u", step->step_layout->task_cnt);
+	}
 	env_array_overwrite_fmt(dest, "SLURM_TASKS_PER_NODE", "%s", tmp);
 	env_array_overwrite_fmt(dest, "SLURM_SRUN_COMM_PORT",
 				"%hu", launcher_port);
diff --git a/src/common/env.h b/src/common/env.h
index edcbb7ef58d82957e3841c5a385bcd4acf306169..94a715e53de22c6a2273ccbe74bc2ab903ce5df9 100644
--- a/src/common/env.h
+++ b/src/common/env.h
@@ -4,10 +4,11 @@
  *  Copyright (C) 2002-2006 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Mark Grondona <mgrondona@llnl.gov>.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -40,7 +41,7 @@ typedef struct env_options {
 	bool nprocs_set;	/* true if nprocs explicitly set */
 	bool cpus_set;		/* true if cpus_per_task explicitly set */
 	task_dist_states_t distribution; /* --distribution=, -m dist	*/
-	int plane_size;         /* plane_size for SLURM_DIST_PLANE */
+	uint16_t plane_size;         /* plane_size for SLURM_DIST_PLANE */
 	cpu_bind_type_t
 		cpu_bind_type;	/* --cpu_bind=			*/
 	char *cpu_bind;		/* binding map for map/mask_cpu	*/
@@ -55,7 +56,6 @@ typedef struct env_options {
 	char *nodelist;		/* nodelist in string form */
 	char **env;             /* job environment */
 	uint16_t comm_port;	/* srun's communication port */
-	char *comm_hostname;	/* srun's hostname */
 	slurm_addr *cli;	/* launch node address */
 	slurm_addr *self;
 	int jobid;		/* assigned job id */
@@ -73,7 +73,9 @@ typedef struct env_options {
 	uint16_t pty_port;	/* used to communicate window size changes */
 	uint8_t ws_col;		/* window size, columns */
 	uint8_t ws_row;		/* window size, row count */
-	char *ckpt_path;	/* --ckpt-path=                 */
+	char *ckpt_dir;		/* --ckpt-dir=                 */
+	uint16_t restart_cnt;	/* count of job restarts	*/
+	uint16_t batch_flag;	/* 1 if batch: queued job with script */
 } env_t;
 
 
@@ -136,20 +138,21 @@ extern void env_array_for_batch_job(char ***dest,
 				    const char* node_name);
 
 /*
- * Set in "dest the environment variables relevant to a SLURM job step,
+ * Set in "dest" the environment variables relevant to a SLURM job step,
  * overwriting any environment variables of the same name.  If the address
  * pointed to by "dest" is NULL, memory will automatically be xmalloc'ed.
  * The array is terminated by a NULL pointer, and thus is suitable for
- * use by execle() and other env_array_* functions.
+ * use by execle() and other env_array_* functions.  If preserve_env is
+ * true, the variables SLURM_NNODES and SLURM_NPROCS remain unchanged.
  *
  * Sets variables:
  *	SLURM_STEP_ID
  *	SLURM_STEP_NUM_NODES
  *	SLURM_STEP_NUM_TASKS
  *	SLURM_STEP_TASKS_PER_NODE
- *	SLURM_STEP_LAUNCHER_HOSTNAME
  *	SLURM_STEP_LAUNCHER_PORT
  *	SLURM_STEP_LAUNCHER_IPADDR
+ *	SLURM_STEP_RESV_PORTS
  *
  * Sets OBSOLETE variables:
  *	SLURM_STEPID
@@ -165,8 +168,8 @@ extern void env_array_for_batch_job(char ***dest,
 void
 env_array_for_step(char ***dest,
 		   const job_step_create_response_msg_t *step,
-		   const char *launcher_hostname,
-		   uint16_t launcher_port);
+		   uint16_t launcher_port,
+		   bool preserve_env);
 
 /*
  * Return an empty environment variable array (contains a single
@@ -264,8 +267,8 @@ char **env_array_user_default(const char *username, int timeout, int mode);
 /*
  * The cpus-per-node representation in SLURM (and perhaps tasks-per-node
  * in the future) is stored in a compressed format comprised of two
- * equal-length arrays of uint32_t, and an integer holding the array length.
- * In one array an element represents a count (number of cpus, number of tasks,
+ * equal-length arrays, and an integer holding the array length. In one 
+ * array an element represents a count (number of cpus, number of tasks,
  * etc.), and the corresponding element in the other array contains the
  * number of times the count is repeated sequentially in the uncompressed
  * something-per-node array.
@@ -274,7 +277,7 @@ char **env_array_user_default(const char *username, int timeout, int mode);
  * array.  Free with xfree().
  */
 char *uint32_compressed_to_str(uint32_t array_len,
-			       const uint32_t *array,
+			       const uint16_t *array,
 			       const uint32_t *array_reps);
 
 #endif
diff --git a/src/common/forward.c b/src/common/forward.c
index 41a1a58797d1e886874a10dabfc09ed4e8c418eb..f8e13c624fe233bf726d1fd580dfdbd16e3a005b 100644
--- a/src/common/forward.c
+++ b/src/common/forward.c
@@ -5,10 +5,11 @@
  *  Copyright (C) 2008 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Danny Auble <auble1@llnl.gov>.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -61,10 +62,28 @@
 
 #define MAX_RETRIES 3
 
+typedef struct {
+	pthread_cond_t *notify;
+	slurm_msg_t *orig_msg;
+	List ret_list;
+	int timeout;
+	hostlist_t tree_hl;
+	pthread_mutex_t *tree_mutex;
+} fwd_tree_t;
+
+void _destroy_tree_fwd(fwd_tree_t *fwd_tree)
+{
+	if(fwd_tree) {
+		if(fwd_tree->tree_hl)
+			hostlist_destroy(fwd_tree->tree_hl);
+		xfree(fwd_tree);
+	}
+}
+
 void *_forward_thread(void *arg)
 {
 	forward_msg_t *fwd_msg = (forward_msg_t *)arg;
-	Buf buffer = init_buf(0);
+	Buf buffer = init_buf(fwd_msg->buf_len);
 	int i=0;
 	List ret_list = NULL;
 	slurm_fd fd = -1;
@@ -146,7 +165,7 @@ void *_forward_thread(void *arg)
 			free(name);
 			if(hostlist_count(hl) > 0) {
 				free_buf(buffer);	
-				buffer = init_buf(0);
+				buffer = init_buf(fwd_msg->buf_len);
 				slurm_mutex_unlock(fwd_msg->forward_mutex);
 				slurm_close_accepted_conn(fd);
 				fd = -1;
@@ -201,7 +220,7 @@ void *_forward_thread(void *arg)
 				list_destroy(ret_list);
 			if (hostlist_count(hl) > 0) {
 				free_buf(buffer);	
-				buffer = init_buf(0);
+				buffer = init_buf(fwd_msg->buf_len);
 				slurm_mutex_unlock(fwd_msg->forward_mutex);
 				slurm_close_accepted_conn(fd);
 				fd = -1;
@@ -257,7 +276,6 @@ void *_forward_thread(void *arg)
 		}
 		break;
 	}
-
 	slurm_mutex_lock(fwd_msg->forward_mutex);
 	if(ret_list) {
 		while((ret_data_info = list_pop(ret_list)) != NULL) {
@@ -283,6 +301,85 @@ cleanup:
 	return (NULL);
 }
 
+void *_fwd_tree_thread(void *arg)
+{
+	fwd_tree_t *fwd_tree = (fwd_tree_t *)arg;
+	List ret_list = NULL;
+	char *name = NULL;
+	char buf[8196];
+	slurm_msg_t send_msg;	
+	
+	slurm_msg_t_init(&send_msg);
+	send_msg.msg_type = fwd_tree->orig_msg->msg_type;
+	send_msg.data = fwd_tree->orig_msg->data;
+
+	/* repeat until we are sure the message was sent */ 
+	while((name = hostlist_shift(fwd_tree->tree_hl))) {
+		if(slurm_conf_get_addr(name, &send_msg.address)
+		   == SLURM_ERROR) {
+			error("fwd_tree_thread: can't find address for host "
+			      "%s, check slurm.conf", name);
+			slurm_mutex_lock(fwd_tree->tree_mutex);
+			mark_as_failed_forward(&fwd_tree->ret_list, name,
+					SLURM_COMMUNICATIONS_CONNECTION_ERROR);
+ 			pthread_cond_signal(fwd_tree->notify);
+			slurm_mutex_unlock(fwd_tree->tree_mutex);
+			free(name);
+		
+			continue;
+		}
+		
+		hostlist_ranged_string(fwd_tree->tree_hl, sizeof(buf), buf);
+		send_msg.forward.nodelist = xstrdup(buf);
+		send_msg.forward.timeout = fwd_tree->timeout;
+		send_msg.forward.cnt = hostlist_count(fwd_tree->tree_hl);
+		if (send_msg.forward.nodelist[0]) {
+			debug3("Tree sending to %s along with %s", 
+			       name, send_msg.forward.nodelist);
+		} else
+			debug3("Tree sending to %s", name);
+
+		ret_list = slurm_send_addr_recv_msgs(&send_msg, name,
+						     fwd_tree->timeout);
+
+		xfree(send_msg.forward.nodelist);
+
+		if(ret_list) {
+			slurm_mutex_lock(fwd_tree->tree_mutex);
+			list_transfer(fwd_tree->ret_list, ret_list);
+			pthread_cond_signal(fwd_tree->notify);
+			slurm_mutex_unlock(fwd_tree->tree_mutex);
+			list_destroy(ret_list);
+		} else {
+			/* This should never happen (when this was
+			   written slurm_send_addr_recv_msgs always
+			   returned a list). */
+			error("fwd_tree_thread: no return list given from "
+			      "slurm_send_addr_recv_msgs for host %s", name);
+			slurm_mutex_lock(fwd_tree->tree_mutex);
+			mark_as_failed_forward(&fwd_tree->ret_list, name,
+					SLURM_COMMUNICATIONS_CONNECTION_ERROR);
+ 			pthread_cond_signal(fwd_tree->notify);
+			slurm_mutex_unlock(fwd_tree->tree_mutex);
+			free(name);
+			
+			continue;
+		}
+
+		free(name);
+		
+		/* check for error and try again */
+		if(errno == SLURM_COMMUNICATIONS_CONNECTION_ERROR) 
+ 			continue;						
+		
+		break;
+	}
+
+	_destroy_tree_fwd(fwd_tree);
+		
+	return NULL;
+}
+
 /*
 * forward_init    - initialize forward structure
  * IN: forward     - forward_t *   - struct to store forward info
@@ -320,7 +417,7 @@ extern void forward_init(forward_t *forward, forward_t *from)
 extern int forward_msg(forward_struct_t *forward_struct, 
 		       header_t *header)
 {
-	int i = 0, j = 0;
+	int j = 0;
 	int retries = 0;
 	forward_msg_t *forward_msg = NULL;
 	int thr_count = 0;
@@ -335,12 +432,7 @@ extern int forward_msg(forward_struct_t *forward_struct,
 		return SLURM_ERROR;
 	}
 	hl = hostlist_create(header->forward.nodelist);	
-	slurm_mutex_init(&forward_struct->forward_mutex);
-	pthread_cond_init(&forward_struct->notify, NULL);
-	
-	forward_struct->forward_msg = 
-		xmalloc(sizeof(forward_msg_t) * header->forward.cnt);
-	i = 0;
+	hostlist_uniq(hl);
 	
 	while((name = hostlist_shift(hl))) {
 		pthread_attr_t attr_agent;
@@ -379,7 +471,6 @@ extern int forward_msg(forward_struct_t *forward_struct,
 		forward_msg->header.ret_cnt = 0;
 		
 		forward_hl = hostlist_create(name);
-		i++;
 		free(name);
 		for(j = 0; j < span[thr_count]; j++) {
 			name = hostlist_shift(hl);
@@ -387,20 +478,21 @@ extern int forward_msg(forward_struct_t *forward_struct,
 				break;
 			hostlist_push(forward_hl, name);
 			free(name);
-			i++;
 		}
-		hostlist_uniq(forward_hl);
+
 		hostlist_ranged_string(forward_hl, sizeof(buf), buf);
 		hostlist_destroy(forward_hl);
+		forward_init(&forward_msg->header.forward, NULL);
 		forward_msg->header.forward.nodelist = xstrdup(buf);
 		while(pthread_create(&thread_agent, &attr_agent,
-				   _forward_thread, 
-				   (void *)forward_msg)) {
+				     _forward_thread, 
+				     (void *)forward_msg)) {
 			error("pthread_create error %m");
 			if (++retries > MAX_RETRIES)
 				fatal("Can't create pthread");
 			sleep(1);	/* sleep and try again */
 		}
+		slurm_attr_destroy(&attr_agent);
 		thr_count++; 
 	}
 	hostlist_destroy(hl);
@@ -408,6 +500,104 @@ extern int forward_msg(forward_struct_t *forward_struct,
 	return SLURM_SUCCESS;
 }
 
+/*
+ * start_msg_tree  - logic to begin the forward tree and
+ *                   accumulate the return codes from the processes
+ *                   getting the forwarded message
+ *
+ * IN: hl          - hostlist_t   - list of every node to send message to
+ * IN: msg         - slurm_msg_t  - message to send.
+ * IN: timeout     - int          - how long to wait in milliseconds.
+ * RET List 	   - List containing the responses of the children
+ *		     (if any) we forwarded the message to. List
+ *		     containing type (ret_data_info_t).
+ */
+extern List start_msg_tree(hostlist_t hl, slurm_msg_t *msg, int timeout)
+{
+	int *span = NULL;
+	fwd_tree_t *fwd_tree = NULL;
+	pthread_mutex_t tree_mutex;
+	pthread_cond_t notify;
+	int j = 0, count = 0;
+	List ret_list = NULL;
+	char *name = NULL;
+	int thr_count = 0;
+	int host_count = 0;
+
+	xassert(hl);
+	xassert(msg);
+
+	hostlist_uniq(hl);		
+	host_count = hostlist_count(hl);
+
+	span = set_span(host_count, 0);
+
+	slurm_mutex_init(&tree_mutex);
+	pthread_cond_init(&notify, NULL);
+
+	ret_list = list_create(destroy_data_info);
+	
+	while((name = hostlist_shift(hl))) {
+		pthread_attr_t attr_agent;
+		pthread_t thread_agent;
+		int retries = 0;
+
+		slurm_attr_init(&attr_agent);
+		if (pthread_attr_setdetachstate
+		    (&attr_agent, PTHREAD_CREATE_DETACHED))
+			error("pthread_attr_setdetachstate error %m");
+
+		fwd_tree = xmalloc(sizeof(fwd_tree_t));
+		fwd_tree->orig_msg = msg;
+		fwd_tree->ret_list = ret_list;
+		fwd_tree->timeout = timeout;
+		fwd_tree->notify = &notify;
+		fwd_tree->tree_mutex = &tree_mutex;
+
+		if(fwd_tree->timeout <= 0) {
+			/* convert secs to msec */
+			fwd_tree->timeout  = slurm_get_msg_timeout() * 1000; 
+		}
+
+		fwd_tree->tree_hl = hostlist_create(name);
+		free(name);
+		for(j = 0; j < span[thr_count]; j++) {
+			name = hostlist_shift(hl);
+			if(!name)
+				break;
+			hostlist_push(fwd_tree->tree_hl, name);
+			free(name);
+		}
+
+		while(pthread_create(&thread_agent, &attr_agent,
+				     _fwd_tree_thread, (void *)fwd_tree)) {
+			error("pthread_create error %m");
+			if (++retries > MAX_RETRIES)
+				fatal("Can't create pthread");
+			sleep(1);	/* sleep and try again */
+		}
+		slurm_attr_destroy(&attr_agent);
+		thr_count++; 
+	}
+	xfree(span);
+	
+	slurm_mutex_lock(&tree_mutex);
+
+	count = list_count(ret_list);
+	debug2("Tree head got back %d looking for %d", count, host_count);
+	while((count < host_count)) {
+		pthread_cond_wait(&notify, &tree_mutex);
+		count = list_count(ret_list);
+		debug2("Tree head got back %d", count);
+	}
+	debug2("Tree head got them all");
+	slurm_mutex_unlock(&tree_mutex);
+
+	slurm_mutex_destroy(&tree_mutex);
+	pthread_cond_destroy(&notify);
+
+	return ret_list;
+}
 
 /*
  * mark_as_failed_forward- mark a node as failed and add it to "ret_list"
@@ -443,16 +633,16 @@ extern void forward_wait(slurm_msg_t * msg)
 		debug2("looking for %d", msg->forward_struct->fwd_cnt);
 		slurm_mutex_lock(&msg->forward_struct->forward_mutex);
 		count = 0;
-		if (msg->ret_list != NULL) {
-			count += list_count(msg->ret_list);
-		}
+		if (msg->ret_list != NULL) 
+			count = list_count(msg->ret_list);
+		
 		debug2("Got back %d", count);
 		while((count < msg->forward_struct->fwd_cnt)) {
 			pthread_cond_wait(&msg->forward_struct->notify, 
 					  &msg->forward_struct->forward_mutex);
-			count = 0;
+			
 			if (msg->ret_list != NULL) {
-				count += list_count(msg->ret_list);
+				count = list_count(msg->ret_list);
 			}
 			debug2("Got back %d", count);
 				
@@ -480,6 +670,8 @@ void destroy_forward(forward_t *forward)
 	if(forward->init == FORWARD_INIT) {
 		xfree(forward->nodelist);
 		forward->init = 0;
+	} else {
+		error("destroy_forward: no init");
 	}
 }
 
@@ -493,4 +685,3 @@ void destroy_forward_struct(forward_struct_t *forward_struct)
 		xfree(forward_struct);
 	}
 }
-
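
The tree head in start_msg_tree() blocks until list_count(ret_list) reaches the host count, then hands the caller one consolidated List. Below is a minimal caller sketch, not part of this patch: the function name and debug text are invented, and the ret_data_info_t field names (node_name, err) are recalled from the protocol headers rather than shown here, so treat them as assumptions.

static void _ping_nodes_sketch(slurm_msg_t *msg, char *nodelist)
{
	/* Build the recipient list; start_msg_tree() uniq's it itself. */
	hostlist_t hl = hostlist_create(nodelist);
	/* timeout <= 0 falls back to slurm.conf's MessageTimeout */
	List resp = start_msg_tree(hl, msg, 0);
	ListIterator itr = list_iterator_create(resp);
	ret_data_info_t *ret = NULL;

	while ((ret = list_next(itr)))
		debug2("node %s returned %u", ret->node_name, ret->err);
	list_iterator_destroy(itr);
	list_destroy(resp);
	hostlist_destroy(hl);
}
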
diff --git a/src/common/forward.h b/src/common/forward.h
index c7e92eee62792966d8f4cccef27c832625851104..da707a571232586094757453da153d3712c06bd8 100644
--- a/src/common/forward.h
+++ b/src/common/forward.h
@@ -6,10 +6,11 @@
  *  Copyright (C) 2006 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Danny Auble <auble1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -87,6 +88,21 @@ if (forward_msg(forward_struct, &header) == SLURM_ERROR) {
 extern int forward_msg(forward_struct_t *forward_struct, 
 		       header_t *header);
 
+
+/*
+ * start_msg_tree  - logic to begin the forward tree and
+ *                   accumulate the return codes from processes getting
+ *                   the forwarded message
+ *
+ * IN: hl          - hostlist_t   - list of every node to send message to
+ * IN: msg         - slurm_msg_t  - message to send.
+ * IN: timeout     - int          - how long to wait in milliseconds.
+ * RET List 	   - List containing the responses of the children
+ *		     (if any) we forwarded the message to. Each element
+ *		     is of type (ret_data_info_t).
+ */
+extern List start_msg_tree(hostlist_t hl, slurm_msg_t *msg, int timeout);
+
 /*
  * mark_as_failed_forward- mark a node as failed and add it to "ret_list"
  *
@@ -128,6 +144,5 @@ if(!ret_list || list_count(ret_list) == 0) {
 extern void destroy_data_info(void *object);
 extern void destroy_forward(forward_t *forward);
 extern void destroy_forward_struct(forward_struct_t *forward_struct);
-extern void destroy_ret_types(void *object);
 	
 #endif
diff --git a/src/common/hostlist.c b/src/common/hostlist.c
index 28467a3d7eb9128e1d97dfe100225db996eb1bd1..5cdb95fb199eb63fed0dbcaf2c058b74111dee2c 100644
--- a/src/common/hostlist.c
+++ b/src/common/hostlist.c
@@ -1,15 +1,16 @@
 /*****************************************************************************\
- *  $Id: hostlist.c 16088 2008-12-29 21:56:17Z jette $
+ *  $Id: hostlist.c 17248 2009-04-14 20:14:06Z da $
  *****************************************************************************
  *  $LSDId: hostlist.c,v 1.14 2003/10/14 20:11:54 grondo Exp $
  *****************************************************************************
  *  Copyright (C) 2002 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Mark Grondona <mgrondona@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -2550,7 +2551,7 @@ _get_boxes(char *buf, int max_len)
 static void
 _clear_grid(void)
 {
-	bzero(axis, sizeof(axis));
+	memset(axis, 0, sizeof(axis));
 
 	axis_min_x = HOSTLIST_BASE;
 	axis_min_y = HOSTLIST_BASE;
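
The bzero()-to-memset() change above is a straight portability cleanup (bzero is legacy BSD and deprecated in POSIX). Since both forwarding paths in forward.c lean on this hostlist API, here is a minimal sketch of the calls they use; the host names are made up, and the ranged output shown in the comment is the typical form.

hostlist_t hl = hostlist_create("tux2,tux0,tux1,tux1");
char buf[64], *name;

hostlist_uniq(hl);	/* sort and drop the duplicate "tux1" */
hostlist_ranged_string(hl, sizeof(buf), buf);
/* buf now holds something like "tux[0-2]" */

while ((name = hostlist_shift(hl))) {	/* pop hosts off the front */
	/* ... hand one span of hosts to a forwarding thread ... */
	free(name);	/* shifted names are heap-allocated */
}
hostlist_destroy(hl);
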
diff --git a/src/common/hostlist.h b/src/common/hostlist.h
index 4b4e5aee06a15b07712c289206a1610660eee92f..19399e3d6edec9691d3a65ed744a3631cee93bcb 100644
--- a/src/common/hostlist.h
+++ b/src/common/hostlist.h
@@ -1,15 +1,16 @@
 /*****************************************************************************\
- *  $Id: hostlist.h 15870 2008-12-08 16:14:18Z jette $
+ *  $Id: hostlist.h 16616 2009-02-20 17:00:27Z jette $
  *****************************************************************************
  *  $LSDId: hostlist.h,v 1.4 2003/09/19 21:37:34 grondo Exp $
  *****************************************************************************
  *  Copyright (C) 2002 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Mark Grondona <mgrondona@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/common/io_hdr.c b/src/common/io_hdr.c
index 3a782025fa727cb9c66aba18c8926ecb3db4acaf..920d4a2fad8e00ce6d6c83d8c153fcb894ee2ab9 100644
--- a/src/common/io_hdr.c
+++ b/src/common/io_hdr.c
@@ -1,14 +1,15 @@
 /*****************************************************************************\
  * src/common/io_hdr.c - IO connection header functions
- * $Id: io_hdr.c 13672 2008-03-19 23:10:58Z jette $
+ * $Id: io_hdr.c 16616 2009-02-20 17:00:27Z jette $
  *****************************************************************************
  *  Copyright (C) 2002 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Mark A. Grondona <mgrondona@llnl.gov>.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/common/io_hdr.h b/src/common/io_hdr.h
index ec6f3d149ba176c0e66f675b15e4e102942249d9..0c8216476a9b4dac526620d620744efc8115478c 100644
--- a/src/common/io_hdr.h
+++ b/src/common/io_hdr.h
@@ -1,14 +1,15 @@
 /*****************************************************************************\
  * src/common/io_hdr.h - IO connection header functions
- * $Id: io_hdr.h 13672 2008-03-19 23:10:58Z jette $
+ * $Id: io_hdr.h 16616 2009-02-20 17:00:27Z jette $
  *****************************************************************************
  *  Copyright (C) 2002 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Mark A. Grondona <mgrondona@llnl.gov>.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/common/job_options.c b/src/common/job_options.c
index 95ae5be10dc539b73c019e9dce2df87aec808077..5d8e468819db8bc4402d63992febf15809838ee6 100644
--- a/src/common/job_options.c
+++ b/src/common/job_options.c
@@ -5,10 +5,11 @@
  *  Copyright (C) 2002 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Mark Grondona <grondona1@llnl.gov>.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/common/job_options.h b/src/common/job_options.h
index 18a50a28c01c0d239ee6df595bfaf64b7a4a722c..09da3139a686249f1dfe643c14233ac14a9c257a 100644
--- a/src/common/job_options.h
+++ b/src/common/job_options.h
@@ -5,10 +5,11 @@
  *  Copyright (C) 2002 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Mark Grondona <grondona1@llnl.gov>.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/common/jobacct_common.c b/src/common/jobacct_common.c
index 45c131f03ce1468f07e5a8284255a41cc864d675..18dad291207e5876a3a33ab7a517938988a5cadd 100644
--- a/src/common/jobacct_common.c
+++ b/src/common/jobacct_common.c
@@ -4,10 +4,11 @@
  *
  *  Copyright (C) 2005 Hewlett-Packard Development Company, L.P.
  *  Written by Danny Auble, <da@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -146,6 +147,7 @@ extern jobacct_job_rec_t *create_jobacct_job_rec()
 	job->steps = list_create(destroy_jobacct_step_rec);
 	job->requid = -1;
 	job->lft = (uint32_t)NO_VAL;
+	job->resvid = (uint32_t)NO_VAL;
 
       	return job;
 }
@@ -212,7 +214,58 @@ extern void pack_jobacct_job_rec(void *object, uint16_t rpc_version, Buf buffer)
 	jobacct_step_rec_t *step = NULL;
 	uint32_t count = 0;
 
-	if(rpc_version >= 4) {
+	if(rpc_version >= 5) {
+		pack32(job->alloc_cpus, buffer);
+		pack32(job->alloc_nodes, buffer);
+		pack32(job->associd, buffer);
+		packstr(job->account, buffer);
+		packstr(job->blockid, buffer);
+		packstr(job->cluster, buffer);
+		pack32(job->elapsed, buffer);
+		pack_time(job->eligible, buffer);
+		pack_time(job->end, buffer);
+		pack32(job->exitcode, buffer);
+		pack32(job->gid, buffer);
+		pack32(job->jobid, buffer);
+		packstr(job->jobname, buffer);
+		pack32(job->lft, buffer);
+		packstr(job->partition, buffer);
+		packstr(job->nodes, buffer);
+		pack32(job->priority, buffer);
+		pack16(job->qos, buffer);
+		pack32(job->resvid, buffer);
+		pack32(job->req_cpus, buffer);
+		pack32(job->requid, buffer);
+		_pack_sacct(&job->sacct, buffer);
+		pack32(job->show_full, buffer);
+		pack_time(job->start, buffer);
+		pack16((uint16_t)job->state, buffer);
+		if(job->steps)
+			count = list_count(job->steps);
+		pack32(count, buffer);
+		if(count) {
+			itr = list_iterator_create(job->steps);
+			while((step = list_next(itr))) {
+				pack_jobacct_step_rec(step, rpc_version,
+						      buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		pack_time(job->submit, buffer);
+		pack32(job->suspended, buffer);
+		pack32(job->sys_cpu_sec, buffer);
+		pack32(job->sys_cpu_usec, buffer);
+		pack32(job->timelimit, buffer);
+		pack32(job->tot_cpu_sec, buffer);
+		pack32(job->tot_cpu_usec, buffer);
+		pack16(job->track_steps, buffer);
+		pack32(job->uid, buffer);
+		packstr(job->user, buffer);
+		pack32(job->user_cpu_sec, buffer);
+		pack32(job->user_cpu_usec, buffer);
+		packstr(job->wckey, buffer); /* added for rpc_version 4 */
+		pack32(job->wckeyid, buffer); /* added for rpc_version 4 */
+	} else if(rpc_version >= 4) {
 		pack32(job->alloc_cpus, buffer);
 		pack32(job->associd, buffer);
 		packstr(job->account, buffer);
@@ -320,8 +373,9 @@ extern int unpack_jobacct_job_rec(void **job, uint16_t rpc_version, Buf buffer)
 
 	*job = job_ptr;
 
-	if(rpc_version >= 4) {
+	if(rpc_version >= 5) {
 		safe_unpack32(&job_ptr->alloc_cpus, buffer);
+		safe_unpack32(&job_ptr->alloc_nodes, buffer);
 		safe_unpack32(&job_ptr->associd, buffer);
 		safe_unpackstr_xmalloc(&job_ptr->account, &uint32_tmp, buffer);
 		safe_unpackstr_xmalloc(&job_ptr->blockid, &uint32_tmp, buffer);
@@ -338,8 +392,62 @@ extern int unpack_jobacct_job_rec(void **job, uint16_t rpc_version, Buf buffer)
 		safe_unpackstr_xmalloc(&job_ptr->partition, &uint32_tmp,
 				       buffer);
 		safe_unpackstr_xmalloc(&job_ptr->nodes, &uint32_tmp, buffer);
+		safe_unpack32(&job_ptr->priority, buffer);
+		safe_unpack16(&job_ptr->qos, buffer);
+		safe_unpack32(&job_ptr->resvid, buffer);
+		safe_unpack32(&job_ptr->req_cpus, buffer);
+		safe_unpack32(&job_ptr->requid, buffer);
+		_unpack_sacct(&job_ptr->sacct, buffer);
+		safe_unpack32(&job_ptr->show_full, buffer);
+		safe_unpack_time(&job_ptr->start, buffer);
+		safe_unpack16(&uint16_tmp, buffer);
+		job_ptr->state = uint16_tmp;
+		safe_unpack32(&count, buffer);
+
+		job_ptr->steps = list_create(destroy_jobacct_step_rec);
+		for(i=0; i<count; i++) {
+			unpack_jobacct_step_rec(&step, rpc_version, buffer);
+			if(step) {
+				step->job_ptr = job_ptr;
+				if(!job_ptr->first_step_ptr)
+					job_ptr->first_step_ptr = step;
+				list_append(job_ptr->steps, step);
+			}
+		}
+
+		safe_unpack_time(&job_ptr->submit, buffer);
+		safe_unpack32(&job_ptr->suspended, buffer);
+		safe_unpack32(&job_ptr->sys_cpu_sec, buffer);
+		safe_unpack32(&job_ptr->sys_cpu_usec, buffer);
+		safe_unpack32(&job_ptr->timelimit, buffer);
+		safe_unpack32(&job_ptr->tot_cpu_sec, buffer);
+		safe_unpack32(&job_ptr->tot_cpu_usec, buffer);
+		safe_unpack16(&job_ptr->track_steps, buffer);
+		safe_unpack32(&job_ptr->uid, buffer);
+		safe_unpackstr_xmalloc(&job_ptr->user, &uint32_tmp, buffer);
+		safe_unpack32(&job_ptr->user_cpu_sec, buffer);
+		safe_unpack32(&job_ptr->user_cpu_usec, buffer);
+		safe_unpackstr_xmalloc(&job_ptr->wckey, &uint32_tmp, buffer);
+		safe_unpack32(&job_ptr->wckeyid, buffer);
+	} else if(rpc_version >= 4) {
+		safe_unpack32(&job_ptr->alloc_cpus, buffer);
+		safe_unpack32(&job_ptr->associd, buffer);
+		safe_unpackstr_xmalloc(&job_ptr->account, &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&job_ptr->blockid, &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&job_ptr->cluster, &uint32_tmp, buffer);
+		safe_unpack32(&job_ptr->elapsed, buffer);
+		safe_unpack_time(&job_ptr->eligible, buffer);
+		safe_unpack_time(&job_ptr->end, buffer);
 		safe_unpack32(&uint32_tmp, buffer);
-		job_ptr->priority = (int32_t)uint32_tmp;
+		job_ptr->exitcode = (int32_t)uint32_tmp;
+		safe_unpack32(&job_ptr->gid, buffer);
+		safe_unpack32(&job_ptr->jobid, buffer);
+		safe_unpackstr_xmalloc(&job_ptr->jobname, &uint32_tmp, buffer);
+		safe_unpack32(&job_ptr->lft, buffer);
+		safe_unpackstr_xmalloc(&job_ptr->partition, &uint32_tmp,
+				       buffer);
+		safe_unpackstr_xmalloc(&job_ptr->nodes, &uint32_tmp, buffer);
+		safe_unpack32(&job_ptr->priority, buffer);
 		safe_unpack16(&job_ptr->qos, buffer);
 		safe_unpack32(&job_ptr->req_cpus, buffer);
 		safe_unpack32(&job_ptr->requid, buffer);
@@ -353,8 +461,12 @@ extern int unpack_jobacct_job_rec(void **job, uint16_t rpc_version, Buf buffer)
 		job_ptr->steps = list_create(destroy_jobacct_step_rec);
 		for(i=0; i<count; i++) {
 			unpack_jobacct_step_rec(&step, rpc_version, buffer);
-			if(step)
+			if(step) {
+				step->job_ptr = job_ptr;
+				if(!job_ptr->first_step_ptr)
+					job_ptr->first_step_ptr = step;
 				list_append(job_ptr->steps, step);
+			}
 		}
 
 		safe_unpack_time(&job_ptr->submit, buffer);
@@ -399,12 +511,15 @@ extern int unpack_jobacct_job_rec(void **job, uint16_t rpc_version, Buf buffer)
 		safe_unpack16(&uint16_tmp, buffer);
 		job_ptr->state = uint16_tmp;
 		safe_unpack32(&count, buffer);
-
 		job_ptr->steps = list_create(destroy_jobacct_step_rec);
 		for(i=0; i<count; i++) {
 			unpack_jobacct_step_rec(&step, rpc_version, buffer);
-			if(step)
+			if(step) {
+				step->job_ptr = job_ptr;
+				if(!job_ptr->first_step_ptr)
+					job_ptr->first_step_ptr = step;
 				list_append(job_ptr->steps, step);
+			}
 		}
 
 		safe_unpack_time(&job_ptr->submit, buffer);
@@ -431,25 +546,51 @@ unpack_error:
 extern void pack_jobacct_step_rec(jobacct_step_rec_t *step, 
 				  uint16_t rpc_version, Buf buffer)
 {
-	pack32(step->elapsed, buffer);
-	pack_time(step->end, buffer);
-	pack32((uint32_t)step->exitcode, buffer);
-	pack32(step->jobid, buffer);
-	pack32(step->ncpus, buffer);
-        packstr(step->nodes, buffer);
-	pack32(step->requid, buffer);
-	_pack_sacct(&step->sacct, buffer);
-	pack_time(step->start, buffer);
-	pack16(step->state, buffer);
-	pack32(step->stepid, buffer);	/* job's step number */
-	packstr(step->stepname, buffer);
-	pack32(step->suspended, buffer);
-	pack32(step->sys_cpu_sec, buffer);
-	pack32(step->sys_cpu_usec, buffer);
-	pack32(step->tot_cpu_sec, buffer);
-	pack32(step->tot_cpu_usec, buffer);
-	pack32(step->user_cpu_sec, buffer);
-	pack32(step->user_cpu_usec, buffer);
+	uint32_t uint32_tmp = NO_VAL;
+
+	if(rpc_version >= 5) {
+		pack32(step->elapsed, buffer);
+		pack_time(step->end, buffer);
+		pack32((uint32_t)step->exitcode, buffer);
+		pack32(step->ncpus, buffer);
+		pack32(step->nnodes, buffer);
+		packstr(step->nodes, buffer);
+		pack32(step->ntasks, buffer);
+		pack32(step->requid, buffer);
+		_pack_sacct(&step->sacct, buffer);
+		pack_time(step->start, buffer);
+		pack16(step->state, buffer);
+		pack32(step->stepid, buffer);	/* job's step number */
+		packstr(step->stepname, buffer);
+		pack32(step->suspended, buffer);
+		pack32(step->sys_cpu_sec, buffer);
+		pack32(step->sys_cpu_usec, buffer);
+		pack16(step->task_dist, buffer);
+		pack32(step->tot_cpu_sec, buffer);
+		pack32(step->tot_cpu_usec, buffer);
+		pack32(step->user_cpu_sec, buffer);
+		pack32(step->user_cpu_usec, buffer);
+	} else {
+		pack32(step->elapsed, buffer);
+		pack_time(step->end, buffer);
+		pack32((uint32_t)step->exitcode, buffer);
+		pack32(uint32_tmp, buffer);
+		pack32(step->ncpus, buffer);
+		packstr(step->nodes, buffer);
+		pack32(step->requid, buffer);
+		_pack_sacct(&step->sacct, buffer);
+		pack_time(step->start, buffer);
+		pack16(step->state, buffer);
+		pack32(step->stepid, buffer);	/* job's step number */
+		packstr(step->stepname, buffer);
+		pack32(step->suspended, buffer);
+		pack32(step->sys_cpu_sec, buffer);
+		pack32(step->sys_cpu_usec, buffer);
+		pack32(step->tot_cpu_sec, buffer);
+		pack32(step->tot_cpu_usec, buffer);
+		pack32(step->user_cpu_sec, buffer);
+		pack32(step->user_cpu_usec, buffer);
+	}
 }
 
 extern int unpack_jobacct_step_rec(jobacct_step_rec_t **step, 
@@ -461,28 +602,54 @@ extern int unpack_jobacct_step_rec(jobacct_step_rec_t **step,
 
 	*step = step_ptr;
 
-	safe_unpack32(&step_ptr->elapsed, buffer);
-	safe_unpack_time(&step_ptr->end, buffer);
-	safe_unpack32(&uint32_tmp, buffer);
-	step_ptr->exitcode = (int32_t)uint32_tmp;
-	safe_unpack32(&step_ptr->jobid, buffer);
-	safe_unpack32(&step_ptr->ncpus, buffer);
-        safe_unpackstr_xmalloc(&step_ptr->nodes, &uint32_tmp, buffer);
-	safe_unpack32(&step_ptr->requid, buffer);
-	_unpack_sacct(&step_ptr->sacct, buffer);
-	safe_unpack_time(&step_ptr->start, buffer);
-	safe_unpack16(&uint16_tmp, buffer);
-	step_ptr->state = uint16_tmp;
-	safe_unpack32(&step_ptr->stepid, buffer);	/* job's step number */
-	safe_unpackstr_xmalloc(&step_ptr->stepname, &uint32_tmp, buffer);
-	safe_unpack32(&step_ptr->suspended, buffer);
-	safe_unpack32(&step_ptr->sys_cpu_sec, buffer);
-	safe_unpack32(&step_ptr->sys_cpu_usec, buffer);
-	safe_unpack32(&step_ptr->tot_cpu_sec, buffer);
-	safe_unpack32(&step_ptr->tot_cpu_usec, buffer);
-	safe_unpack32(&step_ptr->user_cpu_sec, buffer);
-	safe_unpack32(&step_ptr->user_cpu_usec, buffer);
-
+	if(rpc_version >= 5) {
+		safe_unpack32(&step_ptr->elapsed, buffer);
+		safe_unpack_time(&step_ptr->end, buffer);
+		safe_unpack32(&uint32_tmp, buffer);
+		step_ptr->exitcode = (int32_t)uint32_tmp;
+		safe_unpack32(&step_ptr->ncpus, buffer);
+		safe_unpack32(&step_ptr->nnodes, buffer);
+		safe_unpackstr_xmalloc(&step_ptr->nodes, &uint32_tmp, buffer);
+		safe_unpack32(&step_ptr->ntasks, buffer);
+		safe_unpack32(&step_ptr->requid, buffer);
+		_unpack_sacct(&step_ptr->sacct, buffer);
+		safe_unpack_time(&step_ptr->start, buffer);
+		safe_unpack16(&uint16_tmp, buffer);
+		step_ptr->state = uint16_tmp;
+		safe_unpack32(&step_ptr->stepid, buffer);
+		safe_unpackstr_xmalloc(&step_ptr->stepname,
+				       &uint32_tmp, buffer);
+		safe_unpack32(&step_ptr->suspended, buffer);
+		safe_unpack32(&step_ptr->sys_cpu_sec, buffer);
+		safe_unpack32(&step_ptr->sys_cpu_usec, buffer);
+		safe_unpack16(&step_ptr->task_dist, buffer);
+		safe_unpack32(&step_ptr->tot_cpu_sec, buffer);
+		safe_unpack32(&step_ptr->tot_cpu_usec, buffer);
+		safe_unpack32(&step_ptr->user_cpu_sec, buffer);
+		safe_unpack32(&step_ptr->user_cpu_usec, buffer);
+	} else {
+		safe_unpack32(&step_ptr->elapsed, buffer);
+		safe_unpack_time(&step_ptr->end, buffer);
+		safe_unpack32(&uint32_tmp, buffer);
+		step_ptr->exitcode = (int32_t)uint32_tmp;
+		safe_unpack32(&uint32_tmp, buffer);
+		safe_unpack32(&step_ptr->ncpus, buffer);
+		safe_unpackstr_xmalloc(&step_ptr->nodes, &uint32_tmp, buffer);
+		safe_unpack32(&step_ptr->requid, buffer);
+		_unpack_sacct(&step_ptr->sacct, buffer);
+		safe_unpack_time(&step_ptr->start, buffer);
+		safe_unpack16(&uint16_tmp, buffer);
+		step_ptr->state = uint16_tmp;
+		safe_unpack32(&step_ptr->stepid, buffer);	/* job's step number */
+		safe_unpackstr_xmalloc(&step_ptr->stepname, &uint32_tmp, buffer);
+		safe_unpack32(&step_ptr->suspended, buffer);
+		safe_unpack32(&step_ptr->sys_cpu_sec, buffer);
+		safe_unpack32(&step_ptr->sys_cpu_usec, buffer);
+		safe_unpack32(&step_ptr->tot_cpu_sec, buffer);
+		safe_unpack32(&step_ptr->tot_cpu_usec, buffer);
+		safe_unpack32(&step_ptr->user_cpu_sec, buffer);
+		safe_unpack32(&step_ptr->user_cpu_usec, buffer);
+	}
 	return SLURM_SUCCESS;
 
 unpack_error:
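
The rpc_version >= 5 branches above follow SLURM's standard wire-compatibility idiom: serialize in the newest layout when the peer understands it, otherwise fall back to the older layout (slots for fields that no longer exist are padded with NO_VAL, as in the pre-5 step pack above). A generic sketch of that idiom with invented names — rec_t, old_field, and new_field are not real SLURM types:

typedef struct { uint32_t old_field; uint32_t new_field; } rec_t;

extern void pack_rec(rec_t *r, uint16_t rpc_version, Buf buffer)
{
	pack32(r->old_field, buffer);
	if (rpc_version >= 5)
		pack32(r->new_field, buffer);	/* added in version 5 */
}

extern int unpack_rec(rec_t *r, uint16_t rpc_version, Buf buffer)
{
	safe_unpack32(&r->old_field, buffer);
	if (rpc_version >= 5)
		safe_unpack32(&r->new_field, buffer);
	return SLURM_SUCCESS;
unpack_error:	/* safe_unpack* macros jump here on short buffers */
	return SLURM_ERROR;
}
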
diff --git a/src/common/jobacct_common.h b/src/common/jobacct_common.h
index 2eb69cdd31725b529f59630dfba9ff4ace0d3444..c8d38037c504fc253c4f46ad7148091e5ba964cf 100644
--- a/src/common/jobacct_common.h
+++ b/src/common/jobacct_common.h
@@ -4,10 +4,11 @@
  *
  *  Copyright (C) 2005 Hewlett-Packard Development Company, L.P.
  *  Written by Danny Auble, <da@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -88,24 +89,30 @@ typedef struct {
 
 typedef struct {
 	uint32_t alloc_cpus;
-	uint32_t associd;
+	uint32_t alloc_nodes;
 	char    *account;
+	uint32_t associd;
 	char	*blockid;
 	char    *cluster;
 	uint32_t elapsed;
 	time_t eligible;
 	time_t end;
 	int32_t	exitcode;
+	void *first_step_ptr; /* this pointer to a jobacct_step_rec_t
+				 is set up on the
+				 client side, so it does not need
+				 to be packed */
 	uint32_t gid;
 	uint32_t jobid;
 	char	*jobname;
 	uint32_t lft;
 	char	*partition;
 	char	*nodes;
-	int32_t priority;
+	uint32_t priority;
 	uint16_t qos;
 	uint32_t req_cpus;
 	uint32_t requid;
+	uint32_t resvid;
 	sacct_t sacct;
 	uint32_t show_full;
 	time_t start;
@@ -115,6 +122,7 @@ typedef struct {
 	uint32_t suspended;
 	uint32_t sys_cpu_sec;
 	uint32_t sys_cpu_usec;
+	uint32_t timelimit;
 	uint32_t tot_cpu_sec;
 	uint32_t tot_cpu_usec;
 	uint16_t track_steps;
@@ -127,25 +135,16 @@ typedef struct {
 } jobacct_job_rec_t;
 
 typedef struct {
-	char    *account; /* This is a pointer to the account var inside
-			   * the jobacct_job_rec_t that contains this
-			   * step.  It is to be used only in the
-			   * client.  This should not be freed, packed
-			   * or unpacked
-			   */
-	uint32_t associd;
-	char    *cluster; /* This is a pointer to the cluster var inside
-			   * the jobacct_job_rec_t that contains this
-			   * step.  It is to be used only in the
-			   * client.  This should not be freed, packed
-			   * or unpacked
-			   */
 	uint32_t elapsed;
 	time_t end;
 	int32_t exitcode;
-	uint32_t jobid;
+	jobacct_job_rec_t *job_ptr; /* this pointer is set up on the
+				       client side, so it does not
+				       need to be packed */
 	uint32_t ncpus;
+	uint32_t nnodes;
 	char *nodes;
+	uint32_t ntasks;
 	uint32_t requid;
 	sacct_t sacct;
 	time_t start;
@@ -155,6 +154,7 @@ typedef struct {
 	uint32_t suspended;
 	uint32_t sys_cpu_sec;
 	uint32_t sys_cpu_usec;
+	uint16_t task_dist;
 	uint32_t tot_cpu_sec;
 	uint32_t tot_cpu_usec;
 	uint32_t user_cpu_sec;
diff --git a/src/common/log.c b/src/common/log.c
index 5c0f0719d8fd793ef8a91d553f0398f510272f14..5c1fc315e4b411061d8c2032d2c553bd45d020be 100644
--- a/src/common/log.c
+++ b/src/common/log.c
@@ -1,11 +1,11 @@
 /*****************************************************************************\
  *  log.c - slurm logging facilities
- *  $Id: log.c 15367 2008-10-09 20:51:36Z da $
+ *  $Id: log.c 16616 2009-02-20 17:00:27Z jette $
  *****************************************************************************
  *  Copyright (C) 2002 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Mark Grondona <mgrondona@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  Much of this code was derived or adapted from the log.c component of 
  *  openssh which contains the following notices:
@@ -400,9 +400,15 @@ static char *vxstrfmt(const char *fmt, va_list ap)
 			case 'T': 	/* "%T" => "dd Mon yyyy hh:mm:ss off" */
 				xstrftimecat(buf, "%a %d %b %Y %H:%M:%S %z");   
 				break;
+#ifdef USE_ISO_8601
+			case 'M':       /* "%M" => "yyyy-mm-ddThh:mm:ss"          */
+				xstrftimecat(buf, "%Y-%m-%dT%T");
+				break;
+#else
 			case 'M':       /* "%M" => "Mon DD hh:mm:ss"          */
 				xstrftimecat(buf, "%b %d %T");
 				break;
+#endif
 			case 's':	/* "%s" => append string */
 				/* we deal with this case for efficiency */
 				if (unprocessed == 0) 
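
The new USE_ISO_8601 branch only swaps the strftime() pattern behind the logger's "%M" token; nothing else about vxstrfmt() changes. A stand-alone illustration of the two formats (plain C, independent of the slurm logging code; the sample timestamps in the comments are illustrative):

#include <stdio.h>
#include <time.h>

int main(void)
{
	char iso[32], classic[32];
	time_t now = time(NULL);
	struct tm *tm = localtime(&now);

	strftime(iso, sizeof(iso), "%Y-%m-%dT%T", tm);	    /* 2009-04-14T20:14:06 */
	strftime(classic, sizeof(classic), "%b %d %T", tm); /* Apr 14 20:14:06 */
	printf("ISO 8601: %s   classic: %s\n", iso, classic);
	return 0;
}
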
diff --git a/src/common/log.h b/src/common/log.h
index e159a9b76627e9393cb0b0dce24d6cab2040b39b..99d936f915c03bc5455bbe222911a88bb4ae8bf2 100644
--- a/src/common/log.h
+++ b/src/common/log.h
@@ -4,7 +4,7 @@
  *  Copyright (C) 2002 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Mark Grondona <mgrondona@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  Much of this code was derived or adapted from the log.c component of 
  *  openssh which contains the following notices:
diff --git a/src/common/macros.h b/src/common/macros.h
index a2492f767f12f65aafacef6403efb6b4ff6d9ac3..7784ae6d3682b366ded43c5288effccc7cbb1e48 100644
--- a/src/common/macros.h
+++ b/src/common/macros.h
@@ -1,14 +1,15 @@
 /*****************************************************************************\
  * src/common/macros.h - some standard macros for slurm
- * $Id: macros.h 13672 2008-03-19 23:10:58Z jette $
+ * $Id: macros.h 16616 2009-02-20 17:00:27Z jette $
  *****************************************************************************
  *  Copyright (C) 2002 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Mark Grondona <mgrondona@llnl.gov>.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/common/mpi.c b/src/common/mpi.c
index 96eb18cdf908d6ef2d5b71b7127c5286c519f3e1..eeea9fb396ca0a16cabadcd6ef05bb815e8ea2db 100644
--- a/src/common/mpi.c
+++ b/src/common/mpi.c
@@ -4,10 +4,11 @@
  *  Copyright (C) 2002 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Mark Grondona <grondo1@llnl.gov>.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/common/mpi.h b/src/common/mpi.h
index 75255181a4e1a6a2d137d08e9fbdca95cb9ebe11..2fa2e2b27c92acda2da6cfff862aa775fc9ec3e1 100644
--- a/src/common/mpi.h
+++ b/src/common/mpi.h
@@ -4,10 +4,11 @@
  *  Copyright (C) 2002-2006 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Mark Grondona <grondo1@llnl.gov>.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/common/net.c b/src/common/net.c
index 5fb33c058f8c3e0cc0b5c407ee0561f537d8de15..a65b9831b8a0bd1d380ea16382ff2b3195a41da7 100644
--- a/src/common/net.c
+++ b/src/common/net.c
@@ -5,10 +5,11 @@
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Mark Grondona <grondona1@llnl.gov>, Kevin Tew <tew1@llnl.gov>, 
  *  et. al.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/common/net.h b/src/common/net.h
index 7b5e8548bef736526333be177cdbafb63630cb10..ef19ce25620ff5af467aeb28ff5f8295a465ec33 100644
--- a/src/common/net.h
+++ b/src/common/net.h
@@ -5,10 +5,11 @@
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Mark Grondona <grondona1@llnl.gov>, Kevin Tew <tew1@llnl.gov>,
  *  et. al.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/common/node_select.c b/src/common/node_select.c
index ebf6e78a7c8d01d37bfde664c29852fe97410fc5..2e7aa8d415a9601325e8d89585feb3a2872db797 100644
--- a/src/common/node_select.c
+++ b/src/common/node_select.c
@@ -8,16 +8,16 @@
  *  front-end nodes, the functions they require are here rather than within 
  *  the plugin. This is because functions required by the plugin can not be 
  *  resolved on the front-end nodes, so we can't load the plugins there.
- *
- *  $Id: node_select.c 17005 2009-03-24 21:57:43Z da $
  *****************************************************************************
- *  Copyright (C) 2002-2006 The Regents of the University of California.
+ *  Copyright (C) 2002-2007 The Regents of the University of California.
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -88,14 +88,8 @@ typedef struct slurm_select_ops {
 	int		(*job_fini)	       (struct job_record *job_ptr);
 	int		(*job_suspend)	       (struct job_record *job_ptr);
 	int		(*job_resume)	       (struct job_record *job_ptr);
-	int		(*get_job_cores)       (uint32_t job_id,
-						int alloc_index, int s);
 	int		(*pack_node_info)      (time_t last_query_time,
 						Buf *buffer_ptr);
-        int             (*get_extra_jobinfo)   (struct node_record *node_ptr,
-						struct job_record *job_ptr,
-						enum select_data_info cr_info,
-						void *data);
         int             (*get_select_nodeinfo) (struct node_record *node_ptr,
 						enum select_data_info cr_info, 
 						void *data);
@@ -105,13 +99,13 @@ typedef struct slurm_select_ops {
         int             (*update_sub_node)     (update_part_msg_t
 						*part_desc_ptr);
 	int             (*get_info_from_plugin)(enum select_data_info cr_info,
+						struct job_record *job_ptr,
 						void *data);
 	int             (*update_node_state)   (int index, uint16_t state);
 	int             (*alter_node_cnt)      (enum select_node_cnt type,
 						void *data);
 	int		(*reconfigure)         (void);
-	int		(*step_begin)          (struct step_record *step_ptr);
-	int		(*step_fini)           (struct step_record *step_ptr);
+	List		(*get_config)          (void);
 } slurm_select_ops_t;
 
 typedef struct slurm_select_context {
@@ -127,7 +121,9 @@ static pthread_mutex_t		g_select_context_lock =
 					PTHREAD_MUTEX_INITIALIZER;
 
 #ifdef HAVE_BG			/* node selection specific logic */
+
 #  define JOBINFO_MAGIC 0x83ac
+
 struct select_jobinfo {
 	uint16_t start[SYSTEM_DIMENSIONS];	/* start position of block
 						 *  e.g. XYZ */
@@ -150,7 +146,16 @@ struct select_jobinfo {
 	char *mloaderimage;     /* mloaderImage for this block */
 	char *ramdiskimage;     /* RamDiskImage for this block */
 };
-#endif
+
+#endif	/* HAVE_BG */
+
+#ifdef HAVE_CRAY_XT		/* node selection specific logic */
+#  define JOBINFO_MAGIC 0x8cb3
+struct select_jobinfo {
+	uint16_t magic;		/* magic number */
+	char *reservation_id;	/* BASIL reservation ID */
+};
+#endif	/* HAVE_CRAY_XT */
 
 /*
  * Local functions
@@ -180,9 +185,7 @@ static slurm_select_ops_t * _select_get_ops(slurm_select_context_t *c)
 		"select_p_job_fini",
 		"select_p_job_suspend",
 		"select_p_job_resume",
-		"select_p_get_job_cores",
 		"select_p_pack_node_info",
-                "select_p_get_extra_jobinfo",
                 "select_p_get_select_nodeinfo",
                 "select_p_update_nodeinfo",
 		"select_p_update_block",
@@ -191,8 +194,7 @@ static slurm_select_ops_t * _select_get_ops(slurm_select_context_t *c)
 		"select_p_update_node_state",
 		"select_p_alter_node_cnt",
 		"select_p_reconfigure",
-		"select_p_step_begin",
-		"select_p_step_fini",
+		"select_p_get_config"
 	};
 	int n_syms = sizeof( syms ) / sizeof( char * );
 
@@ -396,28 +398,6 @@ extern int select_g_block_init(List block_list)
 
 	return (*(g_select_context->ops.block_init))(block_list);
 }
- 
-/* 
- * Get selected data from a given node for a specific job. 
- * IN node_ptr  - current node record
- * IN job_ptr   - current job record
- * IN cr_info   - type of data to get from the node record 
- *                (see enum select_data_info)
- * IN/OUT data  - the data to get from node record
- */
-extern int select_g_get_extra_jobinfo (struct node_record *node_ptr, 
-				       struct job_record *job_ptr, 
-                                       enum select_data_info cr_info,
-                                       void *data)
-{
-       if (slurm_select_init() < 0)
-               return SLURM_ERROR;
-
-       return (*(g_select_context->ops.get_extra_jobinfo))(node_ptr, 
-							   job_ptr, 
-							   cr_info, 
-							   data);
-}
 
 /* 
  * Get select data from a specific node record
@@ -484,12 +464,14 @@ extern int select_g_update_sub_node (update_part_msg_t *part_desc_ptr)
  * IN/OUT data  - the data to get from node record
  */
 extern int select_g_get_info_from_plugin (enum select_data_info cr_info, 
+					  struct job_record *job_ptr,
 					  void *data)
 {
        if (slurm_select_init() < 0)
                return SLURM_ERROR;
 
-       return (*(g_select_context->ops.get_info_from_plugin))(cr_info, data);
+       return (*(g_select_context->ops.get_info_from_plugin))(cr_info, job_ptr,
+       								data);
 }
 
 /* 
@@ -535,6 +517,17 @@ extern int select_g_reconfigure (void)
 	return (*(g_select_context->ops.reconfigure))();
 }
 
+/* 
+ * Get configuration specific to this plugin.
+ */
+extern List select_g_get_config(void)
+{
+	if (slurm_select_init() < 0)
+		return NULL;
+
+	return (*(g_select_context->ops.get_config))();
+}
+
 /*
  * Select the "best" nodes for given job from those available
  * IN/OUT job_ptr - pointer to job being considered for initiation,
@@ -646,21 +639,6 @@ extern int select_g_job_resume(struct job_record *job_ptr)
 	return (*(g_select_context->ops.job_resume))(job_ptr);
 }
 
-/*
- * Get job core info. Executed from sched/gang.
- * IN job_id      - id of job from which to obtain data
- * IN alloc_index - allocated node index
- * IN s           - socket index
- * RET number of allocated cores on the given socket from the given node
- */
-extern int select_g_get_job_cores(uint32_t job_id, int alloc_index, int s)
-{
-	if (slurm_select_init() < 0)
-		return 0;
-
-	return (*(g_select_context->ops.get_job_cores))(job_id, alloc_index, s);
-}
-
 extern int select_g_pack_node_info(time_t last_query_time, Buf *buffer)
 {
 	if (slurm_select_init() < 0)
@@ -670,40 +648,18 @@ extern int select_g_pack_node_info(time_t last_query_time, Buf *buffer)
 		(last_query_time, buffer);
 }
 
-/* Prepare to start a job step, allocate memory as needed
- * RET - slurm error code
- */
-extern int select_g_step_begin(struct step_record *step_ptr)
-{
-	if (slurm_select_init() < 0)
-		return SLURM_ERROR;
-
-	return (*(g_select_context->ops.step_begin))(step_ptr);
-}
-
-/* Prepare to terminate a job step, release memory as needed
- * RET - slurm error code
- */
-extern int select_g_step_fini(struct step_record *step_ptr)
-{
-	if (slurm_select_init() < 0)
-		return SLURM_ERROR;
-
-	return (*(g_select_context->ops.step_fini))(step_ptr);
-}
-
 #ifdef HAVE_BG		/* node selection specific logic */
 static void _free_node_info(bg_info_record_t *bg_info_record)
 {
-	xfree(bg_info_record->nodes);
-	xfree(bg_info_record->ionodes);
-	xfree(bg_info_record->owner_name);
 	xfree(bg_info_record->bg_block_id);
+	xfree(bg_info_record->blrtsimage);
 	xfree(bg_info_record->bp_inx);
+	xfree(bg_info_record->ionodes);
 	xfree(bg_info_record->ionode_inx);
-	xfree(bg_info_record->blrtsimage);
 	xfree(bg_info_record->linuximage);
 	xfree(bg_info_record->mloaderimage);
+	xfree(bg_info_record->nodes);
+	xfree(bg_info_record->owner_name);
 	xfree(bg_info_record->ramdiskimage);
 }
 
@@ -731,13 +687,11 @@ static int _unpack_node_info(bg_info_record_t *bg_info_record, Buf buffer)
 #ifdef HAVE_BGL
 	safe_unpack16(&uint16_tmp, buffer);
 	bg_info_record->node_use = (int) uint16_tmp;
-	safe_unpack16(&uint16_tmp, buffer);
-	bg_info_record->quarter = (int) uint16_tmp;
-	safe_unpack16(&uint16_tmp, buffer);
-	bg_info_record->nodecard = (int) uint16_tmp;
 #endif
 	safe_unpack32(&uint32_tmp, buffer);
 	bg_info_record->node_cnt = (int) uint32_tmp;
+	safe_unpack32(&uint32_tmp, buffer);
+	bg_info_record->job_running = (int) uint32_tmp;
 	safe_unpackstr_xmalloc(&bp_inx_str, &uint32_tmp, buffer);
 	if (bp_inx_str == NULL) {
 		bg_info_record->bp_inx = bitfmt2int("");
@@ -830,16 +784,10 @@ extern int select_g_alloc_jobinfo (select_jobinfo_t *jobinfo)
 	(*jobinfo)->conn_type = SELECT_NAV;
 	(*jobinfo)->reboot = (uint16_t) NO_VAL;
 	(*jobinfo)->rotate = (uint16_t) NO_VAL;
-	(*jobinfo)->bg_block_id = NULL;
 	(*jobinfo)->magic = JOBINFO_MAGIC;
-	(*jobinfo)->nodes = NULL;
-	(*jobinfo)->ionodes = NULL;
 	(*jobinfo)->node_cnt = NO_VAL;
 	(*jobinfo)->max_procs =  NO_VAL;
-	(*jobinfo)->blrtsimage = NULL;
-	(*jobinfo)->linuximage = NULL;
-	(*jobinfo)->mloaderimage = NULL;
-	(*jobinfo)->ramdiskimage = NULL;
+	/* Remainder of structure is already NULL filled */
 
 	return SLURM_SUCCESS;
 }
@@ -1036,8 +984,8 @@ extern int select_g_get_jobinfo (select_jobinfo_t jobinfo,
 			*tmp_char = xstrdup(jobinfo->ramdiskimage);
 		break;
 	default:
-		debug("select_g_get_jobinfo data_type %d invalid", 
-		      data_type);
+		debug2("select_g_get_jobinfo data_type %d invalid", 
+		       data_type);
 	}
 
 	return rc;
@@ -1186,7 +1134,7 @@ extern int  select_g_unpack_jobinfo(select_jobinfo_t jobinfo, Buf buffer)
 	safe_unpack32(&(jobinfo->max_procs), buffer);
 
 	safe_unpackstr_xmalloc(&(jobinfo->bg_block_id),  &uint32_tmp, buffer);
-	safe_unpackstr_xmalloc(&(jobinfo->nodes),      &uint32_tmp, buffer);
+	safe_unpackstr_xmalloc(&(jobinfo->nodes),        &uint32_tmp, buffer);
 	safe_unpackstr_xmalloc(&(jobinfo->ionodes),      &uint32_tmp, buffer);
 	safe_unpackstr_xmalloc(&(jobinfo->blrtsimage),   &uint32_tmp, buffer);
 	safe_unpackstr_xmalloc(&(jobinfo->linuximage),   &uint32_tmp, buffer);
@@ -1546,8 +1494,8 @@ extern char *select_g_xstrdup_jobinfo(select_jobinfo_t jobinfo, int mode)
 }
 
 /* Unpack node select info from a buffer */
-extern int select_g_unpack_node_info(node_select_info_msg_t **
-		node_select_info_msg_pptr, Buf buffer)
+extern int select_g_unpack_node_info(
+	node_select_info_msg_t **node_select_info_msg_pptr, Buf buffer)
 {
 	int i, record_count = 0;
 	node_select_info_msg_t *buf;
@@ -1595,6 +1543,313 @@ extern int select_g_free_node_info(node_select_info_msg_t **
 
 #else	/* !HAVE_BG */
 
+#ifdef HAVE_CRAY_XT
+
+/* allocate storage for a select job credential
+ * OUT jobinfo - storage for a select job credential
+ * RET         - slurm error code
+ * NOTE: storage must be freed using select_g_free_jobinfo
+ */
+extern int select_g_alloc_jobinfo (select_jobinfo_t *jobinfo)
+{
+	xassert(jobinfo != NULL);
+	
+	*jobinfo = xmalloc(sizeof(struct select_jobinfo));
+	(*jobinfo)->magic = JOBINFO_MAGIC;
+
+	return SLURM_SUCCESS;
+}
+
+/* fill in a previously allocated select job credential
+ * IN/OUT jobinfo  - updated select job credential
+ * IN data_type - type of data to enter into job credential
+ * IN data - the data to enter into job credential
+ */
+extern int select_g_set_jobinfo (select_jobinfo_t jobinfo,
+		enum select_data_type data_type, void *data)
+{
+	int rc = SLURM_SUCCESS;
+	char *tmp_char = (char *) data;
+
+	if (jobinfo == NULL) {
+		error("select_g_set_jobinfo: jobinfo not set");
+		return SLURM_ERROR;
+	}
+	if (jobinfo->magic != JOBINFO_MAGIC) {
+		error("select_g_set_jobinfo: jobinfo magic bad");
+		return SLURM_ERROR;
+	}
+
+	switch (data_type) {
+	case SELECT_DATA_RESV_ID:
+		/* we xfree() any preset value to avoid a memory leak */
+		xfree(jobinfo->reservation_id);
+		if (tmp_char)
+			jobinfo->reservation_id = xstrdup(tmp_char);
+		break;
+	default:
+		debug("select_g_set_jobinfo data_type %d invalid", 
+		      data_type);
+	}
+
+	return rc;
+}
+
+/* get data from a select job credential
+ * IN jobinfo  - the select job credential
+ * IN data_type - type of data to get from the job credential
+ * OUT data - the data to get from job credential; caller must xfree 
+ *	data for data_type == SELECT_DATA_RESV_ID 
+ */
+extern int select_g_get_jobinfo (select_jobinfo_t jobinfo,
+		enum select_data_type data_type, void *data)
+{
+	int rc = SLURM_SUCCESS;
+	char **tmp_char = (char **) data;
+
+	if (jobinfo == NULL) {
+		error("select_g_get_jobinfo: jobinfo not set");
+		return SLURM_ERROR;
+	}
+	if (jobinfo->magic != JOBINFO_MAGIC) {
+		error("select_g_get_jobinfo: jobinfo magic bad");
+		return SLURM_ERROR;
+	}
+
+	switch (data_type) {
+	case SELECT_DATA_RESV_ID:
+		if ((jobinfo->reservation_id == NULL) ||
+		    (jobinfo->reservation_id[0] == '\0'))
+			*tmp_char = NULL;
+		else
+			*tmp_char = xstrdup(jobinfo->reservation_id);
+		break;
+	default:
+		/* There is some use of BlueGene-specific params that 
+		 * are not supported on the Cray but are requested
+		 * on all systems */
+		debug2("select_g_get_jobinfo data_type %d invalid", 
+		       data_type);
+		return SLURM_ERROR;
+	}
+
+	return rc;
+}
+
+/* copy a select job credential
+ * IN jobinfo - the select job credential to be copied
+ * RET        - the copy or NULL on failure
+ * NOTE: returned value must be freed using select_g_free_jobinfo
+ */
+extern select_jobinfo_t select_g_copy_jobinfo(select_jobinfo_t jobinfo)
+{
+	struct select_jobinfo *rc = NULL;
+		
+	if (jobinfo == NULL)
+		;
+	else if (jobinfo->magic != JOBINFO_MAGIC)
+		error("select_g_copy_jobinfo: jobinfo magic bad");
+	else {
+		rc = xmalloc(sizeof(struct select_jobinfo));
+		rc->magic = JOBINFO_MAGIC;
+		rc->reservation_id = xstrdup(jobinfo->reservation_id);
+	}
+
+	return rc;
+}
+
+/* free storage previously allocated for a select job credential
+ * IN jobinfo  - the select job credential to be freed
+ */
+extern int select_g_free_jobinfo  (select_jobinfo_t *jobinfo)
+{
+	int rc = SLURM_SUCCESS;
+
+	xassert(jobinfo != NULL);
+	if (*jobinfo == NULL)	/* never set, treat as not an error */
+		;
+	else if ((*jobinfo)->magic != JOBINFO_MAGIC) {
+		error("select_g_free_jobinfo: jobinfo magic bad");
+		rc = EINVAL;
+	} else {
+		(*jobinfo)->magic = 0;
+		xfree((*jobinfo)->reservation_id);
+		xfree(*jobinfo);
+	}
+	return rc;
+}
+
+/* pack a select job credential into a buffer in machine independent form
+ * IN jobinfo  - the select job credential to be saved
+ * OUT buffer  - buffer with select credential appended
+ * RET         - slurm error code
+ */
+extern int  select_g_pack_jobinfo  (select_jobinfo_t jobinfo, Buf buffer)
+{
+	if (jobinfo) {
+		/* NOTE: If new elements are added here, make sure to 
+		 * add equivalent pack of zeros below for NULL pointer */
+		packstr(jobinfo->reservation_id, buffer);
+	} else {
+		packnull(buffer); /* reservation_id */
+	}
+
+	return SLURM_SUCCESS;
+}
+
+/* unpack a select job credential from a buffer
+ * OUT jobinfo - the select job credential read
+ * IN  buffer  - buffer with select credential read from current pointer loc
+ * RET         - slurm error code
+ * NOTE: returned value must be freed using select_g_free_jobinfo
+ */
+extern int  select_g_unpack_jobinfo(select_jobinfo_t jobinfo, Buf buffer)
+{
+	uint32_t uint32_tmp;
+
+	safe_unpackstr_xmalloc(&(jobinfo->reservation_id),  &uint32_tmp, buffer);
+
+	return SLURM_SUCCESS;
+
+      unpack_error:
+	return SLURM_ERROR;
+}
+
+/* write select job credential to a string
+ * IN jobinfo - a select job credential
+ * OUT buf    - location to write job credential contents
+ * IN size    - byte size of buf
+ * IN mode    - print mode, see enum select_print_mode
+ * RET        - the string, same as buf
+ */
+extern char *select_g_sprint_jobinfo(select_jobinfo_t jobinfo,
+				     char *buf, size_t size, int mode)
+{
+		
+	if (buf == NULL) {
+		error("select_g_sprint_jobinfo: buf is null");
+		return NULL;
+	}
+
+	if ((mode != SELECT_PRINT_DATA) &&
+	    jobinfo && (jobinfo->magic != JOBINFO_MAGIC)) {
+		error("select_g_sprint_jobinfo: jobinfo magic bad");
+		return NULL;
+	}
+
+	if (jobinfo == NULL) {
+		if (mode != SELECT_PRINT_HEAD) {
+			error("select_g_sprint_jobinfo: jobinfo bad");
+			return NULL;
+		}
+	}
+
+	switch (mode) {
+	case SELECT_PRINT_HEAD:
+		snprintf(buf, size,
+			 "RESV_ID");
+		break;
+	case SELECT_PRINT_DATA:
+		snprintf(buf, size, 
+			 "%7s",
+			 jobinfo->reservation_id);
+		break;
+	case SELECT_PRINT_MIXED:
+		snprintf(buf, size, 
+			 "Resv_ID=%s",
+			 jobinfo->reservation_id);
+		break;
+	case SELECT_PRINT_RESV_ID:
+		snprintf(buf, size, "%s", jobinfo->reservation_id);
+		break;	
+	default:
+		/* likely a BlueGene specific mode */
+		error("select_g_sprint_jobinfo: bad mode %d", mode);
+		if (size > 0)
+			buf[0] = '\0';
+	}
+	
+	return buf;
+}
+
+/* write select job info to a string
+ * IN jobinfo - a select job credential
+ * IN mode    - print mode, see enum select_print_mode
+ * RET        - char * containing string of request
+ */
+extern char *select_g_xstrdup_jobinfo(select_jobinfo_t jobinfo, int mode)
+{
+	char *buf = NULL;
+		
+	if ((mode != SELECT_PRINT_DATA) &&
+	    jobinfo && (jobinfo->magic != JOBINFO_MAGIC)) {
+		error("select_g_xstrdup_jobinfo: jobinfo magic bad");
+		return NULL;
+	}
+
+	if (jobinfo == NULL) {
+		if (mode != SELECT_PRINT_HEAD) {
+			error("select_g_xstrdup_jobinfo: jobinfo bad");
+			return NULL;
+		}
+	}
+
+	switch (mode) {
+	case SELECT_PRINT_HEAD:
+		xstrcat(buf, 
+			"RESV_ID");
+		break;
+	case SELECT_PRINT_DATA:
+		xstrfmtcat(buf, 
+			   "%7s",
+			   jobinfo->reservation_id);
+		break;
+	case SELECT_PRINT_MIXED:
+		xstrfmtcat(buf, 
+			   "Resv_ID=%s",
+			   jobinfo->reservation_id);
+		break;
+	case SELECT_PRINT_RESV_ID:
+		xstrfmtcat(buf, "%s", jobinfo->reservation_id);
+		break;
+	default:
+		error("select_g_xstrdup_jobinfo: bad mode %d", mode);
+	}
+	
+	return buf;
+}
+
+/* Unpack node select info from a buffer */
+extern int select_g_unpack_node_info(node_select_info_msg_t **
+		node_select_info_msg_pptr, Buf buffer)
+{
+	return SLURM_ERROR;
+}
+
+/* Free a node select information buffer */
+extern int select_g_free_node_info(node_select_info_msg_t **
+		node_select_info_msg_pptr)
+{
+	return SLURM_ERROR;
+}
+
+extern void select_g_print_config(List config_list)
+{
+	ListIterator iter = NULL;
+	config_key_pair_t *key_pair;
+
+	if (!config_list)
+		return;
+	
+	printf("\nCRAY XT configuration:\n");
+	iter = list_iterator_create(config_list);
+	while((key_pair = list_next(iter))) {
+		printf("%-22s = %s\n", key_pair->name, key_pair->value);
+	}
+	list_iterator_destroy(iter);
+}
+
+#else	/* !HAVE_CRAY_XT */
 /* allocate storage for a select job credential
  * OUT jobinfo - storage for a select job credential
  * RET         - slurm error code
@@ -1704,4 +1959,21 @@ extern int select_g_free_node_info(node_select_info_msg_t **
 	return SLURM_ERROR;
 }
 
-#endif
+extern void select_g_print_config(List config_list)
+{
+	ListIterator iter = NULL;
+	config_key_pair_t *key_pair;
+
+	if (!config_list)
+		return;
+	
+	printf("\nSelect configuration:\n");
+	iter = list_iterator_create(config_list);
+	while((key_pair = list_next(iter))) {
+		printf("%-22s = %s\n", key_pair->name, key_pair->value);
+	}
+	list_iterator_destroy(iter);
+}
+
+#endif	/* HAVE_CRAY_XT */
+#endif	/* HAVE_BG */
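
Tying the new HAVE_CRAY_XT jobinfo plumbing together, here is a hypothetical round trip through the API added above; the reservation ID "1234" is made up, and error checks are omitted for brevity.

select_jobinfo_t jobinfo = NULL;
char resv_id[] = "1234";	/* stand-in for a real BASIL ID */
char *resv = NULL, buf[64];

select_g_alloc_jobinfo(&jobinfo);
select_g_set_jobinfo(jobinfo, SELECT_DATA_RESV_ID, resv_id); /* xstrdup'd in */
select_g_get_jobinfo(jobinfo, SELECT_DATA_RESV_ID, &resv);   /* xstrdup'd out */
select_g_sprint_jobinfo(jobinfo, buf, sizeof(buf), SELECT_PRINT_MIXED);
/* buf now reads "Resv_ID=1234" */
xfree(resv);
select_g_free_jobinfo(&jobinfo);	/* also frees the stored reservation_id */
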
diff --git a/src/common/node_select.h b/src/common/node_select.h
index 7c0d957aeb177e64bbf8ba53ade299e47fde1c6b..60e2a9a98da02629c9e59b98621430fb803e645b 100644
--- a/src/common/node_select.h
+++ b/src/common/node_select.h
@@ -1,15 +1,15 @@
 /*****************************************************************************\
  *  node_select.h - Define node selection plugin functions.
- *
- * $Id: node_select.h 15324 2008-10-07 00:16:53Z da $
  *****************************************************************************
- *  Copyright (C) 2004-2006 The Regents of the University of California.
+ *  Copyright (C) 2004-2007 The Regents of the University of California.
+ *  Copyright (C) 2008 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -128,9 +128,11 @@ extern int select_g_update_sub_node (update_part_msg_t *part_desc_ptr);
  * IN node_pts  - current node record
  * IN cr_info   - type of data to get from the node record 
  *                (see enum select_data_info)
+ * IN job_ptr   - pointer to the job that's related to this query (may be NULL)
  * IN/OUT data  - the data to get from node record
  */
 extern int select_g_get_info_from_plugin (enum select_data_info cr_info, 
+					  struct job_record *job_ptr,
 					  void *data);
 
 /* 
@@ -238,14 +240,6 @@ extern int select_g_job_suspend(struct job_record *job_ptr);
  */
 extern int select_g_job_resume(struct job_record *job_ptr);
 
-/*
- * Get number of allocated cores per socket from a job
- * IN job_id      - identifies the job
- * IN alloc_index - allocated node index
- * IN s           - socket index
- */
-extern int select_g_get_job_cores(uint32_t job_id, int alloc_index, int s);
-
 /* allocate storage for a select job credential
  * OUT jobinfo - storage for a select job credential
  * RET         - slurm error code
@@ -282,18 +276,6 @@ extern select_jobinfo_t select_g_copy_jobinfo(select_jobinfo_t jobinfo);
  * RET         - slurm error code
  */
 extern int select_g_free_jobinfo  (select_jobinfo_t *jobinfo);
- 
-/* 
- * Get selected data from a given node for a specific job. 
- * IN node_ptr  - current node record
- * IN job_ptr   - current job record
- * IN cr_info   - type of data to get from the node record
- * IN/OUT data  - the data to get from node record
- */
-extern int select_g_get_extra_jobinfo (struct node_record *node_ptr, 
-				       struct job_record *job_ptr, 
-                                       enum select_data_info cr_info,
-                                       void *data);
 
 /* pack a select job credential into a buffer in machine independent form
  * IN jobinfo  - the select job credential to be saved
@@ -361,4 +343,7 @@ extern int select_g_free_node_info(node_select_info_msg_t **
 /* Note reconfiguration or change in partition configuration */
 extern int select_g_reconfigure(void);
 
+/*  Get configuration specific to this plugin */
+extern List select_g_get_config(void);
+
 #endif /*__SELECT_PLUGIN_API_H__*/
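The new select_g_get_config() returns a List of name/value pairs like the ones the Cray/BlueGene snippet above prints. A minimal consumer sketch, assuming the element type config_key_pair_t (that tag is an assumption here; the name/value fields follow the snippet above):

    #include <stdio.h>
    #include "src/common/list.h"
    #include "src/common/node_select.h"

    static void _print_select_config(void)
    {
    	List config = select_g_get_config();
    	ListIterator iter = list_iterator_create(config);
    	config_key_pair_t *key_pair;	/* assumed element type */

    	while ((key_pair = list_next(iter)))
    		printf("%-22s = %s\n", key_pair->name, key_pair->value);
    	list_iterator_destroy(iter);
    	list_destroy(config);	/* assuming the caller owns the list */
    }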
diff --git a/src/common/optz.c b/src/common/optz.c
index ec9d67d84bcfd684fc229dfc7d1f899197b981f4..61d562d4f009987fc545901866184261da4bd067 100644
--- a/src/common/optz.c
+++ b/src/common/optz.c
@@ -3,10 +3,11 @@
  *****************************************************************************
  *  Copyright (C) 2002 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/common/optz.h b/src/common/optz.h
index 07cf1b257546841b2440b9fe5ea446df6627e85b..5eedcc07baa961cdd10d3ae83979bbf1778955b6 100644
--- a/src/common/optz.h
+++ b/src/common/optz.h
@@ -3,10 +3,11 @@
  *****************************************************************************
  *  Copyright (C) 2005 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/common/pack.c b/src/common/pack.c
index 744c04b88f8483de4a41b9c36ed61d2d24fa585a..014a899b85af6b9cfdc993a0916a1b48f4209883 100644
--- a/src/common/pack.c
+++ b/src/common/pack.c
@@ -7,10 +7,11 @@
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Jim Garlick <garlick@llnl.gov>, 
  *             Morris Jette <jette1@llnl.gov>, et. al.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -137,7 +138,8 @@ Buf init_buf(int size)
 		error("init_buf: buffer size too large");
 		return NULL;
 	}
-
+	if(size <= 0)
+		size = BUF_SIZE;
 	my_buf = xmalloc(sizeof(struct slurm_buf));
 	my_buf->magic = BUF_MAGIC;
 	my_buf->size = size;
@@ -193,6 +195,55 @@ int unpack_time(time_t * valp, Buf buffer)
 }
 
 
+/*
+ * Given a double, multiply by FLOAT_MULT, typecast to a uint64_t
+ * in host byte order, convert to network byte order,
+ * store in buffer, and adjust buffer counters.
+ */
+void 	packdouble(double val, Buf buffer)
+{
+	double nl1 =  (val * FLOAT_MULT) + .5; /* add .5 to round to
+						  the nearest integer;
+						  values carried past
+						  ~15 significant
+						  decimal digits were
+						  found to lose
+						  precision on some
+						  systems, so round
+						  rather than truncate. */
+	uint64_t nl =  HTON_uint64(nl1);
+	
+	if (remaining_buf(buffer) < sizeof(nl)) {
+		if (buffer->size > (MAX_BUF_SIZE - BUF_SIZE)) {
+			error("pack64: buffer size too large");
+			return;
+		}
+		buffer->size += BUF_SIZE;
+		xrealloc(buffer->head, buffer->size);
+	}
+
+	memcpy(&buffer->head[buffer->processed], &nl, sizeof(nl));
+	buffer->processed += sizeof(nl);
+
+}
+
+/*
+ * Given a buffer containing a 64-bit integer in network byte order,
+ * convert to host byte order, typecast to double, divide by FLOAT_MULT,
+ * store the host double at 'valp', and adjust buffer counters.
+ */
+int	unpackdouble(double *valp, Buf buffer)
+{
+	uint64_t nl;
+	if (remaining_buf(buffer) < sizeof(nl))
+		return SLURM_ERROR;
+	
+	memcpy(&nl, &buffer->head[buffer->processed], sizeof(nl));
+
+	*valp = (double)NTOH_uint64(nl) / (double)FLOAT_MULT;
+	buffer->processed += sizeof(nl);
+	return SLURM_SUCCESS;
+}
+
 /*
  * Given a 64-bit integer in host byte order, convert to network byte order
  * store in buffer, and adjust buffer counters.
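The fixed-point scheme above trades precision for a portable wire format: a double survives the round trip only to within 1/FLOAT_MULT. A minimal self-check sketch, assuming pack.h's init_buf()/free_buf() and its set_buf_offset() macro to rewind the read position:

    #include <assert.h>
    #include <math.h>
    #include <stdlib.h>
    #include "src/common/pack.h"

    /* Pack a double, rewind, unpack, and confirm the round-trip
     * error stays below the 1/FLOAT_MULT fixed-point resolution. */
    static void _double_roundtrip_demo(void)
    {
    	Buf buf = init_buf(BUF_SIZE);
    	double in = 1234.567890, out = 0.0;

    	packdouble(in, buf);
    	set_buf_offset(buf, 0);		/* rewind before unpacking */

    	if (unpackdouble(&out, buf) != 0)	/* 0 == SLURM_SUCCESS */
    		abort();

    	assert(fabs(in - out) < (1.0 / FLOAT_MULT));
    	free_buf(buf);
    }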
diff --git a/src/common/pack.h b/src/common/pack.h
index 78531c7b7f43ad2a25c336f61b2fa412e0cc62ca..a3980de6e2294abe852e318f18dc59924e070016 100644
--- a/src/common/pack.h
+++ b/src/common/pack.h
@@ -6,10 +6,11 @@
  *  Copyright (C) 2008 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Kevin Tew <tew1@llnl.gov>, Morris Jette <jette1@llnl.gov>, et. al.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -60,6 +61,7 @@
 #define BUF_MAGIC 0x42554545
 #define BUF_SIZE (16 * 1024)
 #define MAX_BUF_SIZE ((uint32_t) 0xffff0000)	/* avoid going over 32-bits */
+#define FLOAT_MULT 1000000
 
 struct slurm_buf {
 	uint32_t magic;
@@ -85,6 +87,9 @@ void	*xfer_buf_data(Buf my_buf);
 void	pack_time(time_t val, Buf buffer);
 int	unpack_time(time_t *valp, Buf buffer);
 
+void 	packdouble(double val, Buf buffer);
+int	unpackdouble(double *valp, Buf buffer);
+
 void 	pack64(uint64_t val, Buf buffer);
 int	unpack64(uint64_t *valp, Buf buffer);
 
@@ -129,6 +134,20 @@ int	unpackmem_array(char *valp, uint32_t size_valp, Buf buffer);
 		goto unpack_error;			\
 } while (0)
 
+#define safe_packdouble(val,buf) do {			\
+	assert(sizeof(val) == sizeof(double));   	\
+	assert(buf->magic == BUF_MAGIC);		\
+	packdouble(val,buf);				\
+} while (0)
+
+#define safe_unpackdouble(valp,buf) do {		\
+	assert((valp) != NULL); 			\
+	assert(sizeof(*valp) == sizeof(double));        \
+	assert(buf->magic == BUF_MAGIC);		\
+	if (unpackdouble(valp,buf))			\
+		goto unpack_error;			\
+} while (0)
+
 #define safe_pack64(val,buf) do {			\
 	assert(sizeof(val) == sizeof(uint64_t)); 	\
 	assert(buf->magic == BUF_MAGIC);		\
diff --git a/src/common/parse_config.c b/src/common/parse_config.c
index 71539b749461bd9c867416b7563c468ac03626e7..b8a9f986a11dc43a50d48bd01d9d16065f24f69d 100644
--- a/src/common/parse_config.c
+++ b/src/common/parse_config.c
@@ -7,10 +7,11 @@
  *  Copyright (C) 2008 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Christopher J. Morrone <morrone2@llnl.gov>.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -421,7 +422,7 @@ static int _handle_long(s_p_values_t *v,
 		    || (*endptr != '\0')) {
 			if (strcasecmp(value, "UNLIMITED") == 0
 			    || strcasecmp(value, "INFINITE") == 0) {
-				num = (long)-1;
+				num = (long) INFINITE;
 			} else {
 				error("\"%s\" is not a valid number", value);
 				return -1;
@@ -464,7 +465,7 @@ static int _handle_uint16(s_p_values_t *v,
 		    || (*endptr != '\0')) {
 			if (strcasecmp(value, "UNLIMITED") == 0
 			    || strcasecmp(value, "INFINITE") == 0) {
-				num = (uint16_t)-1;
+				num = (uint16_t) INFINITE;
 			} else {
 				error("%s value \"%s\" is not a valid number", 
 					v->key, value);
@@ -519,7 +520,7 @@ static int _handle_uint32(s_p_values_t *v,
 		    || (*endptr != '\0')) {
 			if ((strcasecmp(value, "UNLIMITED") == 0) ||
 			    (strcasecmp(value, "INFINITE")  == 0)) {
-				num = (uint32_t)-1;
+				num = (uint32_t) INFINITE;
 			} else {
 				error("%s value (%s) is not a valid number", 
 					v->key, value);
diff --git a/src/common/parse_config.h b/src/common/parse_config.h
index acbbdf02b63c5dd05fb13ca4143a8fc59833ff98..7d72af7b8a833f04e8c4d138e6e9925d43ac1ef9 100644
--- a/src/common/parse_config.h
+++ b/src/common/parse_config.h
@@ -3,15 +3,16 @@
  *
  *  NOTE: when you see the prefix "s_p_", think "slurm parser".
  *
- *  $Id: parse_config.h 13672 2008-03-19 23:10:58Z jette $
+ *  $Id: parse_config.h 16616 2009-02-20 17:00:27Z jette $
  *****************************************************************************
  *  Copyright (C) 2006 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Christopher J. Morrone <morrone2@llnl.gov>.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/common/parse_spec.c b/src/common/parse_spec.c
index e1ff00c3f1198b4a0d49787ccbaf5c6b581f09bc..1f248bbed3e90c80f103e404534b3e8fab62cad7 100644
--- a/src/common/parse_spec.c
+++ b/src/common/parse_spec.c
@@ -1,14 +1,15 @@
-/* $Id: parse_spec.c 13672 2008-03-19 23:10:58Z jette $ */
+/* $Id: parse_spec.c 17458 2009-05-12 21:35:36Z dbremer $ */
 /*****************************************************************************\
  * parse_spec.c - configuration file parser
  *****************************************************************************
  *  Copyright (C) 2002 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -40,6 +41,10 @@
 #  include "config.h"
 #endif
 
+#ifndef   _GNU_SOURCE
+#  define _GNU_SOURCE
+#endif
+
 #include <stdarg.h>
 #include <stdio.h>
 #include <stdlib.h>
@@ -51,6 +56,7 @@
 #include "src/common/parse_spec.h"
 #include "src/common/xmalloc.h"
 #include "src/common/xstring.h"
+#include "src/common/slurm_strcasestr.h"
 
 
 #define BUFFER_SIZE 1024
@@ -59,7 +65,6 @@
 static int   _load_long (long *destination, char *keyword, char *in_line) ;
 static int   _load_integer (int *destination, char *keyword, char *in_line) ;
 static int   _load_float (float *destination, char *keyword, char *in_line) ;
-static char *_strcasestr(char *haystack, char *needle);
 
 /* 
  * slurm_parser - parse the supplied specification into keyword/value pairs
@@ -136,7 +141,7 @@ _load_float (float *destination, char *keyword, char *in_line)
 	char *str_ptr1, *str_ptr2, *str_ptr3;
 	int i, str_len1, str_len2;
 
-	str_ptr1 = (char *) _strcasestr (in_line, keyword);
+	str_ptr1 = (char *) slurm_strcasestr (in_line, keyword);
 	if (str_ptr1 != NULL) {
 		str_len1 = strlen (keyword);
 		strcpy (scratch, str_ptr1 + str_len1);
@@ -174,7 +179,7 @@ _load_integer (int *destination, char *keyword, char *in_line)
 	char *str_ptr1, *str_ptr2, *str_ptr3;
 	int i, str_len1, str_len2;
 
-	str_ptr1 = (char *) _strcasestr (in_line, keyword);
+	str_ptr1 = (char *) slurm_strcasestr (in_line, keyword);
 	if (str_ptr1 != NULL) {
 		str_len1 = strlen (keyword);
 		strcpy (scratch, str_ptr1 + str_len1);
@@ -231,7 +236,7 @@ _load_long (long *destination, char *keyword, char *in_line)
 	char *str_ptr1, *str_ptr2, *str_ptr3;
 	int i, str_len1, str_len2;
 
-	str_ptr1 = (char *) _strcasestr (in_line, keyword);
+	str_ptr1 = (char *) slurm_strcasestr (in_line, keyword);
 	if (str_ptr1 != NULL) {
 		str_len1 = strlen (keyword);
 		strcpy (scratch, str_ptr1 + str_len1);
@@ -289,7 +294,7 @@ load_string  (char **destination, char *keyword, char *in_line)
 	char *str_ptr1, *str_ptr2, *str_ptr3;
 	int i, str_len1, str_len2;
 
-	str_ptr1 = (char *) _strcasestr (in_line, keyword);
+	str_ptr1 = (char *) slurm_strcasestr (in_line, keyword);
 	if (str_ptr1 != NULL) {
 		int quoted = 0;
 		str_len1 = strlen (keyword);
@@ -315,27 +320,3 @@ load_string  (char **destination, char *keyword, char *in_line)
 	}
 	return 0;
 }
-
-/* case insensitve version of strstr() */
-static char *
-_strcasestr(char *haystack, char *needle)
-{
-	int hay_inx,  hay_size  = strlen(haystack);
-	int need_inx, need_size = strlen(needle);
-	char *hay_ptr = haystack;
-
-	for (hay_inx=0; hay_inx<hay_size; hay_inx++) {
-		for (need_inx=0; need_inx<need_size; need_inx++) {
-			if (tolower((int) hay_ptr[need_inx]) != 
-			    tolower((int) needle [need_inx]))
-				break;		/* mis-match */
-		}
-
-		if (need_inx == need_size)	/* it matched */
-			return hay_ptr;
-		else				/* keep looking */
-			hay_ptr++;
-	}
-
-	return NULL;	/* no match anywhere in string */
-}
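The private _strcasestr() helper is gone in favor of the shared slurm_strcasestr(), which the one-for-one call replacement implies keeps the same (haystack, needle) signature. A usage sketch:

    #include <stdio.h>
    #include <string.h>
    #include "src/common/slurm_strcasestr.h"

    int main(void)
    {
    	char line[] = "NodeName=tux001 Procs=8";

    	/* Case-insensitive keyword lookup, as the _load_* helpers
    	 * above now do; returns a pointer into the haystack. */
    	char *p = slurm_strcasestr(line, "procs=");

    	if (p)
    		printf("value: %s\n", p + strlen("procs="));	/* "8" */
    	return 0;
    }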
diff --git a/src/common/parse_spec.h b/src/common/parse_spec.h
index 6b4359c3c99ff7a503f844d577ff84840334295c..c8f5009f7b7956c2d2df0c0c8b3745c4a214b7ef 100644
--- a/src/common/parse_spec.h
+++ b/src/common/parse_spec.h
@@ -4,10 +4,11 @@
  *  Copyright (C) 2002 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/common/parse_time.c b/src/common/parse_time.c
index 38576d74b1120526c49723644cb1e42dfd60ac6d..a70435cba5a6f9a7c5dea9da421d401ac62358f8 100644
--- a/src/common/parse_time.c
+++ b/src/common/parse_time.c
@@ -4,10 +4,11 @@
  *  Copyright (C) 2005-2006 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -35,9 +36,15 @@
  *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
 \*****************************************************************************/
 
+#ifdef HAVE_CONFIG_H
+#  include "config.h"
+#endif
+
 #include <stdio.h>
 #include <time.h>
+#include <string.h>
 #include <strings.h>
+
 #ifndef   __USE_ISOC99
 #  define __USE_ISOC99 /* isblank() */
 #endif
@@ -193,7 +200,8 @@ _get_time(char *time_str, int *pos, int *hour, int *minute, int * second)
 }
 
 /* convert "MMDDYY" "MM.DD.YY" or "MM/DD/YY" string to numeric values
- * time_str (in): string to parse
+ * or "YYYY-MM-DD string to numeric values
+* time_str (in): string to parse
  * pos (in/out): position of parse start/end
  * month, mday, year (out): numberic values
  * RET: -1 on error, 0 otherwise
@@ -203,11 +211,62 @@ static int _get_date(char *time_str, int *pos, int *month, int *mday, int *year)
 	int mon, day, yr;
 	int offset = *pos;
 
+	if(time_str[offset+4] && (time_str[offset+4] == '-')
+	   && time_str[offset+7] && (time_str[offset+7] == '-')) {
+		/* get year */
+		if ((time_str[offset] < '0') || (time_str[offset] > '9'))
+			goto prob;
+		yr = time_str[offset++] - '0';
+
+		if ((time_str[offset] < '0') || (time_str[offset] > '9'))
+			goto prob;
+		yr = (yr * 10) + time_str[offset++] - '0';
+
+		if ((time_str[offset] < '0') || (time_str[offset] > '9'))
+			goto prob;
+		yr = (yr * 10) + time_str[offset++] - '0';
+
+		if ((time_str[offset] < '0') || (time_str[offset] > '9'))
+			goto prob;
+		yr = (yr * 10) + time_str[offset++] - '0';
+		
+		offset++;	/* skip the '-' */
+		
+		/* get month */
+		mon = time_str[offset++] - '0';
+		if ((time_str[offset] >= '0') && (time_str[offset] <= '9'))
+			mon = (mon * 10) + time_str[offset++] - '0';
+		if ((mon < 1) || (mon > 12)) {
+			offset -= 2;
+			goto prob;
+		}
+		
+		offset++;	/* skip the '-' */
+		
+		/* get day */
+		if ((time_str[offset] < '0') || (time_str[offset] > '9'))
+			goto prob;
+		day = time_str[offset++] - '0';
+		if ((time_str[offset] >= '0') && (time_str[offset] <= '9'))
+			day = (day * 10) + time_str[offset++] - '0';
+		if ((day < 1) || (day > 31)) {
+			offset -= 2;
+			goto prob;
+		}
+		
+		*pos = offset - 1;
+		*month = mon - 1;	/* zero origin */
+		*mday  = day;
+		*year  = yr - 1900;     /* struct tm counts years
+					   from 1900, as mktime expects */
+		return 0;
+	}
+	
 	/* get month */
 	mon = time_str[offset++] - '0';
 	if ((time_str[offset] >= '0') && (time_str[offset] <= '9'))
 		mon = (mon * 10) + time_str[offset++] - '0';
-	if ((mon < 1) || (mon > 12)) {
+	if ((mon < 1) || (mon > 12)) {
 		offset -= 2;
 		goto prob;
 	}
@@ -255,6 +314,8 @@ static int _get_date(char *time_str, int *pos, int *month, int *mday, int *year)
  *   HH:MM[:SS] [AM|PM]
  *   MMDD[YY] or MM/DD[/YY] or MM.DD[.YY]
  *   MM/DD[/YY]-HH:MM[:SS]
+ *   YYYY-MM-DD[THH[:MM[:SS]]]
+ *
  *   now + count [minutes | hours | days | weeks]
  * 
  * Invalid input results in message to stderr and return value of zero
@@ -273,7 +334,8 @@ extern time_t parse_time(char *time_str, int past)
 	time_now_tm = localtime(&time_now);
 
 	for (pos=0; ((time_str[pos] != '\0')&&(time_str[pos] != '\n')); pos++) {
-		if (isblank(time_str[pos]) || (time_str[pos] == '-'))
+		if (isblank(time_str[pos]) || (time_str[pos] == '-') 
+		    || (time_str[pos] == 'T'))
 			continue;
 		if (strncasecmp(time_str+pos, "today", 5) == 0) {
 			month = time_now_tm->tm_mon;
@@ -324,7 +386,8 @@ extern time_t parse_time(char *time_str, int past)
 				}
 				if (isblank(time_str[i]))
 					continue;
-				if ((time_str[i] == '\0') || (time_str[i] == '\n')) {
+				if ((time_str[i] == '\0') 
+				    || (time_str[i] == '\n')) {
 					pos += (i-1);
 					break;
 				}
@@ -342,7 +405,8 @@ extern time_t parse_time(char *time_str, int past)
 			continue;
 		}
 
-		if ((time_str[pos] < '0') || (time_str[pos] > '9'))	/* invalid */
+		if ((time_str[pos] < '0') || (time_str[pos] > '9'))
+			/* invalid */
 			goto prob;
 		/* We have some numeric value to process */
 		if (time_str[pos+2] == ':') {	/* time */
@@ -350,10 +414,11 @@ extern time_t parse_time(char *time_str, int past)
 				goto prob;
 			continue;
 		}
+		
 		if (_get_date(time_str, &pos, &month, &mday, &year))
 			goto prob;
 	}
-	/* printf("%d/%d/%d %d:%d\n",month+1,mday,year+1900,hour+1,minute); */
+/* 	printf("%d/%d/%d %d:%d\n",month+1,mday,year,hour+1,minute);  */
 
 
 	if ((hour == -1) && (month == -1))		/* nothing specified, time=0 */
@@ -407,7 +472,7 @@ extern time_t parse_time(char *time_str, int past)
 	}
 
 	/* convert the time into time_t format */
-	bzero(&res_tm, sizeof(res_tm));
+	memset(&res_tm, 0, sizeof(res_tm));
 	res_tm.tm_sec   = second;
 	res_tm.tm_min   = minute;
 	res_tm.tm_hour  = hour;
@@ -415,6 +480,9 @@ extern time_t parse_time(char *time_str, int past)
 	res_tm.tm_mon   = month;
 	res_tm.tm_year  = year;
 	res_tm.tm_isdst = -1;
+
+	/* printf("%d/%d/%d %d:%d\n",month+1,mday,year,hour+1,minute);  */
+
 	return mktime(&res_tm);
 
  prob:	fprintf(stderr, "Invalid time specification (pos=%d): %s\n", pos, time_str);
@@ -457,7 +525,7 @@ slurm_make_time_str (time_t *time, char *string, int size)
 	if ( *time == (time_t) 0 ) {
 		snprintf(string, size, "Unknown");
 	} else {
-#ifdef ISO8601
+#ifdef USE_ISO_8601
 		/* Format YYYY-MM-DDTHH:MM:SS, ISO8601 standard format,
 		 * NOTE: This is expected to break Maui, Moab and LSF
 		 * schedulers management of SLURM. */
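With the new date branch in _get_date() and 'T' treated as a separator, parse_time() now accepts ISO-8601-style stamps. A sketch:

    #include <stdio.h>
    #include <time.h>
    #include "src/common/parse_time.h"

    int main(void)
    {
    	char spec[] = "2009-05-12T14:30:00";

    	/* New ISO-8601-style form; 'T' now separates date and time
    	 * and is skipped just like '-' in the scanner loop. */
    	time_t t = parse_time(spec, 0);

    	if (t)
    		printf("%s", ctime(&t));
    	return 0;
    }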
diff --git a/src/common/parse_time.h b/src/common/parse_time.h
index 57a731668a0909bfe8a721e346cf53157c521ce9..c2af20d3e45859b7b4aa922eb0d6baff8b61a470 100644
--- a/src/common/parse_time.h
+++ b/src/common/parse_time.h
@@ -5,10 +5,11 @@
  *  Copyright (C) 2005-2006 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -60,6 +61,7 @@
  *   midnight, noon, teatime (4PM)
  *   HH:MM [AM|PM]
  *   MMDDYY or MM/DD/YY or MM.DD.YY
+ *   YYYY-MM-DD[THH[:MM[:SS]]]
  *   now + count [minutes | hours | days | weeks]
  *
  * Invalid input results in message to stderr and return value of zero
diff --git a/src/common/plugin.c b/src/common/plugin.c
index cab64e29b4029ee2d363a72e1186aa8e0d3fd426..142ef5f32de82a7b9f38adffaac1515c750ba65c 100644
--- a/src/common/plugin.c
+++ b/src/common/plugin.c
@@ -1,14 +1,15 @@
 /*****************************************************************************\
- * plugin.h - plugin architecture implementation.
+ *  plugin.h - plugin architecture implementation.
  *****************************************************************************
  *  Copyright (C) 2002-2007 The Regents of the University of California.
- *  Copyright (C) 2008 Lawrence Livermore National Security.
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Jay Windley <jwindley@lnxi.com>.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -43,7 +44,7 @@
 #include <errno.h>
 #include <sys/types.h>
 #include <stdio.h>
-#include <dlfcn.h>        /* don't know if there's an autoconf for this. */
+#include <dlfcn.h>	/* don't know if there's an autoconf for this. */
 #include <string.h>
 
 #include "src/common/xmalloc.h"
@@ -79,6 +80,25 @@ static char *_dlerror(void)
 	return rc;
 }
 
+const char * plugin_strerror(plugin_err_t e)
+{
+	switch (e) {
+		case EPLUGIN_SUCCESS:
+			return ("Success");
+		case EPLUGIN_NOTFOUND:
+			return ("Plugin file not found");
+		case EPLUGIN_ACCESS_ERROR:
+			return ("Plugin access denied");
+		case EPLUGIN_DLOPEN_FAILED:
+			return ("Dlopen of plugin file failed");
+		case EPLUGIN_INIT_FAILED:
+			return ("Plugin init() callback failed");
+		case EPLUGIN_MISSING_SYMBOL:
+			return ("Plugin name/type/version symbol missing");
+	}
+	return ("Unknown error");
+}
+
 int
 plugin_peek( const char *fq_path,
 			 char *plugin_type,
@@ -120,14 +140,26 @@ plugin_peek( const char *fq_path,
 	return SLURM_SUCCESS;
 }
 
-plugin_handle_t
-plugin_load_from_file( const char *fq_path )
+plugin_err_t
+plugin_load_from_file(plugin_handle_t *p, const char *fq_path)
 {
-        plugin_handle_t plug;
-        int (*init)( void );
-        
-        /*
-         * Try to open the shared object.  
+	plugin_handle_t plug;
+	int (*init)(void);
+
+	*p = PLUGIN_INVALID_HANDLE;
+
+	/*
+	 *  Check for file existence and access permissions
+	 */
+	if (access(fq_path, R_OK) < 0) {
+		if (errno == ENOENT)
+			return EPLUGIN_NOTFOUND;
+		else
+			return EPLUGIN_ACCESS_ERROR;
+	}
+
+	/*
+	 * Try to open the shared object.
 	 *
 	 * Use RTLD_LAZY to allow plugins to use symbols that may be 
 	 * defined in only one slurm entity (e.g. srun and not slurmd),
@@ -135,45 +167,43 @@ plugin_load_from_file( const char *fq_path )
 	 * entity from which it is available. (i.e. srun symbols are only
 	 * used in the context of srun, not slurmd.)
 	 *
-         */
-        plug = dlopen( fq_path, RTLD_LAZY );
-        if ( plug == NULL ) {
-		error( "plugin_load_from_file: dlopen(%s): %s",
-			fq_path,
-			_dlerror() );
-                return PLUGIN_INVALID_HANDLE;
-        }
-	
-        /* Now see if our required symbols are defined. */
-        if ( ( dlsym( plug, PLUGIN_NAME ) == NULL ) ||
-             ( dlsym( plug, PLUGIN_TYPE ) == NULL ) ||
-             ( dlsym( plug, PLUGIN_VERSION ) == NULL ) ) {
-		debug( "plugin_load_from_file: invalid symbol");
-                /* slurm_seterrno( SLURM_PLUGIN_SYMBOLS ); */
-                return PLUGIN_INVALID_HANDLE;
-        }
-
-        /*
-         * Now call its init() function, if present.  If the function
-         * returns nonzero, unload the plugin and signal an error.
-         */
-        if ( ( init = dlsym( plug, "init" ) ) != NULL ) {
-                if ( (*init)() != 0 ) {
-			error( "plugin_load_from_file(%s): init() returned SLURM_ERROR", 
-				fq_path );
-                        (void) dlclose( plug );
-                        return PLUGIN_INVALID_HANDLE;
-                }
-        }
-        
-        return plug;
+	 */
+	plug = dlopen(fq_path, RTLD_LAZY);
+	if (plug == NULL) {
+		error("plugin_load_from_file: dlopen(%s): %s",
+		      fq_path,
+		      _dlerror());
+		return EPLUGIN_DLOPEN_FAILED;
+	}
+
+	/* Now see if our required symbols are defined. */
+	if ((dlsym(plug, PLUGIN_NAME) == NULL) ||
+	    (dlsym(plug, PLUGIN_TYPE) == NULL) ||
+	    (dlsym(plug, PLUGIN_VERSION) == NULL)) {
+		dlclose (plug);
+		return EPLUGIN_MISSING_SYMBOL;
+	}
+
+	/*
+	 * Now call its init() function, if present.  If the function
+	 * returns nonzero, unload the plugin and signal an error.
+	 */
+	if ((init = dlsym(plug, "init")) != NULL) {
+		if ((*init)() != 0) {
+			dlclose(plug);
+			return EPLUGIN_INIT_FAILED;
+		}
+	}
+
+	*p = plug;
+	return EPLUGIN_SUCCESS;
 }
 
 plugin_handle_t
 plugin_load_and_link(const char *type_name, int n_syms,
 		    const char *names[], void *ptrs[])
 {
-        plugin_handle_t plug = PLUGIN_INVALID_HANDLE;
+	plugin_handle_t plug = PLUGIN_INVALID_HANDLE;
 	struct stat st;
 	char *head=NULL, *dir_array=NULL, *so_name = NULL,
 		*file_name=NULL;
@@ -210,7 +240,7 @@ plugin_load_and_link(const char *type_name, int n_syms,
 			debug4("No Good.");
 			xfree(file_name);
 		} else {
-			plug = plugin_load_from_file(file_name);
+			plugin_load_from_file(&plug, file_name);
 			xfree(file_name);
 			if (plugin_get_syms(plug, n_syms, names, ptrs) >= 
 			    n_syms) {
@@ -239,72 +269,72 @@ plugin_load_and_link(const char *type_name, int n_syms,
 void
 plugin_unload( plugin_handle_t plug )
 {
-        void (*fini)(void);
-        
-        if ( plug != PLUGIN_INVALID_HANDLE ) {
-                if ( ( fini = dlsym( plug, "fini" ) ) != NULL ) {
-                        (*fini)();
-                }
-                (void) dlclose( plug );
-        }
+	void (*fini)(void);
+	
+	if ( plug != PLUGIN_INVALID_HANDLE ) {
+		if ( ( fini = dlsym( plug, "fini" ) ) != NULL ) {
+			(*fini)();
+		}
+		(void) dlclose( plug );
+	}
 }
 
 
 void *
 plugin_get_sym( plugin_handle_t plug, const char *name )
 {
-        if ( plug != PLUGIN_INVALID_HANDLE )
-                return dlsym( plug, name );
-        else
-                return NULL;
+	if ( plug != PLUGIN_INVALID_HANDLE )
+		return dlsym( plug, name );
+	else
+		return NULL;
 }
 
 const char *
 plugin_get_name( plugin_handle_t plug )
 {
-        if ( plug != PLUGIN_INVALID_HANDLE )
-                return (const char *) dlsym( plug, PLUGIN_NAME );
-        else
-                return NULL;
+	if ( plug != PLUGIN_INVALID_HANDLE )
+		return (const char *) dlsym( plug, PLUGIN_NAME );
+	else
+		return NULL;
 }
 
 const char *
 plugin_get_type( plugin_handle_t plug )
 {
-        if ( plug != PLUGIN_INVALID_HANDLE )
-                return (const char *) dlsym( plug, PLUGIN_TYPE );
-        else
-                return NULL;
+	if ( plug != PLUGIN_INVALID_HANDLE )
+		return (const char *) dlsym( plug, PLUGIN_TYPE );
+	else
+		return NULL;
 }
 
 uint32_t
 plugin_get_version( plugin_handle_t plug )
 {
-        uint32_t *ptr;
+	uint32_t *ptr;
 
-        if ( plug == PLUGIN_INVALID_HANDLE ) return 0;        
-        ptr = (uint32_t *) dlsym( plug, PLUGIN_VERSION );
-        return ptr ? *ptr : 0;
+	if ( plug == PLUGIN_INVALID_HANDLE ) return 0;	
+	ptr = (uint32_t *) dlsym( plug, PLUGIN_VERSION );
+	return ptr ? *ptr : 0;
 }
 
 int
 plugin_get_syms( plugin_handle_t plug,
-                 int n_syms,
-                 const char *names[],
-                 void *ptrs[] )
+		 int n_syms,
+		 const char *names[],
+		 void *ptrs[] )
 {
-        int i, count;
+	int i, count;
 
-        count = 0;
-        for ( i = 0; i < n_syms; ++i ) {
-                ptrs[ i ] = dlsym( plug, names[ i ] );
-                if ( ptrs[ i ] ) 
+	count = 0;
+	for ( i = 0; i < n_syms; ++i ) {
+		ptrs[ i ] = dlsym( plug, names[ i ] );
+		if ( ptrs[ i ] ) 
 			++count;
 		else 
 			debug3("Couldn't find sym '%s' in the plugin",
 			       names[ i ]);
 	}
 
-        return count;
+	return count;
 }
 
diff --git a/src/common/plugin.h b/src/common/plugin.h
index e634320f171f1209b35a453cee6d5424fc38ea52..1e5c3fbc8e601bf47db5ba5157d114796b7463c1 100644
--- a/src/common/plugin.h
+++ b/src/common/plugin.h
@@ -4,10 +4,11 @@
  *  Copyright (C) 2002 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Jay Windlay <jwindley@lnxi.com>.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -80,6 +81,17 @@ typedef void *plugin_handle_t;
 
 #define PLUGIN_INVALID_HANDLE ((void*)0)
 
+typedef enum {
+	EPLUGIN_SUCCESS = 0,     /* Success                             */
+	EPLUGIN_NOTFOUND,        /* Plugin file does not exist          */
+	EPLUGIN_ACCESS_ERROR,    /* Access denied                       */
+	EPLUGIN_DLOPEN_FAILED,   /* Dlopen not successful               */
+	EPLUGIN_INIT_FAILED,     /* Plugin's init() callback failed     */
+	EPLUGIN_MISSING_SYMBOL   /* plugin_name/type/version missing    */
+} plugin_err_t;
+
+const char *plugin_strerror(plugin_err_t err);
+
 /*
  * "Peek" into a plugin to discover its type and version.  This does
  * not run the plugin's init() or fini() functions (as defined in this
@@ -107,15 +119,17 @@ int plugin_peek( const char *fq_path,
 /*
  * Simplest way to get a plugin -- load it from a file.
  *
+ * pph     - Pointer to a plugin handle
  * fq_path - the fully-qualified pathname (i.e., from root) to
  * the plugin to load.
  *
- * Returns a handle if successful, or NULL if not.
+ * Returns EPLUGIN_SUCCESS on success, or a plugin_err_t error
+ * code on failure.
  *
  * The plugin's initialization code will be executed prior
  * to this function's return.
  */
-plugin_handle_t plugin_load_from_file( const char *fq_path );
+plugin_err_t plugin_load_from_file(plugin_handle_t *pph, const char *fq_path);
 
 /*
  * load plugin and link hooks.
diff --git a/src/common/plugrack.c b/src/common/plugrack.c
index eb595df026e444fb6eb33956de9d02246bbf3770..18e5c490bb4efb8541cda489802e986d48899bde 100644
--- a/src/common/plugrack.c
+++ b/src/common/plugrack.c
@@ -4,10 +4,11 @@
  *  Copyright (C) 2002 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Jay Windley <jwindley@lnxi.com>.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -547,7 +548,7 @@ plugrack_load_all( plugrack_t rack )
         it = list_iterator_create( rack->entries );
         while ( ( e = list_next( it ) ) != NULL ) {
                 if ( e->plug == PLUGIN_INVALID_HANDLE ) {
-                        e->plug = plugin_load_from_file( e->fq_path );
+                        plugin_load_from_file(&e->plug, e->fq_path);
                 }
         }
 
@@ -584,7 +585,7 @@ plugrack_use_by_type( plugrack_t rack,
 		
                 /* See if plugin is loaded. */
                 if ( e->plug == PLUGIN_INVALID_HANDLE ) 
-			e->plug = plugin_load_from_file( e->fq_path );
+                        plugin_load_from_file(&e->plug, e->fq_path);
 
                 /* If load was successful, increment the reference count. */
                 if ( e->plug == PLUGIN_INVALID_HANDLE )
diff --git a/src/common/plugrack.h b/src/common/plugrack.h
index 3937f0bd1a9c36e88c6cd8d8ea0a20e65e82f221..2fd3a850263e878a431e19614e0f2c38e19b4896 100644
--- a/src/common/plugrack.h
+++ b/src/common/plugrack.h
@@ -4,10 +4,11 @@
  *  Copyright (C) 2002 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Jay Windley <jwindley@lnxi.com>.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/common/plugstack.c b/src/common/plugstack.c
index 9dea9a6c2c8599403571ac2dc0d9637a543e71b7..ac076ee0d9730f36c5d1b7714eb59742e366cba0 100644
--- a/src/common/plugstack.c
+++ b/src/common/plugstack.c
@@ -2,12 +2,13 @@
  *  plugstack.c -- stackable plugin architecture for node job kontrol (SPANK)
  *****************************************************************************
  *  Copyright (C) 2005-2007 The Regents of the University of California.
- *  Copyright (C) 2008 Lawrence Livermore National Security.
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -55,6 +56,7 @@
 #include "src/common/plugstack.h"
 #include "src/common/optz.h"
 #include "src/common/job_options.h"
+#include "src/common/env.h"
 
 #include "src/slurmd/slurmstepd/slurmstepd_job.h"
 /*#include "src/srun/srun_job.h"*/
@@ -70,18 +72,20 @@ struct spank_plugin_operations {
 	spank_f *init_post_opt;
 	spank_f *local_user_init;
 	spank_f *user_init;
+	spank_f *task_init_privileged;
 	spank_f *user_task_init;
 	spank_f *task_post_fork;
 	spank_f *task_exit;
 	spank_f *exit;
 };
 
-const int n_spank_syms = 8;
+const int n_spank_syms = 9;
 const char *spank_syms[] = {
 	"slurm_spank_init",
 	"slurm_spank_init_post_opt",
 	"slurm_spank_local_user_init",
 	"slurm_spank_user_init",
+	"slurm_spank_task_init_privileged",
 	"slurm_spank_task_init",
 	"slurm_spank_task_post_fork",
 	"slurm_spank_task_exit",
@@ -102,6 +106,9 @@ struct spank_plugin {
 /*
  *  SPANK Plugin options 
  */
+
+#define SPANK_OPTION_ENV_PREFIX "_SLURM_SPANK_OPTION_"
+
 struct spank_plugin_opt {
 	struct spank_option *opt;   /* Copy of plugin option info           */
 	struct spank_plugin *plugin;/* Link back to plugin structure        */
@@ -123,14 +130,14 @@ static List option_cache = NULL;
 
 
 /*
- *  SPANK handle for plugins
- *
- *   Handle types: local or remote.
+ *  SPANK plugin context (local, remote, allocator)
  */
-typedef enum spank_handle_type {
-	S_TYPE_LOCAL,           /* LOCAL == srun         */
-	S_TYPE_REMOTE           /* REMOTE == slurmd      */
-} spank_handle_type_t;
+enum spank_context_type {
+	S_TYPE_NONE,
+	S_TYPE_LOCAL,           /* LOCAL == srun              */
+	S_TYPE_REMOTE,          /* REMOTE == slurmd           */
+	S_TYPE_ALLOCATOR        /* ALLOCATOR == sbatch/salloc */
+};
 
 /*
  *  SPANK plugin hook types:
@@ -140,6 +147,7 @@ typedef enum step_fn {
 	SPANK_INIT_POST_OPT,
 	LOCAL_USER_INIT,
 	STEP_USER_INIT,
+	STEP_TASK_INIT_PRIV,
 	STEP_USER_TASK_INIT,
 	STEP_TASK_POST_FORK,
 	STEP_TASK_EXIT,
@@ -150,7 +158,6 @@ struct spank_handle {
 #   define SPANK_MAGIC 0x00a5a500
 	int                  magic;  /* Magic identifier to ensure validity. */
 	struct spank_plugin *plugin; /* Current plugin using handle          */
-	spank_handle_type_t  type;   /* remote(slurmd) || local(srun)        */
 	step_fn_t            phase;  /* Which spank fn are we called from?   */
 	void               * job;    /* Reference to current srun|slurmd job */
 	slurmd_task_info_t * task;   /* Reference to current task (if valid) */
@@ -160,6 +167,7 @@ struct spank_handle {
  *  SPANK plugins stack
  */
 static List spank_stack = NULL;
+static enum spank_context_type spank_ctx = S_TYPE_NONE;
 
 static pthread_mutex_t spank_mutex = PTHREAD_MUTEX_INITIALIZER;
 
@@ -252,10 +260,13 @@ static struct spank_plugin *_spank_plugin_create(char *path, int ac,
 {
 	struct spank_plugin *plugin;
 	plugin_handle_t p;
+	plugin_err_t e;
 	struct spank_plugin_operations ops;
 
-	if (!(p = plugin_load_from_file(path)))
+	if ((e = plugin_load_from_file(&p, path)) != EPLUGIN_SUCCESS) {
+		error ("spank: %s: %s\n", path, plugin_strerror(e));
 		return NULL;
+	}
 
 	if (plugin_get_syms(p, n_spank_syms, spank_syms, (void **)&ops) == 0) {
 		error("spank: \"%s\" exports 0 symbols\n", path);
@@ -272,7 +283,11 @@ static struct spank_plugin *_spank_plugin_create(char *path, int ac,
 	plugin->argv = av;
 	plugin->ops = ops;
 
-	plugin->opts = plugin_get_sym(p, "spank_options");
+	/*
+	 *  Do not load static plugin options table in allocator context.
+	 */
+	if (spank_ctx != S_TYPE_ALLOCATOR)
+		plugin->opts = plugin_get_sym(p, "spank_options");
 
 	return (plugin);
 }
@@ -496,15 +511,9 @@ _spank_handle_init(struct spank_handle *spank, void * arg,
 
 	if (arg != NULL) {
 		spank->job = arg;
-		if (fn == LOCAL_USER_INIT)
-			spank->type = S_TYPE_LOCAL;
-		else {
-			spank->type = S_TYPE_REMOTE;
-			if (taskid >= 0)
-				spank->task = ((slurmd_job_t *) arg)->task[taskid];
+		if (spank_ctx == S_TYPE_REMOTE && taskid >= 0) {
+			spank->task = ((slurmd_job_t *) arg)->task[taskid];
 		}
-	} else {
-		spank->type = S_TYPE_LOCAL;
 	}
 	return (0);
 }
@@ -520,6 +529,8 @@ static const char *_step_fn_name(step_fn_t type)
 		return ("local_user_init");
 	case STEP_USER_INIT:
 		return ("user_init");
+	case STEP_TASK_INIT_PRIV:
+		return ("task_init_privileged");
 	case STEP_USER_TASK_INIT:
 		return ("task_init");
 	case STEP_TASK_POST_FORK:
@@ -591,6 +602,14 @@ static int _do_call_stack(step_fn_t type, void * job, int taskid)
 				       fn_name, rc);
 			}
 			break;
+		case STEP_TASK_INIT_PRIV:
+			if (sp->ops.task_init_privileged) {
+				rc = (*sp->ops.task_init_privileged)
+					(spank, sp->ac, sp->argv);
+				debug2("spank: %s: %s = %d\n", name,
+				       fn_name, rc);
+			}
+			break;
 		case STEP_USER_TASK_INIT:
 			if (sp->ops.user_task_init) {
 				rc = (*sp->ops.user_task_init) (spank,
@@ -640,13 +659,15 @@ static int _do_call_stack(step_fn_t type, void * job, int taskid)
 	return (rc);
 }
 
-int spank_init(slurmd_job_t * job)
+static int _spank_init(enum spank_context_type context, slurmd_job_t * job)
 {
 	slurm_ctl_conf_t *conf = slurm_conf_lock();
 	const char *path = conf->plugstack;
 	default_spank_path = conf->plugindir;
 	slurm_conf_unlock();
 
+	spank_ctx = context;
+
 	if (_spank_stack_create(path, &spank_stack) < 0) {
 		/* No error if spank config doesn't exist */
 		if (errno == ENOENT)
@@ -658,17 +679,66 @@ int spank_init(slurmd_job_t * job)
 	if (_do_call_stack(SPANK_INIT, job, -1) < 0)
 		return (-1);
 
-	if (job && spank_get_remote_options(job->options) < 0) {
+	/*
+	 *  Nothing more to do unless we are in remote context:
+	 */
+	if (spank_ctx != S_TYPE_REMOTE)
+		return (0);
+
+	/*
+	 *  Remote-specific code:
+	 */
+	if (!job) {
+		error("spank: spank_init called without job reference!");
+		return (-1);
+	}
+
+	/*
+	 *  Get any remote options from job launch message:
+	 */
+	if (spank_get_remote_options(job->options) < 0) {
 		error("spank: Unable to get remote options");
 		return (-1);
 	}
 
-	if (_do_call_stack(SPANK_INIT_POST_OPT, job, -1) < 0)
+	/*
+	 *  Get any remote option passed thru environment
+	 */
+	if (spank_get_remote_options_env(job->env) < 0) {
+		error("spank: Unable to get remote options from environment");
 		return (-1);
+	}
 
-	return (0);
+	/*
+	 *  Now that all options have been processed, we can
+	 *   call the post_opt handlers here in remote context.
+	 */
+	if (_do_call_stack(SPANK_INIT_POST_OPT, job, -1) < 0)
+		return (-1);
+	return (0);
+}
+
+int spank_init (slurmd_job_t * job)
+{
+	if (job)
+		return _spank_init (S_TYPE_REMOTE, job);
+	else
+		return _spank_init (S_TYPE_LOCAL, NULL);
+}
+
+int spank_init_allocator (void)
+{
+	return _spank_init (S_TYPE_ALLOCATOR, NULL);
 }
 
+int spank_init_post_opt (void)
+{
+	/*
+	 *  In allocator context, set remote options in env here.
+	 */
+	if (spank_ctx == S_TYPE_ALLOCATOR)
+		spank_set_remote_options_env();
+
+	return (_do_call_stack(SPANK_INIT_POST_OPT, NULL, -1));
+}
 
 int spank_user(slurmd_job_t * job)
 {
@@ -680,6 +750,11 @@ int spank_local_user(struct spank_launcher_job_info *job)
 	return (_do_call_stack(LOCAL_USER_INIT, job, -1));
 }
 
+int spank_task_privileged(slurmd_job_t *job, int taskid)
+{
+	return (_do_call_stack(STEP_TASK_INIT_PRIV, job, taskid));
+}
+
 int spank_user_task(slurmd_job_t * job, int taskid)
 {
 	return (_do_call_stack(STEP_USER_TASK_INIT, job, taskid));
@@ -788,9 +863,13 @@ _spank_option_register(struct spank_plugin *p, struct spank_option *opt)
 	int disabled = 0;
 	struct spank_plugin_opt *spopt;
 
+	if (!option_cache) {
+		option_cache =
+		    list_create((ListDelF) _spank_plugin_opt_destroy);
+	}
+
 	spopt = list_find_first(option_cache, 
 			(ListFindF) _opt_by_name, opt->name);
-
 	if (spopt) {
 		struct spank_plugin *q = spopt->plugin;
 		info("spank: option \"%s\" provided by both %s and %s", 
@@ -837,11 +916,6 @@ static int _spank_plugin_options_cache(struct spank_plugin *p)
 	if ((opt == NULL) || opt->name == NULL)
 		return (0);
 
-	if (!option_cache) {
-		option_cache =
-		    list_create((ListDelF) _spank_plugin_opt_destroy);
-	}
-
 	for (; opt && opt->name != NULL; opt++)
 		_spank_option_register(p, opt);
 
@@ -915,6 +989,9 @@ int spank_process_option(int optval, const char *arg)
 	struct spank_plugin_opt *opt;
 	int rc = 0;
 
+	if (option_cache == NULL || (list_count(option_cache) == 0))
+		return (-1);
+
 	opt =
 	    list_find_first(option_cache, (ListFindF) _opt_by_val,
 			    &optval);
@@ -1078,7 +1155,7 @@ int spank_print_options(FILE * fp, int left_pad, int width)
 	if ((option_cache == NULL) || (list_count(option_cache) == 0))
 		return (0);
 
-	fprintf(fp, "Options provided by plugins:\n");
+	fprintf(fp, "\nOptions provided by plugins:\n");
 
 	i = list_iterator_create(option_cache);
 	while ((p = list_next(i))) {
@@ -1093,6 +1170,78 @@ int spank_print_options(FILE * fp, int left_pad, int width)
 
 #define OPT_TYPE_SPANK 0x4400
 
+static char _canonical_char (char c)
+{
+	if (!isalnum (c))
+		return '_';
+	else
+		return c;
+}
+
+/*
+ *  Create spank option environment variable name from option name.
+ */
+static char * _opt_env_name (struct spank_plugin_opt *p, char *buf, size_t siz)
+{
+	const char * name = p->opt->name;
+	const char * pname = p->plugin->name;
+	int i, n;
+
+	strlcpy (buf, SPANK_OPTION_ENV_PREFIX, siz);
+
+	/*
+	 *  First append the plugin name associated with this option:
+	 */
+	n = 0;
+	for (i = strlen (buf); i < siz - 1 && n < strlen (pname); i++)
+	    buf[i] = _canonical_char (pname[n++]);
+
+	/*
+	 *  Append '_' (bounds-checked so we never write past buf[siz-1])
+	 */
+	if (i < siz - 1)
+		buf[i++] = '_';
+	buf[i] = '\0';
+
+	/*
+	 *  Now incorporate the option name:
+	 */
+	n = 0;
+	for (i = strlen (buf); i < siz - 1 && n < strlen (name); i++)
+	    buf[i] = _canonical_char (name[n++]);
+	buf[i] = '\0';
+
+	return (buf);
+}
+
+static int _option_setenv (struct spank_plugin_opt *option)
+{
+	char var [1024];
+
+	_opt_env_name (option, var, sizeof (var));
+
+	if (setenv (var, option->optarg, 1) < 0)
+	    error ("failed to set %s=%s in env", var, option->optarg);
+
+	return (0);
+}
+
+int spank_set_remote_options_env(void)
+{
+	struct spank_plugin_opt *p;
+	ListIterator i;
+
+	if ((option_cache == NULL) || (list_count(option_cache) == 0))
+		return (0);
+
+	i = list_iterator_create(option_cache);
+	while ((p = list_next(i))) {
+		if (p->found)
+			_option_setenv (p);
+	}
+	list_iterator_destroy(i);
+	return (0);
+}
+
 int spank_set_remote_options(job_options_t opts)
 {
 	struct spank_plugin_opt *p;
@@ -1168,6 +1317,38 @@ static struct spank_plugin_opt *_find_remote_option_by_name(const char
 	return (opt);
 }
 
+int spank_get_remote_options_env (char **env)
+{
+	char var [1024];
+	const char *arg;
+	struct spank_plugin_opt *option;
+	ListIterator i;
+
+	if (!option_cache)
+		return (0);
+
+	i = list_iterator_create (option_cache);
+	while ((option = list_next (i))) {
+		struct spank_option *p = option->opt;
+
+		if (!(arg = getenvp (env, _opt_env_name (option, var, sizeof(var)))))
+			continue;
+
+		if (p->cb && (((*p->cb) (p->val, arg, 1)) < 0))
+			error ("spank: failed to process option %s=%s", p->name, arg);
+
+		/*
+		 *  Now remove the environment variable.
+		 *   It is no longer needed.
+		 */
+		unsetenvp (env, var);
+
+	}
+	list_iterator_destroy (i);
+
+	return (0);
+}
+
 int spank_get_remote_options(job_options_t opts)
 {
 	const struct job_option_info *j;
@@ -1255,29 +1436,89 @@ static int _valid_in_local_context (spank_item_t item)
 	return (rc);
 }
 
-/*
- *  Return 1 if spank_item_t is just getting version (valid anywhere)
- */
-static int _version_check (spank_item_t item)
+static int _valid_in_allocator_context (spank_item_t item)
 {
-	int rc = 0;
 	switch (item) {
-	case S_SLURM_VERSION:
-	case S_SLURM_VERSION_MAJOR:
-	case S_SLURM_VERSION_MINOR:
-	case S_SLURM_VERSION_MICRO:
-		rc = 1;
-		break;
-	default:
-		rc = 0;
+	  case S_JOB_UID:
+	  case S_JOB_GID:
+		  return 1;
+	  default:
+		  return 0;
 	}
-	return (rc);
+}
+
+static spank_err_t _check_spank_item_validity (spank_item_t item, void *job)
+{
+	/*
+	 *  Valid in all contexts:
+	 */
+	switch (item) {
+	  case S_SLURM_VERSION:
+	  case S_SLURM_VERSION_MAJOR:
+	  case S_SLURM_VERSION_MINOR:
+	  case S_SLURM_VERSION_MICRO:
+		  return ESPANK_SUCCESS;
+	  default:
+		  break; /* not universally valid; check context below */
+	}
+
+	if (spank_ctx == S_TYPE_LOCAL) {
+		if (!_valid_in_local_context (item))
+			return ESPANK_NOT_REMOTE;
+		else if (job == NULL)
+			return ESPANK_NOT_AVAIL;
+	}
+	else if (spank_ctx == S_TYPE_ALLOCATOR) {
+		if (_valid_in_allocator_context (item)) {
+			if (job)
+				return ESPANK_SUCCESS;
+			else
+				return ESPANK_NOT_AVAIL;
+		}
+		else if (_valid_in_local_context (item))
+			return ESPANK_BAD_ARG;
+		else
+			return ESPANK_NOT_REMOTE;
+	}
+
+	/* All items presumably valid in remote context */
+	return ESPANK_SUCCESS;
 }
 
 /*
  *  Global functions for SPANK plugins
  */
 
+const char * spank_strerror (spank_err_t err)
+{
+	switch (err) {
+	case ESPANK_SUCCESS:
+		return "Success";
+	case ESPANK_ERROR:
+		return "Generic error";
+	case ESPANK_BAD_ARG:
+		return "Bad argument";
+	case ESPANK_NOT_TASK:
+		return "Not in task context";
+	case ESPANK_ENV_EXISTS:
+		return "Environment variable exists";
+	case ESPANK_ENV_NOEXIST:
+		return "No such environment variable";
+	case ESPANK_NOSPACE:
+		return "Buffer too small";
+	case ESPANK_NOT_REMOTE:
+		return "Valid only in remote context";
+	case ESPANK_NOEXIST:
+		return "Id/PID does not exist on this node";
+	case ESPANK_NOT_EXECD:
+		return "Lookup by PID requested, but no tasks running";
+	case ESPANK_NOT_AVAIL:
+		return "Item not available from this callback";
+	}
+
+	return "Unknown";
+}
+
 int spank_symbol_supported (const char *name)
 {
 	int i;
@@ -1297,12 +1538,28 @@ int spank_remote(spank_t spank)
 {
 	if ((spank == NULL) || (spank->magic != SPANK_MAGIC))
 		return (-1);
-	if (spank->type == S_TYPE_REMOTE)
+	if (spank_ctx == S_TYPE_REMOTE)
 		return (1);
 	else
 		return (0);
 }
 
+spank_context_t spank_context (void)
+{
+	switch (spank_ctx) {
+	  case S_TYPE_REMOTE:
+		  return S_CTX_REMOTE;
+	  case S_TYPE_LOCAL:
+		  return S_CTX_LOCAL;
+	  case S_TYPE_ALLOCATOR:
+		  return S_CTX_ALLOCATOR;
+	  default:
+		  return S_CTX_ERROR;
+	}
+
+	return S_CTX_ERROR;
+}
+
 spank_err_t spank_get_item(spank_t spank, spank_item_t item, ...)
 {
 	int *p2int;
@@ -1325,36 +1582,37 @@ spank_err_t spank_get_item(spank_t spank, spank_item_t item, ...)
 	if ((spank == NULL) || (spank->magic != SPANK_MAGIC))
 		return (ESPANK_BAD_ARG);
 
-	if (!_version_check(item)) {
-		/* Need job pointer to process other items */
-		if ( (spank->type != S_TYPE_REMOTE) 
-		  && (!_valid_in_local_context(item)))
-			return (ESPANK_NOT_REMOTE);
-
-		if (spank->job == NULL)
-			return (ESPANK_BAD_ARG);
+	/*
+	 *  Check for validity of the given item in the current context
+	 */
+	rc = _check_spank_item_validity (item, spank->job);
+	if (rc != ESPANK_SUCCESS)
+		return (rc);
 
-		if (spank->type == S_TYPE_LOCAL)
-			launcher_job = spank->job;
-		else
-			slurmd_job = spank->job;
-	}
+	if (spank_ctx == S_TYPE_LOCAL)
+		launcher_job = spank->job;
+	else if (spank_ctx == S_TYPE_REMOTE)
+		slurmd_job = spank->job;
 
 	va_start(vargs, item);
 	switch (item) {
 	case S_JOB_UID:
 		p2uid = va_arg(vargs, uid_t *);
-		if (spank->type == S_TYPE_LOCAL)
+		if (spank_ctx == S_TYPE_LOCAL)
 			*p2uid = launcher_job->uid;
-		else
+		else if (spank_ctx == S_TYPE_REMOTE)
 			*p2uid = slurmd_job->uid;
+		else
+			*p2uid = getuid();
 		break;
 	case S_JOB_GID:
 		p2gid = va_arg(vargs, gid_t *);
-		if (spank->type == S_TYPE_LOCAL)
+		if (spank_ctx == S_TYPE_LOCAL)
 			*p2gid = launcher_job->gid;
-		else
+		else if (spank_ctx == S_TYPE_REMOTE)
 			*p2gid = slurmd_job->gid;
+		else
+			*p2gid = getgid();
 		break;
 	case S_JOB_SUPPLEMENTARY_GIDS:
 		p2gids = va_arg(vargs, gid_t **);
@@ -1364,21 +1622,21 @@ spank_err_t spank_get_item(spank_t spank, spank_item_t item, ...)
 		break;
 	case S_JOB_ID:
 		p2uint32 = va_arg(vargs, uint32_t *);
-		if (spank->type == S_TYPE_LOCAL)
+		if (spank_ctx == S_TYPE_LOCAL)
 			*p2uint32 = launcher_job->jobid;
 		else
 			*p2uint32 = slurmd_job->jobid;
 		break;
 	case S_JOB_STEPID:
 		p2uint32 = va_arg(vargs, uint32_t *);
-		if (spank->type == S_TYPE_LOCAL)
+		if (spank_ctx == S_TYPE_LOCAL)
 			*p2uint32 = launcher_job->stepid;
 		else
 			*p2uint32 = slurmd_job->stepid;
 		break;
 	case S_JOB_NNODES:
 		p2uint32 = va_arg(vargs, uint32_t *);
-		if (spank->type == S_TYPE_LOCAL) {
+		if (spank_ctx == S_TYPE_LOCAL) {
 			if (launcher_job->step_layout)
 				*p2uint32 = launcher_job->step_layout->node_cnt;
 			else {
@@ -1398,7 +1656,7 @@ spank_err_t spank_get_item(spank_t spank, spank_item_t item, ...)
 		break;
 	case S_JOB_TOTAL_TASK_COUNT:
 		p2uint32 = va_arg(vargs, uint32_t *);
-		if (spank->type == S_TYPE_LOCAL) {
+		if (spank_ctx == S_TYPE_LOCAL) {
 			if (launcher_job->step_layout)
 				*p2uint32 = launcher_job->step_layout->task_cnt;
 			else {
@@ -1419,7 +1677,7 @@ spank_err_t spank_get_item(spank_t spank, spank_item_t item, ...)
 	case S_JOB_ARGV:
 		p2int = va_arg(vargs, int *);
 		p2argv = va_arg(vargs, char ***);
-		if (spank->type == S_TYPE_LOCAL) {
+		if (spank_ctx == S_TYPE_LOCAL) {
 			*p2int = launcher_job->argc;
 			*p2argv = launcher_job->argv;
 		} else {
@@ -1537,7 +1795,7 @@ spank_err_t spank_getenv(spank_t spank, const char *var, char *buf,
 	if ((spank == NULL) || (spank->magic != SPANK_MAGIC))
 		return (ESPANK_BAD_ARG);
 
-	if (spank->type != S_TYPE_REMOTE)
+	if (spank_ctx != S_TYPE_REMOTE)
 		return (ESPANK_NOT_REMOTE);
 
 	if (spank->job == NULL)
@@ -1563,7 +1821,7 @@ spank_err_t spank_setenv(spank_t spank, const char *var, const char *val,
 	if ((spank == NULL) || (spank->magic != SPANK_MAGIC))
 		return (ESPANK_BAD_ARG);
 
-	if (spank->type != S_TYPE_REMOTE)
+	if (spank_ctx != S_TYPE_REMOTE)
 		return (ESPANK_NOT_REMOTE);
 
 	if (spank->job == NULL)
@@ -1588,7 +1846,7 @@ spank_err_t spank_unsetenv (spank_t spank, const char *var)
 	if ((spank == NULL) || (spank->magic != SPANK_MAGIC))
 		return (ESPANK_BAD_ARG);
 
-	if (spank->type != S_TYPE_REMOTE)
+	if (spank_ctx != S_TYPE_REMOTE)
 		return (ESPANK_NOT_REMOTE);
 
 	if (spank->job == NULL)
diff --git a/src/common/plugstack.h b/src/common/plugstack.h
index 288663c762c2b149893a465ed5d1b8ec062facb9..1ac51e2ec23a9b4b11ebd4afb91b22c2ee64b672 100644
--- a/src/common/plugstack.h
+++ b/src/common/plugstack.h
@@ -3,10 +3,11 @@
  *****************************************************************************
  *  Copyright (C) 2005 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -65,10 +66,16 @@ struct spank_launcher_job_info {
 
 int spank_init (slurmd_job_t *job);
 
+int spank_init_allocator (void);
+
+int spank_init_post_opt (void);
+
 int spank_user (slurmd_job_t *job);
 
 int spank_local_user (struct spank_launcher_job_info *job);
 
+int spank_task_privileged (slurmd_job_t *job, int taskid);
+
 int spank_user_task (slurmd_job_t *job, int taskid);
 
 int spank_task_post_fork (slurmd_job_t *job, int taskid);
@@ -124,6 +131,12 @@ int spank_print_options (FILE *fp, int width, int left_pad);
  */
 int spank_set_remote_options (job_options_t options);
 
+/*  Set all registered remote options (i.e. those passed to
+ *   spank_process_option) in the current environment for later
+ *   retrieval by spank_get_remote_options_env().
+ */
+int spank_set_remote_options_env (void);
+
 /*  Register any remote spank options that exist in `options'
  *    to their respective spank plugins. This function ends up invoking
  *    all plugin option callbacks, and will fail (return < 0) if
@@ -134,4 +147,14 @@ int spank_set_remote_options (job_options_t options);
  */
 int spank_get_remote_options (job_options_t options);
 
+/*  Register any remote spank options that exist in the environment `env'
+ *    to their respective spank plugins. This function ends up invoking
+ *    all plugin option callbacks, and will fail (return < 0) if
+ *    a *required* plugin callback returns < 0.
+ *
+ *  A warning is printed if no plugin matches a remote option
+ *   found in the environment, but the function does not return failure.
+ */
+int spank_get_remote_options_env (char **env);
+
 #endif /* !_PLUGSTACK_H */
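
The two *_env variants declared above let remote options cross an exec boundary by riding in the environment instead of a job_options list. A rough sketch of the round trip under an assumed variable-name prefix (the patch's actual encoding is not reproduced here):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define OPT_PREFIX "_SLURM_SPANK_OPTION_"   /* assumed, for illustration */

    /* launcher side: export one registered option into the environment */
    static void set_option_env(const char *plugin, const char *opt,
                               const char *val)
    {
        char name[256];

        snprintf(name, sizeof(name), "%s%s_%s", OPT_PREFIX, plugin, opt);
        setenv(name, val, 1);
    }

    /* remote side: scan an environment array and re-register matches */
    static void get_options_env(char **env)
    {
        int i;

        for (i = 0; env[i]; i++) {
            if (!strncmp(env[i], OPT_PREFIX, strlen(OPT_PREFIX)))
                printf("re-register: %s\n", env[i]);
        }
    }
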
diff --git a/src/common/print_fields.c b/src/common/print_fields.c
index 707403f5a0a1efe8c0ff800c4c4b06bed9f388cc..0681f70dad9a7f90424a053e04800ca83fb363a3 100644
--- a/src/common/print_fields.c
+++ b/src/common/print_fields.c
@@ -5,10 +5,11 @@
  *  Copyright (C) 2002-2007 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Danny Auble <da@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -69,6 +70,7 @@ extern void print_fields_header(List print_fields_list)
 	print_field_t *field = NULL;
 	int curr_inx = 1;
 	int field_count = 0;
+
 	if(!print_fields_list || !print_fields_have_header) 
 		return;
 
@@ -82,8 +84,10 @@ extern void print_fields_header(List print_fields_list)
 			printf("%s", field->name);	
 		else if(print_fields_parsable_print)
 			printf("%s|", field->name);
-		else
-			printf("%-*.*s ", field->len, field->len, field->name);
+		else {
+			int abs_len = abs(field->len);
+			printf("%*.*s ", abs_len, abs_len, field->name);
+		}
 		curr_inx++;
 	}
 	list_iterator_reset(itr);
@@ -91,7 +95,8 @@ extern void print_fields_header(List print_fields_list)
 	if(print_fields_parsable_print)
 		return;
 	while((field = list_next(itr))) {
-		printf("%-*.*s ", field->len, field->len, 
+		int abs_len = abs(field->len);
+		printf("%*.*s ", abs_len, abs_len, 
 		       "-----------------------------------------------------");
 	}
 	list_iterator_destroy(itr);
@@ -100,26 +105,29 @@ extern void print_fields_header(List print_fields_list)
 
 extern void print_fields_date(print_field_t *field, time_t value, int last)
 {
-	char temp_char[field->len];
+	int abs_len = abs(field->len);
+	char temp_char[abs_len+1];
 	time_t now = value;
 
 	if(!now)
 		now = time(NULL);
-	slurm_make_time_str(&value, (char *)temp_char, field->len);
+	slurm_make_time_str(&value, (char *)temp_char, sizeof(temp_char));
 	if(print_fields_parsable_print == PRINT_FIELDS_PARSABLE_NO_ENDING
 	   && last)
 		printf("%s", temp_char);	
 	else if(print_fields_parsable_print)
 		printf("%s|", temp_char);
-	else 
-		printf("%-*.*s ", field->len, field->len, temp_char);
+	else if(field->len == abs_len)
+		printf("%*.*s ", abs_len, abs_len, temp_char);
+	else
+		printf("%-*.*s ", abs_len, abs_len, temp_char); 
 }
 
 extern void print_fields_str(print_field_t *field, char *value, int last)
 {
-	char temp_char[field->len];
+	int abs_len = abs(field->len);
+	char temp_char[abs_len+1];
 	char *print_this = NULL;
-
 	if(!value) {
 		if(print_fields_parsable_print)
 			print_this = "";
@@ -135,18 +143,23 @@ extern void print_fields_str(print_field_t *field, char *value, int last)
 		printf("%s|", print_this);
 	else {
 		if(value) {
-			memcpy(&temp_char, value, field->len);
+			memcpy(&temp_char, value, abs_len);
 			
-			if(strlen(value) > field->len) 
-				temp_char[field->len-1] = '+';
+			if(strlen(value) > abs_len) 
+				temp_char[abs_len-1] = '+';
 			print_this = temp_char;
 		}
-		printf("%-*.*s ", field->len, field->len, print_this);
+
+		if(field->len == abs_len)
+			printf("%*.*s ", abs_len, abs_len, print_this);
+		else
+			printf("%-*.*s ", abs_len, abs_len, print_this);
 	}
 }
 
 extern void print_fields_int(print_field_t *field, int value, int last)
 {
+	int abs_len = abs(field->len);
 	/* (value == unset)  || (value == cleared) */
 	if((value == NO_VAL) || (value == INFINITE)) {
 		if(print_fields_parsable_print 
@@ -156,7 +169,7 @@ extern void print_fields_int(print_field_t *field, int value, int last)
 		else if(print_fields_parsable_print)
 			printf("|");	
 		else				
-			printf("%*s ", field->len, " ");
+			printf("%*s ", abs_len, " ");
 	} else {
 		if(print_fields_parsable_print
 		   == PRINT_FIELDS_PARSABLE_NO_ENDING
@@ -164,13 +177,16 @@ extern void print_fields_int(print_field_t *field, int value, int last)
 			printf("%d", value);	
 		else if(print_fields_parsable_print)
 			printf("%d|", value);	
+		else if(field->len == abs_len)
+			printf("%*d ", abs_len, value);
 		else
-			printf("%*d ", field->len, value);
+			printf("%-*d ", abs_len, value);
 	}
 }
 
 extern void print_fields_uint32(print_field_t *field, uint32_t value, int last)
 {
+	int abs_len = abs(field->len);
 	/* (value == unset)  || (value == cleared) */
 	if((value == NO_VAL) || (value == INFINITE)) {
 		if(print_fields_parsable_print 
@@ -188,13 +204,16 @@ extern void print_fields_uint32(print_field_t *field, uint32_t value, int last)
 			printf("%u", value);	
 		else if(print_fields_parsable_print)
 			printf("%u|", value);	
+		else if(field->len == abs_len)
+			printf("%*u ", abs_len, value);
 		else
-			printf("%*u ", field->len, value);
+			printf("%-*u ", abs_len, value);
 	}
 }
 
 extern void print_fields_uint64(print_field_t *field, uint64_t value, int last)
 {
+	int abs_len = abs(field->len);
 	/* (value == unset)  || (value == cleared) */
 	if((value == NO_VAL) || (value == INFINITE)) {
 		if(print_fields_parsable_print 
@@ -212,14 +231,72 @@ extern void print_fields_uint64(print_field_t *field, uint64_t value, int last)
 			printf("%llu", (long long unsigned) value);	
 		else if(print_fields_parsable_print)
 			printf("%llu|", (long long unsigned) value);	
+		else if(field->len == abs_len)
+			printf("%*llu ", abs_len, (long long unsigned) value);
+		else
+			printf("%-*llu ", abs_len, (long long unsigned) value);
+	}
+}
+
+extern void print_fields_double(print_field_t *field, double value, int last)
+{
+	int abs_len = abs(field->len);
+	/* (value == unset)  || (value == cleared) */
+	if((value == NO_VAL) || (value == INFINITE)) {
+		if(print_fields_parsable_print 
+		   == PRINT_FIELDS_PARSABLE_NO_ENDING
+		   && last)
+			;
+		else if(print_fields_parsable_print)
+			printf("|");	
+		else				
+			printf("%*s ", field->len, " ");
+	} else {
+		if(print_fields_parsable_print
+		   == PRINT_FIELDS_PARSABLE_NO_ENDING
+		   && last)
+			printf("%f", value);	
+		else if(print_fields_parsable_print)
+			printf("%f|", value);	
+		else if(field->len == abs_len)
+			printf("%*f ", abs_len, value);
+		else
+			printf("%-*f ", abs_len, value);
+	}
+}
+
+extern void print_fields_long_double(
+	print_field_t *field, long double value, int last)
+{
+	int abs_len = abs(field->len);
+	/* (value == unset)  || (value == cleared) */
+	if((value == NO_VAL) || (value == INFINITE)) {
+		if(print_fields_parsable_print 
+		   == PRINT_FIELDS_PARSABLE_NO_ENDING
+		   && last)
+			;
+		else if(print_fields_parsable_print)
+			printf("|");	
+		else				
+			printf("%*s ", field->len, " ");
+	} else {
+		if(print_fields_parsable_print
+		   == PRINT_FIELDS_PARSABLE_NO_ENDING
+		   && last)
+			printf("%Lf", value);	
+		else if(print_fields_parsable_print)
+			printf("%Lf|", value);	
+		else if(field->len == abs_len)
+			printf("%*Lf ", abs_len, value);
 		else
-			printf("%*llu ", field->len, 
-			       (long long unsigned) value);
+			printf("%-*Lf ", abs_len, value);
 	}
 }
 
 extern void print_fields_time(print_field_t *field, uint32_t value, int last)
 {
+	int abs_len = abs(field->len);
 	/* (value == unset)  || (value == cleared) */
 	if((value == NO_VAL) || (value == INFINITE)) {
 		if(print_fields_parsable_print 
@@ -239,13 +316,46 @@ extern void print_fields_time(print_field_t *field, uint32_t value, int last)
 			printf("%s", time_buf);
 		else if(print_fields_parsable_print)
 			printf("%s|", time_buf);
+		else if(field->len == abs_len)
+			printf("%*s ", abs_len, time_buf);
+		else
+			printf("%-*s ", abs_len, time_buf);
+	}
+}
+
+extern void print_fields_time_from_secs(print_field_t *field, 
+					uint32_t value, int last)
+{
+	int abs_len = abs(field->len);
+	/* (value == unset)  || (value == cleared) */
+	if((value == NO_VAL) || (value == INFINITE)) {
+		if(print_fields_parsable_print 
+		   == PRINT_FIELDS_PARSABLE_NO_ENDING
+		   && last)
+			;
+		else if(print_fields_parsable_print)
+			printf("|");	
+		else
+			printf("%*s ", field->len, " ");
+	} else {
+		char time_buf[32];
+		secs2time_str((time_t) value, time_buf, sizeof(time_buf));
+		if(print_fields_parsable_print 
+		   == PRINT_FIELDS_PARSABLE_NO_ENDING
+		   && last)
+			printf("%s", time_buf);
+		else if(print_fields_parsable_print)
+			printf("%s|", time_buf);
+		else if(field->len == abs_len)
+			printf("%*s ", abs_len, time_buf);
 		else
-			printf("%*s ", field->len, time_buf);
+			printf("%-*s ", abs_len, time_buf);
 	}
 }
 
 extern void print_fields_char_list(print_field_t *field, List value, int last)
 {
+	int abs_len = abs(field->len);
 	ListIterator itr = NULL;
 	char *print_this = NULL;
 	char *object = NULL;
@@ -273,10 +383,13 @@ extern void print_fields_char_list(print_field_t *field, List value, int last)
 	else if(print_fields_parsable_print)
 		printf("%s|", print_this);
 	else {
-		if(strlen(print_this) > field->len) 
-			print_this[field->len-1] = '+';
+		if(strlen(print_this) > abs_len) 
+			print_this[abs_len-1] = '+';
 		
-		printf("%-*.*s ", field->len, field->len, print_this);
+		if(field->len == abs_len)
+			printf("%*.*s ", abs_len, abs_len, print_this);
+		else
+			printf("%-*.*s ", abs_len, abs_len, print_this);
 	}
 	xfree(print_this);
 }
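
The abs()/sign pattern repeated through this file implements one convention: print_field_t.len is now signed, abs(len) gives the column width, and a negative len requests left-justified output. A self-contained demonstration of just that convention:

    #include <stdio.h>
    #include <stdlib.h>

    static void demo(int len, const char *val)
    {
        int abs_len = abs(len);

        if (len == abs_len)
            printf("[%*.*s]\n", abs_len, abs_len, val);   /* right-justified */
        else
            printf("[%-*.*s]\n", abs_len, abs_len, val);  /* left-justified */
    }

    int main(void)
    {
        demo(10, "jobid");    /* prints [     jobid] */
        demo(-10, "jobid");   /* prints [jobid     ] */
        return 0;
    }
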
diff --git a/src/common/print_fields.h b/src/common/print_fields.h
index 8de6d619474921276b69aa6d2b8d929d24f1a3ee..95cdf62049c44c5966788517c1e29d93b8a692cd 100644
--- a/src/common/print_fields.h
+++ b/src/common/print_fields.h
@@ -5,10 +5,11 @@
  *  Copyright (C) 2002-2007 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Danny Auble <da@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -68,7 +69,7 @@
 #include "src/common/list.h"
 
 typedef struct {
-	uint16_t len;  /* what is the width of the print */          
+	int len;  /* print field width; negative means left-justified */
 	char *name;  /* name to be printed in header */
 	void (*print_routine) (); /* what is the function to print with  */
 	uint16_t type; /* defined in the local function */
@@ -88,12 +89,19 @@ extern void print_fields_header(List print_fields_list);
 extern void print_fields_date(print_field_t *field, time_t value, int last);
 extern void print_fields_str(print_field_t *field, char *value, int last);
 extern void print_fields_int(print_field_t *field, int value, int last);
+extern void print_fields_double(print_field_t *field, double value, int last);
+extern void print_fields_long_double(
+	print_field_t *field, long double value, int last);
 extern void print_fields_uint32(
 	print_field_t *field, uint32_t value, int last);
 extern void print_fields_uint64(
 	print_field_t *field, uint64_t value, int last);
-extern void print_fields_time(print_field_t *field, uint32_t value, int last);
+extern void print_fields_time_from_mins(print_field_t *field,
+					uint32_t value, int last);
+extern void print_fields_time_from_secs(print_field_t *field, 
+					uint32_t value, int last);
 extern void print_fields_char_list(print_field_t *field, List value, int last);
 
 #define print_fields_uint print_fields_uint32
+#define print_fields_time print_fields_time_from_mins
 #endif
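
A field definition using the signed width together with the new double formatter might look like the following; the column name is invented for illustration:

    print_field_t cpu_load_field = {
        -12,                    /* 12 columns, left-justified */
        "CPULoad",              /* header text */
        print_fields_double,    /* formatter declared above */
        0,                      /* type tag, set by the caller */
    };
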
diff --git a/src/common/proc_args.c b/src/common/proc_args.c
index 1a690c2ba43cf0e76f6ac7e0ca240a2a02590878..540f5fac5b53216fd0fe0adc2b752ab39551a66c 100644
--- a/src/common/proc_args.c
+++ b/src/common/proc_args.c
@@ -7,7 +7,8 @@
  *  from existing SLURM source code, particularly src/srun/opt.c 
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -420,10 +421,12 @@ bool verify_socket_core_thread_count(const char *arg,
 
  	for (j=0;j<3;j++) {	
 		for (i=0;i<47;i++) {
-			if (*cur_ptr == '\0' || *cur_ptr ==':') break;
+			if (*cur_ptr == '\0' || *cur_ptr ==':')
+				break;
 			buf[j][i] = *cur_ptr++;
 		}
-		if (*cur_ptr == '\0') break;
+		if (*cur_ptr == '\0')
+			break;
 		xassert(*cur_ptr == ':');
 		buf[j][i] = '\0';
 		cur_ptr++;
@@ -511,7 +514,8 @@ bool verify_hint(const char *arg, int *min_sockets, int *max_sockets,
 		        *max_threads = 1;
 			*cpu_bind_type |= CPU_BIND_TO_THREADS;
 		} else {
-			error("unrecognized --hint argument \"%s\", see --hint=help", tok);
+			error("unrecognized --hint argument \"%s\", "
+			      "see --hint=help", tok);
 			xfree(buf);
 			return 1;
 		}
@@ -636,12 +640,17 @@ search_path(char *cwd, char *cmd, bool check_current_dir, int access_mode)
 char *print_commandline(const int script_argc, char **script_argv)
 {
 	int i;
-	char buf[256];
+	char tmp[256], *out_buf = NULL, *prefix;
 
-	buf[0] = '\0';
-	for (i = 0; i < script_argc; i++)
-		snprintf(buf, 256,  "%s", script_argv[i]);
-	return xstrdup(buf);
+	for (i = 0; i < script_argc; i++) {
+		if (out_buf)
+			prefix = " ";
+		else
+			prefix = "";
+		snprintf(tmp, 256,  "%s%s", prefix, script_argv[i]);
+		xstrcat(out_buf, tmp);
+	}
+	return out_buf;
 }
 
 char *print_geometry(const uint16_t *geometry)
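
The print_commandline() rewrite above fixes a real bug: the old loop reused one snprintf() buffer, so the function returned only the last argument. The new code builds a space-separated copy of the whole command line. The same logic with plain libc calls standing in for xstrcat()/xstrdup():

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static char *join_args(int argc, char **argv)
    {
        size_t len = 1;
        char *out;
        int i;

        for (i = 0; i < argc; i++)
            len += strlen(argv[i]) + 1;   /* argument plus separator */
        out = calloc(1, len);
        for (i = 0; i < argc; i++) {
            if (i)
                strcat(out, " ");
            strcat(out, argv[i]);
        }
        return out;                       /* caller frees */
    }

    int main(void)
    {
        char *argv[] = { "srun", "-N2", "hostname" };
        char *cmd = join_args(3, argv);

        printf("%s\n", cmd);              /* "srun -N2 hostname" */
        free(cmd);
        return 0;
    }
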
diff --git a/src/common/proc_args.h b/src/common/proc_args.h
index 109556bad9063fdae84d23da8c61dfca7c982615..7e715e1c3d689b68ba8fa81019dc3198b916aebe 100644
--- a/src/common/proc_args.h
+++ b/src/common/proc_args.h
@@ -7,7 +7,8 @@
  *  from existing SLURM source code, particularly src/srun/opt.c 
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/common/read_config.c b/src/common/read_config.c
index 38f56f8ced66d98969f6f570b72ab34e1504dcf3..b0a3e70f0b473feb949eeb38b42ae63f8ca96a91 100644
--- a/src/common/read_config.c
+++ b/src/common/read_config.c
@@ -2,14 +2,15 @@
  *  read_config.c - read the overall slurm configuration file
  *****************************************************************************
  *  Copyright (C) 2002-2007 The Regents of the University of California.
- *  Copyright (C) 2008 Lawrence Livermore National Security.
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
  *  Portions Copyright (C) 2008 Vijay Ramasubramanian.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -106,27 +107,28 @@ bool nodehash_initialized = false;
 static names_ll_t *host_to_node_hashtbl[NAME_HASH_LEN] = {NULL};
 static names_ll_t *node_to_host_hashtbl[NAME_HASH_LEN] = {NULL};
 
-static int parse_nodename(void **dest, slurm_parser_enum_t type,
-			  const char *key, const char *value,
-			  const char *line, char **leftover);
-static void destroy_nodename(void *ptr);
-static int parse_partitionname(void **dest, slurm_parser_enum_t type,
-			       const char *key, const char *value,
-			       const char *line, char **leftover);
-static void destroy_partitionname(void *ptr);
-static int parse_downnodes(void **dest, slurm_parser_enum_t type,
+static int _parse_nodename(void **dest, slurm_parser_enum_t type,
 			   const char *key, const char *value,
 			   const char *line, char **leftover);
-static void destroy_downnodes(void *ptr);
-static int defunct_option(void **dest, slurm_parser_enum_t type,
-			  const char *key, const char *value,
-			  const char *line, char **leftover);
-static void validate_and_set_defaults(slurm_ctl_conf_t *conf,
-				      s_p_hashtbl_t *hashtbl);
+static void _destroy_nodename(void *ptr);
+static int _parse_partitionname(void **dest, slurm_parser_enum_t type,
+				const char *key, const char *value,
+				const char *line, char **leftover);
+static void _destroy_partitionname(void *ptr);
+static int _parse_downnodes(void **dest, slurm_parser_enum_t type,
+			    const char *key, const char *value,
+			    const char *line, char **leftover);
+static void _destroy_downnodes(void *ptr);
+static int _defunct_option(void **dest, slurm_parser_enum_t type,
+			   const char *key, const char *value,
+			   const char *line, char **leftover);
+static void _validate_and_set_defaults(slurm_ctl_conf_t *conf,
+				       s_p_hashtbl_t *hashtbl);
 
 s_p_options_t slurm_conf_options[] = {
 	{"AccountingStorageEnforce", S_P_STRING},
 	{"AccountingStorageHost", S_P_STRING},
+	{"AccountingStorageBackupHost", S_P_STRING},
 	{"AccountingStorageLoc", S_P_STRING},
 	{"AccountingStoragePass", S_P_STRING},
 	{"AccountingStoragePort", S_P_UINT32},
@@ -139,9 +141,11 @@ s_p_options_t slurm_conf_options[] = {
 	{"CheckpointType", S_P_STRING},
 	{"CacheGroups", S_P_UINT16},
 	{"ClusterName", S_P_STRING},
+	{"CompleteWait", S_P_UINT16},
 	{"ControlAddr", S_P_STRING},
 	{"ControlMachine", S_P_STRING},
 	{"CryptoType", S_P_STRING},
+	{"DebugFlags", S_P_STRING},
 	{"DefaultStorageHost", S_P_STRING},
 	{"DefaultStorageLoc", S_P_STRING},
 	{"DefaultStoragePass", S_P_STRING},
@@ -150,24 +154,25 @@ s_p_options_t slurm_conf_options[] = {
 	{"DefaultStorageUser", S_P_STRING},
 	{"DefMemPerCPU", S_P_UINT32},
 	{"DefMemPerNode", S_P_UINT32},
-	{"DefMemPerTask", S_P_UINT32},	/* defunct */
 	{"DisableRootJobs", S_P_BOOLEAN},
 	{"EnforcePartLimits", S_P_BOOLEAN},
 	{"Epilog", S_P_STRING},
 	{"EpilogMsgTime", S_P_UINT32},
+	{"EPilogSlurmctld", S_P_STRING},
 	{"FastSchedule", S_P_UINT16},
 	{"FirstJobId", S_P_UINT32},
 	{"GetEnvTimeout", S_P_UINT16},
-	{"HashBase", S_P_LONG, defunct_option},
-	{"HeartbeatInterval", S_P_LONG, defunct_option},
+	{"HashBase", S_P_LONG, _defunct_option},
+	{"HeartbeatInterval", S_P_LONG, _defunct_option},
 	{"HealthCheckInterval", S_P_UINT16},
 	{"HealthCheckProgram", S_P_STRING},
 	{"InactiveLimit", S_P_UINT16},
 	{"JobAcctGatherType", S_P_STRING},
-	{"JobAcctFrequency", S_P_UINT16, defunct_option},
+	{"JobAcctFrequency", S_P_UINT16, _defunct_option},
 	{"JobAcctGatherFrequency", S_P_UINT16},
 	{"JobAcctLogFile", S_P_STRING},
 	{"JobAcctType", S_P_STRING},
+	{"JobCheckpointDir", S_P_STRING},
 	{"JobCompHost", S_P_STRING},
 	{"JobCompLoc", S_P_STRING},
 	{"JobCompPass", S_P_STRING},
@@ -178,7 +183,8 @@ s_p_options_t slurm_conf_options[] = {
 	{"JobCredentialPublicCertificate", S_P_STRING},
 	{"JobFileAppend", S_P_UINT16},
 	{"JobRequeue", S_P_UINT16},
-	{"KillTree", S_P_UINT16, defunct_option},
+	{"KillTree", S_P_UINT16, _defunct_option},
+	{"KillOnBadExit", S_P_UINT16},
 	{"KillWait", S_P_UINT16},
 	{"Licenses", S_P_STRING},
 	{"MailProg", S_P_STRING},
@@ -188,21 +194,36 @@ s_p_options_t slurm_conf_options[] = {
 	{"MaxMemPerTask", S_P_UINT32},	/* defunct */
 	{"MessageTimeout", S_P_UINT16},
 	{"MinJobAge", S_P_UINT16},
-	{"MpichGmDirectSupport", S_P_LONG, defunct_option},
+	{"MpichGmDirectSupport", S_P_LONG, _defunct_option},
 	{"MpiDefault", S_P_STRING},
+	{"MpiParams", S_P_STRING},
+	{"OverTimeLimit", S_P_UINT16},
 	{"PluginDir", S_P_STRING},
 	{"PlugStackConfig", S_P_STRING},
+	{"PriorityDecayHalfLife", S_P_STRING},
+	{"PriorityFavorSmall", S_P_BOOLEAN},
+	{"PriorityMaxAge", S_P_STRING},
+	{"PriorityUsageResetPeriod", S_P_STRING},
+	{"PriorityType", S_P_STRING},
+	{"PriorityWeightAge", S_P_UINT32},
+	{"PriorityWeightFairshare", S_P_UINT32},
+	{"PriorityWeightJobSize", S_P_UINT32},
+	{"PriorityWeightPartition", S_P_UINT32},
+	{"PriorityWeightQOS", S_P_UINT32},
 	{"PrivateData", S_P_STRING},
 	{"ProctrackType", S_P_STRING},
 	{"Prolog", S_P_STRING},
+	{"PrologSlurmctld", S_P_STRING},
 	{"PropagatePrioProcess", S_P_UINT16},
 	{"PropagateResourceLimitsExcept", S_P_STRING},
 	{"PropagateResourceLimits", S_P_STRING},
 	{"ResumeProgram", S_P_STRING},
 	{"ResumeRate", S_P_UINT16},
+	{"ResumeTimeout", S_P_UINT16},
+	{"ResvOverRun", S_P_UINT16},
 	{"ReturnToService", S_P_UINT16},
 	{"SallocDefaultCommand", S_P_STRING},
-	{"SchedulerAuth", S_P_STRING, defunct_option},
+	{"SchedulerAuth", S_P_STRING, _defunct_option},
 	{"SchedulerParameters", S_P_STRING},
 	{"SchedulerPort", S_P_UINT16},
 	{"SchedulerRootFilter", S_P_UINT16},
@@ -211,6 +232,7 @@ s_p_options_t slurm_conf_options[] = {
 	{"SelectType", S_P_STRING},
 	{"SelectTypeParameters", S_P_STRING},
 	{"SlurmUser", S_P_STRING},
+	{"SlurmdUser", S_P_STRING},
 	{"SlurmctldDebug", S_P_UINT16},
 	{"SlurmctldLogFile", S_P_STRING},
 	{"SlurmctldPidFile", S_P_STRING},
@@ -223,6 +245,7 @@ s_p_options_t slurm_conf_options[] = {
 	{"SlurmdSpoolDir", S_P_STRING},
 	{"SlurmdTimeout", S_P_UINT16},
 	{"SrunEpilog", S_P_STRING},
+	{"SrunIOTimeout", S_P_UINT16},
 	{"SrunProlog", S_P_STRING},
 	{"StateSaveLocation", S_P_STRING},
 	{"SuspendExcNodes", S_P_STRING},
@@ -230,12 +253,14 @@ s_p_options_t slurm_conf_options[] = {
 	{"SuspendProgram", S_P_STRING},
 	{"SuspendRate", S_P_UINT16},
 	{"SuspendTime", S_P_LONG},
+	{"SuspendTimeout", S_P_UINT16},
 	{"SwitchType", S_P_STRING},
 	{"TaskEpilog", S_P_STRING},
 	{"TaskProlog", S_P_STRING},
 	{"TaskPlugin", S_P_STRING},
 	{"TaskPluginParam", S_P_STRING},
 	{"TmpFS", S_P_STRING},
+	{"TopologyPlugin", S_P_STRING},
 	{"TrackWCKey", S_P_BOOLEAN},
 	{"TreeWidth", S_P_UINT16},
 	{"UnkillableStepProgram", S_P_STRING},
@@ -243,16 +268,16 @@ s_p_options_t slurm_conf_options[] = {
 	{"UsePAM", S_P_BOOLEAN},
 	{"WaitTime", S_P_UINT16},
 
-	{"NodeName", S_P_ARRAY, parse_nodename, destroy_nodename},
-	{"PartitionName", S_P_ARRAY, parse_partitionname,
-	 destroy_partitionname},
-	{"DownNodes", S_P_ARRAY, parse_downnodes, destroy_downnodes},
+	{"NodeName", S_P_ARRAY, _parse_nodename, _destroy_nodename},
+	{"PartitionName", S_P_ARRAY, _parse_partitionname,
+	 _destroy_partitionname},
+	{"DownNodes", S_P_ARRAY, _parse_downnodes, _destroy_downnodes},
 
 	{NULL}
 };
 
 
-static int defunct_option(void **dest, slurm_parser_enum_t type,
+static int _defunct_option(void **dest, slurm_parser_enum_t type,
 			  const char *key, const char *value,
 			  const char *line, char **leftover)
 {
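
Taken together, the table entries added above mean a slurm.conf fragment like the following now parses; every value here is invented purely to exercise the new keys:

    ClusterName=demo
    AccountingStorageType=accounting_storage/slurmdbd
    AccountingStorageBackupHost=dbd-backup
    DebugFlags=Steps,Triggers
    JobCheckpointDir=/var/spool/slurm/ckpt
    PriorityType=priority/multifactor
    PriorityDecayHalfLife=7-0
    PriorityWeightFairshare=10000
    SlurmdUser=root
    TopologyPlugin=topology/tree
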
@@ -260,12 +285,46 @@ static int defunct_option(void **dest, slurm_parser_enum_t type,
 	return 0;
 }
 
-static int parse_nodename(void **dest, slurm_parser_enum_t type,
-			  const char *key, const char *value,
-			  const char *line, char **leftover)
+#ifdef HAVE_3D
+/* Get the general prefix of the machine's node names, used primarily
+ * for BlueGene systems.  Not in general use because some systems
+ * have multiple prefixes, such as foo[1-1000],bar[1-1000].
+ */
+/* Caller must be holding slurm_conf_lock() */
+static void _set_node_prefix(const char *nodenames)
+{
+	int i;
+	char *tmp;
+
+	xassert(nodenames != NULL);
+	for (i = 1; nodenames[i] != '\0'; i++) {
+		if((nodenames[i-1] == '[') 
+		   || (nodenames[i-1] <= '9'
+		       && nodenames[i-1] >= '0'))
+			break;
+	}
+	xfree(conf_ptr->node_prefix);
+	if(nodenames[i] == '\0')
+		conf_ptr->node_prefix = xstrdup(nodenames);
+	else {
+		tmp = xmalloc(sizeof(char)*i+1);
+		memset(tmp, 0, i+1);
+		snprintf(tmp, i, "%s", nodenames);
+		conf_ptr->node_prefix = tmp;
+		tmp = NULL;
+	}
+	debug3("Prefix is %s %s %d", conf_ptr->node_prefix, nodenames, i);
+}
+#endif /* HAVE_3D */
+
+
+static int _parse_nodename(void **dest, slurm_parser_enum_t type,
+			   const char *key, const char *value,
+			   const char *line, char **leftover)
 {
 	s_p_hashtbl_t *tbl, *dflt;
 	slurm_conf_node_t *n;
+	int computed_procs;
 	static s_p_options_t _nodename_options[] = {
 		{"CoresPerSocket", S_P_UINT16},
 		{"Feature", S_P_STRING},
@@ -317,6 +376,11 @@ static int parse_nodename(void **dest, slurm_parser_enum_t type,
 		dflt = default_nodename_tbl;
 
 		n->nodenames = xstrdup(value);
+#ifdef HAVE_3D
+		if (conf_ptr->node_prefix == NULL)
+			_set_node_prefix(n->nodenames);
+#endif
+
 		if (!s_p_get_string(&n->hostnames, "NodeHostname", tbl))
 			n->hostnames = xstrdup(n->nodenames);
 		if (!s_p_get_string(&n->addresses, "NodeAddr", tbl))
@@ -409,6 +473,17 @@ static int parse_nodename(void **dest, slurm_parser_enum_t type,
 			}
 		}
 
+		computed_procs = n->sockets * n->cores * n->threads;
+		if ((n->cpus != n->sockets) &&
+		    (n->cpus != n->sockets * n->cores) &&
+		    (n->cpus != computed_procs)) {
+			error("Procs (%d) doesn't match "
+			      "Sockets*CoresPerSocket*ThreadsPerCore (%u), "
+			      "resetting Procs",
+			      n->cpus, computed_procs);
+			n->cpus = computed_procs;
+		}
+
 		*dest = (void *)n;
 
 		return 1;
@@ -417,7 +492,7 @@ static int parse_nodename(void **dest, slurm_parser_enum_t type,
 	/* should not get here */
 }
 
-static void destroy_nodename(void *ptr)
+static void _destroy_nodename(void *ptr)
 {
 	slurm_conf_node_t *n = (slurm_conf_node_t *)ptr;
 	xfree(n->nodenames);
@@ -443,7 +518,8 @@ int slurm_conf_nodename_array(slurm_conf_node_t **ptr_array[])
 	}
 }
 
-static int parse_partitionname(void **dest, slurm_parser_enum_t type,
+
+static int _parse_partitionname(void **dest, slurm_parser_enum_t type,
 			       const char *key, const char *value,
 			       const char *line, char **leftover)
 {
@@ -453,6 +529,7 @@ static int parse_partitionname(void **dest, slurm_parser_enum_t type,
 	static s_p_options_t _partition_options[] = {
 		{"AllowGroups", S_P_STRING},
 		{"Default", S_P_BOOLEAN}, /* YES or NO */
+		{"DefaultTime", S_P_STRING},
 		{"DisableRootJobs", S_P_BOOLEAN}, /* YES or NO */
 		{"Hidden", S_P_BOOLEAN}, /* YES or NO */
 		{"MaxTime", S_P_STRING},
@@ -463,6 +540,7 @@ static int parse_partitionname(void **dest, slurm_parser_enum_t type,
 		{"RootOnly", S_P_BOOLEAN}, /* YES or NO */
 		{"Shared", S_P_STRING}, /* YES, NO, or FORCE */
 		{"State", S_P_BOOLEAN}, /* UP or DOWN */
+		{"AllocNodes", S_P_STRING},
 		{NULL}
 	};
 
@@ -490,6 +568,16 @@ static int parse_partitionname(void **dest, slurm_parser_enum_t type,
 			p->allow_groups = NULL; /* NULL means allow all */
 		}
 
+		if (!s_p_get_string(&p->allow_alloc_nodes, "AllocNodes", tbl)) {
+			s_p_get_string(&p->allow_alloc_nodes, "AllocNodes", 
+				       dflt);
+			if (p->allow_alloc_nodes && 
+			    (strcasecmp(p->allow_alloc_nodes, "ALL") == 0)) {
+				/* NULL means allow all submit nodes */
+				xfree(p->allow_alloc_nodes);
+			}
+		}
+
 		if (!s_p_get_boolean(&p->default_flag, "Default", tbl)
 		    && !s_p_get_boolean(&p->default_flag, "Default", dflt))
 			p->default_flag = false;
@@ -509,7 +597,7 @@ static int parse_partitionname(void **dest, slurm_parser_enum_t type,
 			int max_time = time_str2mins(tmp);
 			if ((max_time < 0) && (max_time != INFINITE)) {
 				error("Bad value \"%s\" for MaxTime", tmp);
-				destroy_partitionname(p);
+				_destroy_partitionname(p);
 				s_p_hashtbl_destroy(tbl);
 				xfree(tmp);
 				return -1;
@@ -518,6 +606,22 @@ static int parse_partitionname(void **dest, slurm_parser_enum_t type,
 			xfree(tmp);
 		}
 
+		if (!s_p_get_string(&tmp, "DefaultTime", tbl) &&
+		    !s_p_get_string(&tmp, "DefaultTime", dflt))
+			p->default_time = NO_VAL;
+		else {
+			int default_time = time_str2mins(tmp);
+			if ((default_time < 0) && (default_time != INFINITE)) {
+				error("Bad value \"%s\" for DefaultTime", tmp);
+				_destroy_partitionname(p);
+				s_p_hashtbl_destroy(tbl);
+				xfree(tmp);
+				return -1;
+			}
+			p->default_time = default_time;
+			xfree(tmp);
+		}
+
 		if (!s_p_get_uint32(&p->max_nodes, "MaxNodes", tbl)
 		    && !s_p_get_uint32(&p->max_nodes, "MaxNodes", dflt))
 			p->max_nodes = INFINITE;
@@ -576,7 +680,7 @@ static int parse_partitionname(void **dest, slurm_parser_enum_t type,
 #endif
 			else {
 				error("Bad value \"%s\" for Shared", tmp);
-				destroy_partitionname(p);
+				_destroy_partitionname(p);
 				s_p_hashtbl_destroy(tbl);
 				xfree(tmp);
 				return -1;
@@ -600,13 +704,14 @@ static int parse_partitionname(void **dest, slurm_parser_enum_t type,
 	/* should not get here */
 }
 
-static void destroy_partitionname(void *ptr)
+static void _destroy_partitionname(void *ptr)
 {
 	slurm_conf_partition_t *p = (slurm_conf_partition_t *)ptr;
 
+	xfree(p->allow_alloc_nodes);
+	xfree(p->allow_groups);
 	xfree(p->name);
 	xfree(p->nodes);
-	xfree(p->allow_groups);
 	xfree(ptr);
 }
 
@@ -625,7 +730,7 @@ int slurm_conf_partition_array(slurm_conf_partition_t **ptr_array[])
 	}
 }
 
-static int parse_downnodes(void **dest, slurm_parser_enum_t type,
+static int _parse_downnodes(void **dest, slurm_parser_enum_t type,
 			   const char *key, const char *value,
 			   const char *line, char **leftover)
 {
@@ -659,7 +764,7 @@ static int parse_downnodes(void **dest, slurm_parser_enum_t type,
 	return 1;
 }
 
-static void destroy_downnodes(void *ptr)
+static void _destroy_downnodes(void *ptr)
 {
 	slurm_conf_downnodes_t *n = (slurm_conf_downnodes_t *)ptr;
 	xfree(n->nodenames);
@@ -668,7 +773,7 @@ static void destroy_downnodes(void *ptr)
 	xfree(ptr);
 }
 
-int slurm_conf_downnodes_array(slurm_conf_downnodes_t **ptr_array[])
+extern int slurm_conf_downnodes_array(slurm_conf_downnodes_t **ptr_array[])
 {
 	int count;
 	slurm_conf_downnodes_t **ptr;
@@ -805,6 +910,11 @@ static int _register_conf_node_aliases(slurm_conf_node_t *node_ptr)
 		goto cleanup;
 	}
 
+#ifdef HAVE_3D
+	if (conf_ptr->node_prefix == NULL)
+		_set_node_prefix(node_ptr->nodenames);
+#endif
+
 	/* some sanity checks */
 #ifdef HAVE_FRONT_END
 	if (hostlist_count(hostname_list) != 1
@@ -874,8 +984,10 @@ static void _init_slurmd_nodehash(void)
 	else
 		nodehash_initialized = true;
 
-	if(!conf_initialized) 
+	if(!conf_initialized) {
 		_init_slurm_conf(NULL);
+		conf_initialized = true;
+	}
 
 	count = slurm_conf_nodename_array(&ptr_array);
 	if (count == 0) {
@@ -925,6 +1037,9 @@ extern char *slurm_conf_get_hostname(const char *node_name)
 
 /*
  * slurm_conf_get_nodename - Return the NodeName for given NodeHostname
+ *
+ * NOTE: Call xfree() to release returned value's memory.
+ * NOTE: Caller must NOT be holding slurm_conf_lock().
  */
 extern char *slurm_conf_get_nodename(const char *node_hostname)
 {
@@ -949,6 +1064,39 @@ extern char *slurm_conf_get_nodename(const char *node_hostname)
 	return NULL;
 }
 
+/*
+ * slurm_conf_get_nodeaddr - Return the NodeAddr for given NodeHostname
+ *
+ * NOTE: Call xfree() to release returned value's memory.
+ * NOTE: Caller must NOT be holding slurm_conf_lock().
+ */
+extern char *slurm_conf_get_nodeaddr(const char *node_hostname)
+{
+	int idx;
+	names_ll_t *p;
+
+	slurm_conf_lock();
+	_init_slurmd_nodehash();
+	idx = _get_hash_idx(node_hostname);
+
+	p = host_to_node_hashtbl[idx];
+	while (p) {
+		if (strcmp(p->hostname, node_hostname) == 0) {
+			char *nodeaddr;
+			if (p->address != NULL)
+				nodeaddr = xstrdup(p->address);
+			else
+				nodeaddr = NULL;
+			slurm_conf_unlock();
+			return nodeaddr;
+		}
+		p = p->next_hostname;
+	}
+	slurm_conf_unlock();
+
+	return NULL;
+}
+
 /*
  * slurm_conf_get_aliased_nodename - Return the NodeName for the
  * complete hostname string returned by gethostname if there is
@@ -1139,6 +1287,7 @@ gethostname_short (char *name, size_t len)
 extern void
 free_slurm_conf (slurm_ctl_conf_t *ctl_conf_ptr, bool purge_node_hash)
 {
+	xfree (ctl_conf_ptr->accounting_storage_backup_host);
 	xfree (ctl_conf_ptr->accounting_storage_host);
 	xfree (ctl_conf_ptr->accounting_storage_loc);
 	xfree (ctl_conf_ptr->accounting_storage_pass);
@@ -1155,6 +1304,7 @@ free_slurm_conf (slurm_ctl_conf_t *ctl_conf_ptr, bool purge_node_hash)
 	xfree (ctl_conf_ptr->epilog);
 	xfree (ctl_conf_ptr->health_check_program);
 	xfree (ctl_conf_ptr->job_acct_gather_type);
+	xfree (ctl_conf_ptr->job_ckpt_dir);
 	xfree (ctl_conf_ptr->job_comp_host);
 	xfree (ctl_conf_ptr->job_comp_loc);
 	xfree (ctl_conf_ptr->job_comp_pass);
@@ -1165,9 +1315,11 @@ free_slurm_conf (slurm_ctl_conf_t *ctl_conf_ptr, bool purge_node_hash)
 	xfree (ctl_conf_ptr->licenses);
 	xfree (ctl_conf_ptr->mail_prog);
 	xfree (ctl_conf_ptr->mpi_default);
+	xfree (ctl_conf_ptr->mpi_params);
 	xfree (ctl_conf_ptr->node_prefix);
 	xfree (ctl_conf_ptr->plugindir);
 	xfree (ctl_conf_ptr->plugstack);
+	xfree (ctl_conf_ptr->priority_type);
 	xfree (ctl_conf_ptr->proctrack_type);
 	xfree (ctl_conf_ptr->prolog);
 	xfree (ctl_conf_ptr->propagate_rlimits_except);
@@ -1184,6 +1336,7 @@ free_slurm_conf (slurm_ctl_conf_t *ctl_conf_ptr, bool purge_node_hash)
 	xfree (ctl_conf_ptr->slurmd_logfile);
 	xfree (ctl_conf_ptr->slurmd_pidfile);
 	xfree (ctl_conf_ptr->slurmd_spooldir);
+	xfree (ctl_conf_ptr->slurmd_user_name);
 	xfree (ctl_conf_ptr->srun_epilog);
 	xfree (ctl_conf_ptr->srun_prolog);
 	xfree (ctl_conf_ptr->state_save_location);
@@ -1195,6 +1348,7 @@ free_slurm_conf (slurm_ctl_conf_t *ctl_conf_ptr, bool purge_node_hash)
 	xfree (ctl_conf_ptr->task_plugin);
 	xfree (ctl_conf_ptr->task_prolog);
 	xfree (ctl_conf_ptr->tmp_fs);
+	xfree (ctl_conf_ptr->topology_plugin);
 	xfree (ctl_conf_ptr->unkillable_program);
 
 	if (purge_node_hash)
@@ -1212,6 +1366,7 @@ init_slurm_conf (slurm_ctl_conf_t *ctl_conf_ptr)
 {
 	ctl_conf_ptr->last_update		= time(NULL);
 	ctl_conf_ptr->cache_groups		= (uint16_t) NO_VAL;
+	xfree (ctl_conf_ptr->accounting_storage_backup_host);
 	xfree (ctl_conf_ptr->accounting_storage_host);
 	xfree (ctl_conf_ptr->accounting_storage_loc);
 	xfree (ctl_conf_ptr->accounting_storage_pass);
@@ -1225,10 +1380,12 @@ init_slurm_conf (slurm_ctl_conf_t *ctl_conf_ptr)
 	ctl_conf_ptr->cache_groups		= 0;
 	xfree (ctl_conf_ptr->checkpoint_type);
 	xfree (ctl_conf_ptr->cluster_name);
+	ctl_conf_ptr->complete_wait		= (uint16_t) NO_VAL;
 	xfree (ctl_conf_ptr->control_addr);
 	xfree (ctl_conf_ptr->control_machine);
 	xfree (ctl_conf_ptr->crypto_type);
 	ctl_conf_ptr->def_mem_per_task          = 0;
+	ctl_conf_ptr->debug_flags		= 0;
 	ctl_conf_ptr->disable_root_jobs         = 0;
 	ctl_conf_ptr->enforce_part_limits       = 0;
 	xfree (ctl_conf_ptr->epilog);
@@ -1241,6 +1398,7 @@ init_slurm_conf (slurm_ctl_conf_t *ctl_conf_ptr)
 	ctl_conf_ptr->inactive_limit		= (uint16_t) NO_VAL;
 	xfree (ctl_conf_ptr->job_acct_gather_type);
 	ctl_conf_ptr->job_acct_gather_freq             = 0;
+	xfree (ctl_conf_ptr->job_ckpt_dir);
 	xfree (ctl_conf_ptr->job_comp_loc);
 	xfree (ctl_conf_ptr->job_comp_pass);
 	ctl_conf_ptr->job_comp_port             = 0;
@@ -1257,9 +1415,11 @@ init_slurm_conf (slurm_ctl_conf_t *ctl_conf_ptr)
 	ctl_conf_ptr->max_mem_per_task          = 0;
 	ctl_conf_ptr->min_job_age		= (uint16_t) NO_VAL;
 	xfree (ctl_conf_ptr->mpi_default);
+	xfree (ctl_conf_ptr->mpi_params);
 	ctl_conf_ptr->msg_timeout		= (uint16_t) NO_VAL;
 	ctl_conf_ptr->next_job_id		= (uint32_t) NO_VAL;
 	xfree (ctl_conf_ptr->node_prefix);
+	ctl_conf_ptr->over_time_limit           = 0;
 	xfree (ctl_conf_ptr->plugindir);
 	xfree (ctl_conf_ptr->plugstack);
 	ctl_conf_ptr->private_data              = 0;
@@ -1268,8 +1428,10 @@ init_slurm_conf (slurm_ctl_conf_t *ctl_conf_ptr)
 	ctl_conf_ptr->propagate_prio_process	= (uint16_t) NO_VAL;
 	xfree (ctl_conf_ptr->propagate_rlimits);
 	xfree (ctl_conf_ptr->propagate_rlimits_except);
+	ctl_conf_ptr->resume_timeout		= 0;
 	xfree (ctl_conf_ptr->resume_program);
 	ctl_conf_ptr->resume_rate		= (uint16_t) NO_VAL;
+	ctl_conf_ptr->resv_over_run		= 0;
 	ctl_conf_ptr->ret2service		= (uint16_t) NO_VAL;
 	xfree( ctl_conf_ptr->salloc_default_command);
 	xfree( ctl_conf_ptr->sched_params );
@@ -1281,6 +1443,8 @@ init_slurm_conf (slurm_ctl_conf_t *ctl_conf_ptr)
 	ctl_conf_ptr->select_type_param         = (uint16_t) NO_VAL;
 	ctl_conf_ptr->slurm_user_id		= (uint16_t) NO_VAL; 
 	xfree (ctl_conf_ptr->slurm_user_name);
+	ctl_conf_ptr->slurmd_user_id		= (uint16_t) NO_VAL; 
+	xfree (ctl_conf_ptr->slurmd_user_name);
 	ctl_conf_ptr->slurmctld_debug		= (uint16_t) NO_VAL; 
 	xfree (ctl_conf_ptr->slurmctld_logfile);
 	xfree (ctl_conf_ptr->slurmctld_pidfile);
@@ -1293,6 +1457,7 @@ init_slurm_conf (slurm_ctl_conf_t *ctl_conf_ptr)
 	xfree (ctl_conf_ptr->slurmd_spooldir);
 	ctl_conf_ptr->slurmd_timeout		= (uint16_t) NO_VAL;
 	xfree (ctl_conf_ptr->srun_prolog);
+	ctl_conf_ptr->srun_io_timeout		= 0;
 	xfree (ctl_conf_ptr->srun_epilog);
 	xfree (ctl_conf_ptr->state_save_location);
 	xfree (ctl_conf_ptr->suspend_exc_nodes);
@@ -1300,17 +1465,20 @@ init_slurm_conf (slurm_ctl_conf_t *ctl_conf_ptr)
 	xfree (ctl_conf_ptr->suspend_program);
 	ctl_conf_ptr->suspend_rate		= (uint16_t) NO_VAL;
 	ctl_conf_ptr->suspend_time		= (uint16_t) NO_VAL;
+	ctl_conf_ptr->suspend_timeout		= 0;
 	xfree (ctl_conf_ptr->switch_type);
 	xfree (ctl_conf_ptr->task_epilog);
 	xfree (ctl_conf_ptr->task_plugin);
 	ctl_conf_ptr->task_plugin_param		= 0;
 	xfree (ctl_conf_ptr->task_prolog);
 	xfree (ctl_conf_ptr->tmp_fs);
+	xfree (ctl_conf_ptr->topology_plugin);
 	ctl_conf_ptr->tree_width       		= (uint16_t) NO_VAL;
 	xfree (ctl_conf_ptr->unkillable_program);
 	ctl_conf_ptr->unkillable_timeout        = (uint16_t) NO_VAL;
 	ctl_conf_ptr->use_pam			= 0;
 	ctl_conf_ptr->wait_time			= (uint16_t) NO_VAL;
+	ctl_conf_ptr->kill_on_bad_exit	= 0;
 
 	_free_name_hashtbl();
 	_init_name_hashtbl();
@@ -1319,8 +1487,7 @@ init_slurm_conf (slurm_ctl_conf_t *ctl_conf_ptr)
 }
 
 /* caller must lock conf_lock */
-static void
-_init_slurm_conf(const char *file_name)
+static void _init_slurm_conf(const char *file_name)
 {
 	char *name = (char *)file_name;
 	/* conf_ptr = (slurm_ctl_conf_t *)xmalloc(sizeof(slurm_ctl_conf_t)); */
@@ -1338,7 +1505,7 @@ _init_slurm_conf(const char *file_name)
 	if(s_p_parse_file(conf_hashtbl, name) == SLURM_ERROR)
 		fatal("something wrong with opening/reading conf file");
 	/* s_p_dump_values(conf_hashtbl, slurm_conf_options); */
-	validate_and_set_defaults(conf_ptr, conf_hashtbl);
+	_validate_and_set_defaults(conf_ptr, conf_hashtbl);
 	conf_ptr->slurm_conf = xstrdup(name);
 }
 
@@ -1506,7 +1673,7 @@ static void _normalize_debug_level(uint16_t *level)
  * NOTE: if control_addr is NULL, it is over-written by control_machine
  */
 static void
-validate_and_set_defaults(slurm_ctl_conf_t *conf, s_p_hashtbl_t *hashtbl)
+_validate_and_set_defaults(slurm_ctl_conf_t *conf, s_p_hashtbl_t *hashtbl)
 {
 	char *temp_str = NULL;
 	long long_suspend_time;
@@ -1540,8 +1707,11 @@ validate_and_set_defaults(slurm_ctl_conf_t *conf, s_p_hashtbl_t *hashtbl)
 
 	s_p_get_string(&conf->cluster_name, "ClusterName", hashtbl);
 
+	if (!s_p_get_uint16(&conf->complete_wait, "CompleteWait", hashtbl))
+		conf->complete_wait = DEFAULT_COMPLETE_WAIT;
+
 	if (!s_p_get_string(&conf->control_machine, "ControlMachine", hashtbl))
-		fatal ("validate_and_set_defaults: "
+		fatal ("_validate_and_set_defaults: "
 		       "ControlMachine not specified.");
 	else if (strcasecmp("localhost", conf->control_machine) == 0) {
 		xfree (conf->control_machine);
@@ -1565,7 +1735,7 @@ validate_and_set_defaults(slurm_ctl_conf_t *conf, s_p_hashtbl_t *hashtbl)
 	s_p_get_string(&default_storage_host, "DefaultStorageHost", hashtbl);
 	s_p_get_string(&default_storage_user, "DefaultStorageUser", hashtbl);
 	s_p_get_string(&default_storage_pass, "DefaultStoragePass", hashtbl);
-	s_p_get_string(&default_storage_loc,  "DefaultStorageLoc", hashtbl);
+	s_p_get_string(&default_storage_loc,  "DefaultStorageLoc",  hashtbl);
 	s_p_get_uint32(&default_storage_port, "DefaultStoragePort", hashtbl);
 	s_p_get_string(&conf->job_credential_private_key,
 		       "JobCredentialPrivateKey", hashtbl);
@@ -1595,14 +1765,20 @@ validate_and_set_defaults(slurm_ctl_conf_t *conf, s_p_hashtbl_t *hashtbl)
 		      "JobCredentialPublicCertificate be set");
 	}
 
-	if ((s_p_get_uint32(&conf->def_mem_per_task,
-			    "DefMemPerCPU", hashtbl)) ||
-	    (s_p_get_uint32(&conf->def_mem_per_task, "DefMemPerTask", hashtbl)))
+	if (s_p_get_uint32(&conf->def_mem_per_task, "DefMemPerCPU", hashtbl))
 		conf->def_mem_per_task |= MEM_PER_CPU;
-	else if (!s_p_get_uint32(&conf->def_mem_per_task,
-				 "DefMemPerNode", hashtbl))
+	else if (!s_p_get_uint32(&conf->def_mem_per_task, "DefMemPerNode", 
+				 hashtbl))
 		conf->def_mem_per_task = DEFAULT_MEM_PER_CPU;
 
+	if (s_p_get_string(&temp_str, "DebugFlags", hashtbl)) {
+		conf->debug_flags = debug_str2flags(temp_str);
+		if (conf->debug_flags == NO_VAL)
+			fatal("DebugFlags invalid: %s", temp_str);
+		xfree(temp_str);
+	} else	/* Default: no DebugFlags */
+		conf->debug_flags = 0;
+
 	if (!s_p_get_boolean((bool *) &conf->disable_root_jobs, 
 			     "DisableRootJobs", hashtbl))
 		conf->disable_root_jobs = DEFAULT_DISABLE_ROOT_JOBS;
@@ -1616,6 +1792,8 @@ validate_and_set_defaults(slurm_ctl_conf_t *conf, s_p_hashtbl_t *hashtbl)
 	if (!s_p_get_uint32(&conf->epilog_msg_time, "EpilogMsgTime", hashtbl))
 		conf->epilog_msg_time = DEFAULT_EPILOG_MSG_TIME;
 
+	s_p_get_string(&conf->epilog_slurmctld, "EpilogSlurmctld", hashtbl);
+
 	if (!s_p_get_uint16(&conf->fast_schedule, "FastSchedule", hashtbl))
 		conf->fast_schedule = DEFAULT_FAST_SCHEDULE;
 
@@ -1662,6 +1840,9 @@ validate_and_set_defaults(slurm_ctl_conf_t *conf, s_p_hashtbl_t *hashtbl)
 		conf->job_acct_gather_type =
 			xstrdup(DEFAULT_JOB_ACCT_GATHER_TYPE);
 
+	if (!s_p_get_string(&conf->job_ckpt_dir, "JobCheckpointDir", hashtbl))
+		conf->job_ckpt_dir = xstrdup(DEFAULT_JOB_CKPT_DIR);
+
 	if (!s_p_get_string(&conf->job_comp_type, "JobCompType", hashtbl)) {
 		if(default_storage_type) {
 			if(!strcasecmp("slurmdbd", default_storage_type)) {
@@ -1683,6 +1864,9 @@ validate_and_set_defaults(slurm_ctl_conf_t *conf, s_p_hashtbl_t *hashtbl)
 	if (!s_p_get_string(&conf->job_comp_loc, "JobCompLoc", hashtbl)) {
 		if(default_storage_loc)
 			conf->job_comp_loc = xstrdup(default_storage_loc);
+		else if(!strcmp(conf->job_comp_type, "job_comp/mysql")
+			|| !strcmp(conf->job_comp_type, "job_comp/pgsql")) 
+			conf->job_comp_loc = xstrdup(DEFAULT_JOB_COMP_DB);
 		else
 			conf->job_comp_loc = xstrdup(DEFAULT_JOB_COMP_LOC);
 	}
@@ -1690,32 +1874,34 @@ validate_and_set_defaults(slurm_ctl_conf_t *conf, s_p_hashtbl_t *hashtbl)
 	if (!s_p_get_string(&conf->job_comp_host, "JobCompHost",
 			    hashtbl)) {
 		if(default_storage_host)
-			conf->job_comp_host =
-				xstrdup(default_storage_host);
+			conf->job_comp_host = xstrdup(default_storage_host);
 		else
 			conf->job_comp_host = xstrdup(DEFAULT_STORAGE_HOST);
 	}
 	if (!s_p_get_string(&conf->job_comp_user, "JobCompUser",
 			    hashtbl)) {
 		if(default_storage_user)
-			conf->job_comp_user =
-				xstrdup(default_storage_user);
+			conf->job_comp_user = xstrdup(default_storage_user);
 		else
 			conf->job_comp_user = xstrdup(DEFAULT_STORAGE_USER);
 	}
 	if (!s_p_get_string(&conf->job_comp_pass, "JobCompPass",
 			    hashtbl)) {
 		if(default_storage_pass)
-			conf->job_comp_pass =
-				xstrdup(default_storage_pass);
+			conf->job_comp_pass = xstrdup(default_storage_pass);
 	}
 	if (!s_p_get_uint32(&conf->job_comp_port, "JobCompPort",
 			    hashtbl)) {
 		if(default_storage_port)
 			conf->job_comp_port = default_storage_port;
-		else
+		else if(!strcmp(conf->job_comp_type, "job_comp/mysql")) 
+			conf->job_comp_port = DEFAULT_MYSQL_PORT;
+		else if(!strcmp(conf->job_comp_type, "job_comp/pgsql")) 
+			conf->job_comp_port = DEFAULT_PGSQL_PORT;
+		else 
 			conf->job_comp_port = DEFAULT_STORAGE_PORT;
 	}
+
 	if (!s_p_get_uint16(&conf->job_file_append, "JobFileAppend", hashtbl))
 		conf->job_file_append = 0;
 
@@ -1727,10 +1913,13 @@ validate_and_set_defaults(slurm_ctl_conf_t *conf, s_p_hashtbl_t *hashtbl)
 	if (!s_p_get_uint16(&conf->get_env_timeout, "GetEnvTimeout", hashtbl))
 		conf->get_env_timeout = DEFAULT_GET_ENV_TIMEOUT;
 
-	s_p_get_uint16(&conf->health_check_interval,
-		       "HealthCheckInterval", hashtbl);
-	s_p_get_string(&conf->health_check_program,
-		       "HealthCheckProgram", hashtbl);
+	s_p_get_uint16(&conf->health_check_interval, "HealthCheckInterval", 
+		       hashtbl);
+	s_p_get_string(&conf->health_check_program, "HealthCheckProgram", 
+		       hashtbl);
+
+	if (!s_p_get_uint16(&conf->kill_on_bad_exit, "KillOnBadExit", hashtbl))
+		conf->kill_on_bad_exit = DEFAULT_KILL_ON_BAD_EXIT;
 
 	if (!s_p_get_uint16(&conf->kill_wait, "KillWait", hashtbl))
 		conf->kill_wait = DEFAULT_KILL_WAIT;
@@ -1743,13 +1932,15 @@ validate_and_set_defaults(slurm_ctl_conf_t *conf, s_p_hashtbl_t *hashtbl)
 	if (!s_p_get_uint16(&conf->max_job_cnt, "MaxJobCount", hashtbl))
 		conf->max_job_cnt = DEFAULT_MAX_JOB_COUNT;
 
-	if ((s_p_get_uint32(&conf->max_mem_per_task,
+	if ((s_p_get_uint32(&conf->max_mem_per_task, 
 			    "MaxMemPerCPU", hashtbl)) ||
-	    (s_p_get_uint32(&conf->max_mem_per_task, "MaxMemPerTask", hashtbl)))
+	    (s_p_get_uint32(&conf->max_mem_per_task, 
+			    "MaxMemPerTask", hashtbl))) {
 		conf->max_mem_per_task |= MEM_PER_CPU;
-	else if (!s_p_get_uint32(&conf->max_mem_per_task,
-				 "MaxMemPerNode", hashtbl))
+	} else if (!s_p_get_uint32(&conf->max_mem_per_task, 
+				 "MaxMemPerNode", hashtbl)) {
 		conf->max_mem_per_task = DEFAULT_MAX_MEM_PER_CPU;
+	}
 
 	if (!s_p_get_uint16(&conf->msg_timeout, "MessageTimeout", hashtbl))
 		conf->msg_timeout = DEFAULT_MSG_TIMEOUT;
@@ -1764,6 +1955,8 @@ validate_and_set_defaults(slurm_ctl_conf_t *conf, s_p_hashtbl_t *hashtbl)
 	if (!s_p_get_string(&conf->mpi_default, "MpiDefault", hashtbl))
 		conf->mpi_default = xstrdup(DEFAULT_MPI_DEFAULT);
 
+	s_p_get_string(&conf->mpi_params, "MpiParams", hashtbl);
+
 	if(!s_p_get_boolean((bool *)&conf->track_wckey, 
 			    "TrackWCKey", hashtbl))
 		conf->track_wckey = false;
@@ -1804,6 +1997,10 @@ validate_and_set_defaults(slurm_ctl_conf_t *conf, s_p_hashtbl_t *hashtbl)
 		xfree(temp_str);
 	}
 
+	/* AccountingStorageBackupHost is optional; absence is fine */
+	s_p_get_string(&conf->accounting_storage_backup_host,
+		       "AccountingStorageBackupHost", hashtbl);
+	
 	if (!s_p_get_string(&conf->accounting_storage_host,
 			    "AccountingStorageHost", hashtbl)) {
 		if(default_storage_host)
@@ -1825,11 +2022,16 @@ validate_and_set_defaults(slurm_ctl_conf_t *conf, s_p_hashtbl_t *hashtbl)
 		if(default_storage_loc)
 			conf->accounting_storage_loc =
 				xstrdup(default_storage_loc);
+		else if(!strcmp(conf->accounting_storage_type, 
+				"accounting_storage/mysql")
+			|| !strcmp(conf->accounting_storage_type, 
+				"accounting_storage/pgsql")) 
+			conf->accounting_storage_loc =
+				xstrdup(DEFAULT_ACCOUNTING_DB);
 		else
 			conf->accounting_storage_loc =
 				xstrdup(DEFAULT_STORAGE_LOC);
 	}
-
 	if (!s_p_get_string(&conf->accounting_storage_user,
 			    "AccountingStorageUser", hashtbl)) {
 		if(default_storage_user)
@@ -1849,9 +2051,30 @@ validate_and_set_defaults(slurm_ctl_conf_t *conf, s_p_hashtbl_t *hashtbl)
 			    "AccountingStoragePort", hashtbl)) {
 		if(default_storage_port)
 			conf->accounting_storage_port = default_storage_port;
+		else if(!strcmp(conf->accounting_storage_type,
+				"accounting_storage/slurmdbd")) 
+			conf->accounting_storage_port = SLURMDBD_PORT;
+		else if(!strcmp(conf->accounting_storage_type, 
+			  "accounting_storage/mysql")) 
+			conf->accounting_storage_port = DEFAULT_MYSQL_PORT;
+		else if(!strcmp(conf->accounting_storage_type,
+			  "accounting_storage/pgsql")) 
+			conf->accounting_storage_port = DEFAULT_PGSQL_PORT;
 		else
 			conf->accounting_storage_port = DEFAULT_STORAGE_PORT;
 	}
+	
+	/* remove the user and loc if using slurmdbd */
+	if(!strcmp(conf->accounting_storage_type,
+		   "accounting_storage/slurmdbd")) {
+		xfree(conf->accounting_storage_loc);
+		conf->accounting_storage_loc = xstrdup("N/A");
+		xfree(conf->accounting_storage_user);
+		conf->accounting_storage_user = xstrdup("N/A");
+	}
+
+	s_p_get_uint16(&conf->over_time_limit, "OverTimeLimit", hashtbl);
+
 	if (!s_p_get_string(&conf->plugindir, "PluginDir", hashtbl))
 		conf->plugindir = xstrdup(default_plugin_path);
 
@@ -1861,6 +2084,83 @@ validate_and_set_defaults(slurm_ctl_conf_t *conf, s_p_hashtbl_t *hashtbl)
 	if (!s_p_get_string(&conf->switch_type, "SwitchType", hashtbl))
 		conf->switch_type = xstrdup(DEFAULT_SWITCH_TYPE);
 
+	if (s_p_get_string(&temp_str, "PriorityDecayHalfLife", hashtbl)) {
+		int max_time = time_str2mins(temp_str);
+		if ((max_time < 0) && (max_time != INFINITE)) {
+			fatal("Bad value \"%s\" for PriorityDecayHalfLife",
+			      temp_str);
+		}
+		conf->priority_decay_hl = max_time * 60;
+		xfree(temp_str);
+	} else 
+		conf->priority_decay_hl = DEFAULT_PRIORITY_DECAY;
+
+	if (s_p_get_boolean(&truth, "PriorityFavorSmall", hashtbl) && truth) 
+		conf->priority_favor_small = 1;
+	else 
+		conf->priority_favor_small = 0;
+	
+	if (s_p_get_string(&temp_str, "PriorityMaxAge", hashtbl)) {
+		int max_time = time_str2mins(temp_str);
+		if ((max_time < 0) && (max_time != INFINITE)) {
+			fatal("Bad value \"%s\" for PriorityMaxAge",
+			      temp_str);
+		}
+		conf->priority_max_age = max_time * 60;
+		xfree(temp_str);
+	} else 
+		conf->priority_max_age = DEFAULT_PRIORITY_DECAY;
+
+	if (s_p_get_string(&temp_str, "PriorityUsageResetPeriod", hashtbl)) {
+		if (strcasecmp(temp_str, "none") == 0)
+			conf->priority_reset_period = PRIORITY_RESET_NONE;
+		else if (strcasecmp(temp_str, "now") == 0)
+			conf->priority_reset_period = PRIORITY_RESET_NOW;
+		else if (strcasecmp(temp_str, "daily") == 0)
+			conf->priority_reset_period = PRIORITY_RESET_DAILY;
+		else if (strcasecmp(temp_str, "weekly") == 0)
+			conf->priority_reset_period = PRIORITY_RESET_WEEKLY;
+		else if (strcasecmp(temp_str, "monthly") == 0)
+			conf->priority_reset_period = PRIORITY_RESET_MONTHLY;
+		else if (strcasecmp(temp_str, "quarterly") == 0)
+			conf->priority_reset_period = PRIORITY_RESET_QUARTERLY;
+		else if (strcasecmp(temp_str, "yearly") == 0)
+			conf->priority_reset_period = PRIORITY_RESET_YEARLY;
+		else {
+			fatal("Bad value \"%s\" for PriorityUsageResetPeriod",
+			      temp_str);
+		}
+		xfree(temp_str);
+	} else {
+		conf->priority_reset_period = PRIORITY_RESET_NONE;
+		if(!conf->priority_decay_hl) {
+			fatal("You have to either have "
+			      "PriorityDecayHalfLife != 0 or "
+			      "PriorityUsageResetPeriod set to something "
+			      "or the priority plugin will result in "
+			      "rolling over.");
+		}
+	}
+
+	if (!s_p_get_string(&conf->priority_type, "PriorityType", hashtbl))
+		conf->priority_type = xstrdup(DEFAULT_PRIORITY_TYPE);
+
+	if (!s_p_get_uint32(&conf->priority_weight_age,
+			    "PriorityWeightAge", hashtbl))
+		conf->priority_weight_age = 0;
+	if (!s_p_get_uint32(&conf->priority_weight_fs,
+			    "PriorityWeightFairshare", hashtbl))
+		conf->priority_weight_fs = 0;
+	if (!s_p_get_uint32(&conf->priority_weight_js,
+			    "PriorityWeightJobSize", hashtbl))
+		conf->priority_weight_js = 0;
+	if (!s_p_get_uint32(&conf->priority_weight_part,
+			    "PriorityWeightPartition", hashtbl))
+		conf->priority_weight_part = 0;
+	if (!s_p_get_uint32(&conf->priority_weight_qos,
+			    "PriorityWeightQOS", hashtbl))
+		conf->priority_weight_qos = 0;
+
 	if (!s_p_get_string(&conf->proctrack_type, "ProctrackType", hashtbl)) {
 		if (!strcmp(conf->switch_type,"switch/elan"))
 			conf->proctrack_type = xstrdup("proctrack/rms");
@@ -1873,24 +2173,27 @@ validate_and_set_defaults(slurm_ctl_conf_t *conf, s_p_hashtbl_t *hashtbl)
 		fatal("proctrack/linuxproc is incompatable with switch/elan");
 
 	if (s_p_get_string(&temp_str, "PrivateData", hashtbl)) {
+		if (strstr(temp_str, "account"))
+			conf->private_data |= PRIVATE_DATA_ACCOUNTS;
 		if (strstr(temp_str, "job"))
 			conf->private_data |= PRIVATE_DATA_JOBS;
 		if (strstr(temp_str, "node"))
 			conf->private_data |= PRIVATE_DATA_NODES;
 		if (strstr(temp_str, "partition"))
 			conf->private_data |= PRIVATE_DATA_PARTITIONS;
+		if (strstr(temp_str, "reservation"))
+			conf->private_data |= PRIVATE_DATA_RESERVATIONS;
 		if (strstr(temp_str, "usage"))
 			conf->private_data |= PRIVATE_DATA_USAGE;
-		if (strstr(temp_str, "users"))
+		if (strstr(temp_str, "user"))
 			conf->private_data |= PRIVATE_DATA_USERS;
-		if (strstr(temp_str, "accounts"))
-			conf->private_data |= PRIVATE_DATA_ACCOUNTS;
 		if (strstr(temp_str, "all"))
 			conf->private_data = 0xffff;
 		xfree(temp_str);
 	}
 
 	s_p_get_string(&conf->prolog, "Prolog", hashtbl);
+	s_p_get_string(&conf->prolog_slurmctld, "PrologSlurmctld", hashtbl);
 
 	if (!s_p_get_uint16(&conf->propagate_prio_process,
 			"PropagatePrioProcess", hashtbl)) {
@@ -1919,9 +2222,13 @@ validate_and_set_defaults(slurm_ctl_conf_t *conf, s_p_hashtbl_t *hashtbl)
 	if (!s_p_get_uint16(&conf->ret2service, "ReturnToService", hashtbl))
 		conf->ret2service = DEFAULT_RETURN_TO_SERVICE;
 
+	s_p_get_uint16(&conf->resv_over_run, "ResvOverRun", hashtbl);
+
 	s_p_get_string(&conf->resume_program, "ResumeProgram", hashtbl);
 	if (!s_p_get_uint16(&conf->resume_rate, "ResumeRate", hashtbl))
 		conf->resume_rate = DEFAULT_RESUME_RATE;
+	if (!s_p_get_uint16(&conf->resume_timeout, "ResumeTimeout", hashtbl))
+		conf->resume_timeout = DEFAULT_RESUME_TIMEOUT;
 
 	s_p_get_string(&conf->salloc_default_command, "SallocDefaultCommand",
 			hashtbl);
@@ -1950,6 +2257,16 @@ validate_and_set_defaults(slurm_ctl_conf_t *conf, s_p_hashtbl_t *hashtbl)
 	else if ((strcmp(conf->schedtype, "sched/gang") == 0) &&
 		 (conf->fast_schedule == 0))
 		fatal("FastSchedule=0 is not supported with sched/gang");
+	if (strcmp(conf->priority_type, "priority/multifactor") == 0) {
+		if (strcmp(conf->schedtype, "sched/wiki") == 0) {
+			fatal("PriorityType=priority/multifactor is "
+			      "incompatible with SchedulerType=sched/wiki");
+		}
+		if (strcmp(conf->schedtype, "sched/wiki2") == 0) {
+			fatal("PriorityType=priority/multifactor is "
+			      "incompatible with SchedulerType=sched/wiki2");
+		}
+	}
 
 	if (!s_p_get_string(&conf->select_type, "SelectType", hashtbl))
 		conf->select_type = xstrdup(DEFAULT_SELECT_TYPE);
@@ -1977,7 +2294,7 @@ validate_and_set_defaults(slurm_ctl_conf_t *conf, s_p_hashtbl_t *hashtbl)
 	} else {
 		uid_t my_uid = uid_from_string(conf->slurm_user_name);
 		if (my_uid == (uid_t) -1) {
-			error ("Invalid user for SlurmUser %s, ignored",
+			fatal ("Invalid user for SlurmUser %s, ignored",
 			       conf->slurm_user_name);
 			xfree(conf->slurm_user_name);
 		} else {
@@ -1985,6 +2302,20 @@ validate_and_set_defaults(slurm_ctl_conf_t *conf, s_p_hashtbl_t *hashtbl)
 		}
 	}
 
+	if (!s_p_get_string( &conf->slurmd_user_name, "SlurmdUser", hashtbl)) {
+		conf->slurmd_user_name = xstrdup("root");
+		conf->slurmd_user_id   = 0;
+	} else {
+		uid_t my_uid = uid_from_string(conf->slurmd_user_name);
+		if (my_uid == (uid_t) -1) {
+			fatal ("Invalid user for SlurmdUser %s, ignored",
+			       conf->slurmd_user_name);
+			xfree(conf->slurmd_user_name);
+		} else {
+			conf->slurmd_user_id = my_uid;
+		}
+	}
+
 	if (s_p_get_uint16(&conf->slurmctld_debug, "SlurmctldDebug", hashtbl))
 		_normalize_debug_level(&conf->slurmctld_debug);
 	else
@@ -2023,6 +2354,7 @@ validate_and_set_defaults(slurm_ctl_conf_t *conf, s_p_hashtbl_t *hashtbl)
 		conf->slurmd_timeout = DEFAULT_SLURMD_TIMEOUT;
 
 	s_p_get_string(&conf->srun_prolog, "SrunProlog", hashtbl);
+	s_p_get_uint16(&conf->srun_io_timeout, "SrunIOTimeout", hashtbl);
 	s_p_get_string(&conf->srun_epilog, "SrunEpilog", hashtbl);
 
 	if (!s_p_get_string(&conf->state_save_location,
@@ -2038,6 +2370,8 @@ validate_and_set_defaults(slurm_ctl_conf_t *conf, s_p_hashtbl_t *hashtbl)
 		conf->suspend_time = long_suspend_time + 1;
 	else
 		conf->suspend_time = 0;
+	if (!s_p_get_uint16(&conf->suspend_timeout, "SuspendTimeout", hashtbl))
+		conf->suspend_timeout = DEFAULT_SUSPEND_TIMEOUT;
 
 	/* see above for switch_type, order dependent */
 
@@ -2045,13 +2379,46 @@ validate_and_set_defaults(slurm_ctl_conf_t *conf, s_p_hashtbl_t *hashtbl)
 		conf->task_plugin = xstrdup(DEFAULT_TASK_PLUGIN);
 
 	if (s_p_get_string(&temp_str, "TaskPluginParam", hashtbl)) {
-		if (strcasecmp(temp_str, "cpusets") == 0)
-			conf->task_plugin_param = TASK_PARAM_CPUSETS;
-		else if (strcasecmp(temp_str, "sched") == 0)
-			conf->task_plugin_param = TASK_PARAM_SCHED;
-		else {
-			fatal("Bad TaskPluginParam: %s", temp_str);
-			conf->task_plugin_param = TASK_PARAM_NONE;
+		char *last = NULL, *tok;
+		bool set_mode = false, set_unit = false;
+		tok = strtok_r(temp_str, ",", &last);
+		while (tok) {
+			if (strcasecmp(tok, "none") == 0) {
+				if (set_unit)
+					fatal("Bad TaskPluginParam: %s", tok);
+				set_unit = true;
+				conf->task_plugin_param |= CPU_BIND_NONE;
+			} else if (strcasecmp(tok, "sockets") == 0) {
+				if (set_unit)
+					fatal("Bad TaskPluginParam: %s", tok);
+				set_unit = true;
+				conf->task_plugin_param |= CPU_BIND_TO_SOCKETS;
+			} else if (strcasecmp(tok, "cores") == 0) {
+				if (set_unit)
+					fatal("Bad TaskPluginParam: %s", tok);
+				set_unit = true;
+				conf->task_plugin_param |= CPU_BIND_TO_CORES;
+			} else if (strcasecmp(tok, "threads") == 0) {
+				if (set_unit)
+					fatal("Bad TaskPluginParam: %s", tok);
+				set_unit = true;
+				conf->task_plugin_param |= CPU_BIND_TO_THREADS;
+			} else if (strcasecmp(tok, "cpusets") == 0) {
+				if (set_mode)
+					fatal("Bad TaskPluginParam: %s", tok);
+				set_mode = true;
+				conf->task_plugin_param |= CPU_BIND_CPUSETS;
+			} else if (strcasecmp(tok, "sched") == 0) {
+				if (set_mode)
+					fatal("Bad TaskPluginParam: %s", tok);
+				set_mode = true;
+				/* No change to task_plugin_param, 
+				 * this is the default */
+			} else if (strcasecmp(tok, "verbose") == 0) {
+				conf->task_plugin_param |= CPU_BIND_VERBOSE;
+			} else
+				fatal("Bad TaskPluginParam: %s", tok);
+			tok = strtok_r(NULL, ",", &last);
 		}
 		xfree(temp_str);
 	}
@@ -2064,7 +2431,10 @@ validate_and_set_defaults(slurm_ctl_conf_t *conf, s_p_hashtbl_t *hashtbl)
 
 	if (!s_p_get_uint16(&conf->wait_time, "WaitTime", hashtbl))
 		conf->wait_time = DEFAULT_WAIT_TIME;
-	
+
+	if (!s_p_get_string(&conf->topology_plugin, "TopologyPlugin", hashtbl))
+		conf->topology_plugin = xstrdup(DEFAULT_TOPOLOGY_PLUGIN);
+
 	if (s_p_get_uint16(&conf->tree_width, "TreeWidth", hashtbl)) {
 		if (conf->tree_width == 0) {
 			error("TreeWidth=0 is invalid");
@@ -2114,3 +2484,124 @@ slurm_conf_expand_slurmd_path(const char *path, const char *node_name)
 	
 	return dir;
 }
+
+/*
+ * debug_flags2str - convert a DebugFlags uint32_t to the equivalent string
+ */
+extern char * debug_flags2str(uint32_t debug_flags)
+{
+	char *rc = NULL;
+
+	if (debug_flags & DEBUG_FLAG_CPU_BIND) {
+		if (rc)
+			xstrcat(rc, ",");
+		xstrcat(rc, "CPU_Bind");
+	}
+	if (debug_flags & DEBUG_FLAG_SELECT_TYPE) {
+		if (rc)
+			xstrcat(rc, ",");
+		xstrcat(rc, "SelectType");
+	}
+	if (debug_flags & DEBUG_FLAG_STEPS) {
+		if (rc)
+			xstrcat(rc, ",");
+		xstrcat(rc, "Steps");
+	}
+	if (debug_flags & DEBUG_FLAG_TRIGGERS) {
+		if (rc)
+			xstrcat(rc, ",");
+		xstrcat(rc, "Triggers");
+	}
+	if (debug_flags & DEBUG_FLAG_WIKI) {
+		if (rc)
+			xstrcat(rc, ",");
+		xstrcat(rc, "Wiki");
+	}
+		
+	return rc;
+}
+
+/*
+ * debug_str2flags - Convert a DebugFlags string to the equivalent uint32_t
+ * Returns NO_VAL if invalid
+ */
+extern uint32_t debug_str2flags(char *debug_flags)
+{
+	uint32_t rc = 0;
+	char *tmp_str, *tok, *last = NULL;
+
+	if (!debug_flags)
+		return rc;
+
+	tmp_str = xstrdup(debug_flags);
+	tok = strtok_r(tmp_str, ",", &last);
+	while (tok) {
+		if      (strcasecmp(tok, "CPU_Bind") == 0)
+			rc |= DEBUG_FLAG_CPU_BIND;
+		else if (strcasecmp(tok, "SelectType") == 0)
+			rc |= DEBUG_FLAG_SELECT_TYPE;
+		else if (strcasecmp(tok, "Steps") == 0)
+			rc |= DEBUG_FLAG_STEPS;
+		else if (strcasecmp(tok, "Triggers") == 0)
+			rc |= DEBUG_FLAG_TRIGGERS;
+		else if (strcasecmp(tok, "Wiki") == 0)
+			rc |= DEBUG_FLAG_WIKI;
+		else {
+			error("Invalid DebugFlag: %s", tok);
+			rc = NO_VAL;
+			break;
+		}
+		tok = strtok_r(NULL, ",", &last);
+	}
+	xfree(tmp_str);
+
+	return rc;
+}
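+
+/* Example of use (sketch): round-tripping DebugFlags between string
+ * and numeric form, assuming the usual src/common log and xstring
+ * calls:
+ *
+ *	uint32_t flags = debug_str2flags("CPU_Bind,Steps");
+ *	char *str = debug_flags2str(flags);	// yields "CPU_Bind,Steps"
+ *	info("DebugFlags=%s", str);
+ *	xfree(str);
+ */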
+
+extern void destroy_config_key_pair(void *object)
+{
+	config_key_pair_t *key_pair_ptr = (config_key_pair_t *)object;
+
+	if(key_pair_ptr) {
+		xfree(key_pair_ptr->name);
+		xfree(key_pair_ptr->value);
+		xfree(key_pair_ptr);
+	}
+}
+
+extern void pack_config_key_pair(void *in, uint16_t rpc_version, Buf buffer)
+{
+	config_key_pair_t *object = (config_key_pair_t *)in;
+	packstr(object->name,  buffer);
+	packstr(object->value, buffer);
+}
+
+extern int unpack_config_key_pair(void **object, uint16_t rpc_version,
+				  Buf buffer)
+{
+	uint32_t uint32_tmp;
+	config_key_pair_t *object_ptr = xmalloc(sizeof(config_key_pair_t));
+	
+	*object = object_ptr;
+	safe_unpackstr_xmalloc(&object_ptr->name,  &uint32_tmp, buffer);
+	safe_unpackstr_xmalloc(&object_ptr->value, &uint32_tmp, buffer);
+	
+	return SLURM_SUCCESS;
+
+unpack_error:
+	destroy_config_key_pair(object_ptr);
+	*object = NULL;
+	return SLURM_ERROR;
+}
+
+extern int sort_key_pairs(config_key_pair_t *key_a, config_key_pair_t *key_b)
+{
+	int size_a = strcmp(key_a->name, key_b->name);
+
+	if (size_a < 0)
+		return -1;
+	else if (size_a > 0)
+		return 1;
+
+	return 0;
+}
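+
+/* Example of use (sketch): sorting a List of config_key_pair_t records
+ * before display, assuming a list built with list_create() and the
+ * list_sort()/ListCmpF interface from src/common/list.h:
+ *
+ *	list_sort(key_pair_list, (ListCmpF) sort_key_pairs);
+ */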
diff --git a/src/common/read_config.h b/src/common/read_config.h
index b700066544acb0e9dc8726b20840b62c33c21052..924d8517592c68f5d532b0d844e94046b643529f 100644
--- a/src/common/read_config.h
+++ b/src/common/read_config.h
@@ -3,14 +3,15 @@
  *  file
  *****************************************************************************
  *  Copyright (C) 2002-2007 The Regents of the University of California.
- *  Copyright (C) 2008 Lawrence Livermore National Security.
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
  *  Portions Copyright (C) 2008 Vijay Ramasubramanian.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 *  Written by Morris Jette <jette1@llnl.gov>.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -54,12 +55,14 @@ extern char *default_plugstack;
 #define ACCOUNTING_ENFORCE_LIMITS 0x0002
 #define ACCOUNTING_ENFORCE_WCKEYS 0x0004
 
+#define DEFAULT_ACCOUNTING_DB      "slurm_acct_db"
 #define DEFAULT_ACCOUNTING_ENFORCE  0
 #define DEFAULT_ACCOUNTING_STORAGE_TYPE "accounting_storage/none"
-#define DEFAULT_AUTH_TYPE          "auth/none"
+#define DEFAULT_AUTH_TYPE          "auth/munge"
 #define DEFAULT_BATCH_START_TIMEOUT 10
 #define DEFAULT_CACHE_GROUPS        0
-#define DEFAULT_CRYPTO_TYPE        "crypto/openssl"
+#define DEFAULT_COMPLETE_WAIT       0
+#define DEFAULT_CRYPTO_TYPE        "crypto/munge"
 #define DEFAULT_EPILOG_MSG_TIME     2000
 #define DEFAULT_FAST_SCHEDULE       1
 #define DEFAULT_FIRST_JOB_ID        1
@@ -72,8 +75,11 @@ extern char *default_plugstack;
 #define ACCOUNTING_STORAGE_TYPE_NONE "accounting_storage/none"
 #define DEFAULT_DISABLE_ROOT_JOBS   0
 #define DEFAULT_ENFORCE_PART_LIMITS 0
+#define DEFAULT_JOB_CKPT_DIR        "/var/slurm/checkpoint"
 #define DEFAULT_JOB_COMP_TYPE       "jobcomp/none"
 #define DEFAULT_JOB_COMP_LOC        "/var/log/slurm_jobcomp.log"
+#define DEFAULT_JOB_COMP_DB         "slurm_jobcomp_db"
+#define DEFAULT_KILL_ON_BAD_EXIT    0
 #define DEFAULT_KILL_TREE           0
 #define DEFAULT_KILL_WAIT           30
 #define DEFAULT_MAIL_PROG           "/bin/mail"
@@ -90,9 +96,12 @@ extern char *default_plugstack;
 #  define DEFAULT_CHECKPOINT_TYPE   "checkpoint/none"
 #  define DEFAULT_PROCTRACK_TYPE    "proctrack/pgid"
 #endif
+#define DEFAULT_PRIORITY_DECAY      604800 /* 7 days */
+#define DEFAULT_PRIORITY_TYPE       "priority/basic"
 #define DEFAULT_PROPAGATE_PRIO_PROCESS 0
 #define DEFAULT_RETURN_TO_SERVICE   0
-#define DEFAULT_RESUME_RATE         60
+#define DEFAULT_RESUME_RATE         300
+#define DEFAULT_RESUME_TIMEOUT      60
 #define DEFAULT_SAVE_STATE_LOC      "/tmp"
 #define DEFAULT_SCHEDROOTFILTER     1
 #define DEFAULT_SCHEDULER_PORT      7321
@@ -112,11 +121,19 @@ extern char *default_plugstack;
 #define DEFAULT_STORAGE_LOC         "/var/log/slurm_jobacct.log"
 #define DEFAULT_STORAGE_USER        "root"
 #define DEFAULT_STORAGE_PORT        0
+#define DEFAULT_PGSQL_PORT          5432
+#define DEFAULT_MYSQL_PORT          3306
 #define DEFAULT_SUSPEND_RATE        60
 #define DEFAULT_SUSPEND_TIME        0
+#define DEFAULT_SUSPEND_TIMEOUT     30
 #define DEFAULT_SWITCH_TYPE         "switch/none"
 #define DEFAULT_TASK_PLUGIN         "task/none"
 #define DEFAULT_TMP_FS              "/tmp"
+#ifdef HAVE_3D
+#  define DEFAULT_TOPOLOGY_PLUGIN     "topology/3d_torus"
+#else
+#  define DEFAULT_TOPOLOGY_PLUGIN     "topology/none"
+#endif
 #define DEFAULT_WAIT_TIME           0
 #define DEFAULT_TREE_WIDTH          50
 #define DEFAULT_UNKILLABLE_TIMEOUT  60 /* seconds */
@@ -140,25 +157,30 @@ typedef struct slurm_conf_node {
 } slurm_conf_node_t;
 
 typedef struct slurm_conf_partition {
+	char *allow_alloc_nodes;/* comma delimited list of allowed
+				 * allocating nodes; 
+				 * NULL indicates all */
+	char *allow_groups;	/* comma delimited list of groups, 
+				 * NULL indicates all */
+	bool default_flag;	/* Set if default partition */
+	uint32_t default_time;	/* minutes or INFINITE */
 	uint16_t disable_root_jobs; /* if set then user root can't run
 				     * jobs; if NO_VAL use global
 				     * default */
-	char	*name;		/* name of the partition */
+
 	bool     hidden_flag;	/* 1 if hidden by default */
+	uint16_t max_share;	/* number of jobs to gang schedule */
 	uint32_t max_time;	/* minutes or INFINITE */
 	uint32_t max_nodes;	/* per job or INFINITE */
 	uint32_t min_nodes;	/* per job */
-	uint32_t total_nodes;	/* total number of nodes in the partition */
-	uint32_t total_cpus;	/* total number of cpus in the partition */
+	char	*name;		/* name of the partition */
+	char 	*nodes;		/* comma delimited list names of nodes */
 	uint16_t priority;	/* scheduling priority for jobs */
 	bool     root_only_flag;/* 1 if allocate/submit RPC can only be 
 				   issued by user root */
-	uint16_t max_share;	/* number of jobs to gang schedule */
 	bool     state_up_flag;	/* 1 if state is up, 0 if down */
-	char *nodes;		/* comma delimited list names of nodes */
-	char *allow_groups;	/* comma delimited list of groups, 
-				 * NULL indicates all */
-	bool default_flag;
+	uint32_t total_nodes;	/* total number of nodes in the partition */
+	uint32_t total_cpus;	/* total number of cpus in the partition */
 } slurm_conf_partition_t;
 
 typedef struct slurm_conf_downnodes {
@@ -167,6 +189,11 @@ typedef struct slurm_conf_downnodes {
 	char *state;
 } slurm_conf_downnodes_t;
 
+typedef struct {
+	char *name;
+	char *value;
+} config_key_pair_t;
+
 /*
  * slurm_conf_init - load the slurm configuration from the a file.
  * IN file_name - name of the slurm configuration file to be read
@@ -232,7 +259,7 @@ extern int slurm_conf_partition_array(slurm_conf_partition_t **ptr_array[]);
 
 /*
  * Set "ptr_array" with the pointer to an array of pointers to
- * slurm_conf_node_t structures.
+ * slurm_conf_downnodes_t structures.
  * 
  * Return value is the length of the array.
  */
@@ -251,13 +278,19 @@ extern char *slurm_conf_get_hostname(const char *node_name);
 /*
  * slurm_conf_get_nodename - Return the NodeName for given NodeHostname
  *
- * Returned string was allocated with xmalloc(), and must be freed by
- * the caller using xfree().
- *
+ * NOTE: Call xfree() to release returned value's memory.
  * NOTE: Caller must NOT be holding slurm_conf_lock().
  */
 extern char *slurm_conf_get_nodename(const char *node_hostname);
 
+/*
+ * slurm_conf_get_nodeaddr - Return the NodeAddr for given NodeHostname
+ *
+ * NOTE: Call xfree() to release returned value's memory.
+ * NOTE: Caller must NOT be holding slurm_conf_lock().
+ */
+extern char *slurm_conf_get_nodeaddr(const char *node_hostname);
+
 /*
  * slurm_conf_get_aliased_nodename - Return the NodeName matching an alias
  * of the local hostname
@@ -333,4 +366,23 @@ extern int gethostname_short (char *name, size_t len);
 extern char *slurm_conf_expand_slurmd_path(const char *path,
 					   const char *node_name);
 
+/*
+ * debug_flags2str - convert a DebugFlags uint32_t to the equivalent string
+ * Returns an xmalloc()ed string which the caller must free with xfree().
+ */
+extern char *debug_flags2str(uint32_t debug_flags);
+
+/*
+ * debug_str2flags - Convert a DebugFlags string to the equivalent uint32_t
+ * Returns NO_VAL if invalid
+ */
+extern uint32_t debug_str2flags(char *debug_flags);
+
+extern void destroy_config_key_pair(void *object);
+extern void pack_config_key_pair(void *in, uint16_t rpc_version, Buf buffer);
+extern int unpack_config_key_pair(void **object, uint16_t rpc_version, 
+				  Buf buffer);
+extern int sort_key_pairs(config_key_pair_t *key_a, config_key_pair_t *key_b);
+
 #endif /* !_READ_CONFIG_H */
diff --git a/src/common/safeopen.c b/src/common/safeopen.c
index 21e1bd8c1e1cab8d73495d6e5e9d3f99de9417e2..ea9236416fc9029f2cef2bc7071acf5f42150202 100644
--- a/src/common/safeopen.c
+++ b/src/common/safeopen.c
@@ -1,13 +1,14 @@
 /*****************************************************************************\
  *  safeopen.c - safer interface to open()
- *  $Id: safeopen.c 13672 2008-03-19 23:10:58Z jette $
+ *  $Id: safeopen.c 16616 2009-02-20 17:00:27Z jette $
  *****************************************************************************
  *  Copyright (C) 2002 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/common/safeopen.h b/src/common/safeopen.h
index 3708a26575258ea7721e34c0451206f27f807f92..7a073900d099ac315c77479430fb5aa590465047 100644
--- a/src/common/safeopen.h
+++ b/src/common/safeopen.h
@@ -1,13 +1,14 @@
 /*****************************************************************************\
  *  safeopen.h - safer interface to open()
- *  $Id: safeopen.h 13672 2008-03-19 23:10:58Z jette $
+ *  $Id: safeopen.h 16616 2009-02-20 17:00:27Z jette $
  *****************************************************************************
  *  Copyright (C) 2002 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/common/select_job_res.c b/src/common/select_job_res.c
new file mode 100644
index 0000000000000000000000000000000000000000..ab4cf8fc97aa7cb7ee9e902ee39d39e4fba0c03d
--- /dev/null
+++ b/src/common/select_job_res.c
@@ -0,0 +1,924 @@
+/*****************************************************************************\
+ *  select_job_res.c - functions to manage data structure identifying specific
+ *	CPUs allocated to a job, step or partition
+ *****************************************************************************
+ *  Copyright (C) 2008 Lawrence Livermore National Security.
+ *  Written by Morris Jette <jette1@llnl.gov>.
+ *  CODE-OCEC-09-009. All rights reserved.
+ *  
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
+ *  
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *  
+ *  In addition, as a special exception, the copyright holders give permission 
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and 
+ *  distribute linked combinations including the two. You must obey the GNU 
+ *  General Public License in all respects for all of the code used other than 
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this 
+ *  exception to your version of the file(s), but you are not obligated to do 
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in 
+ *  the program, then also delete it here.
+ *  
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *  
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+\*****************************************************************************/
+
+#include <stdlib.h>
+#include <string.h>
+#include <slurm/slurm_errno.h>
+
+#include "src/common/hostlist.h"
+#include "src/common/log.h"
+#include "src/common/select_job_res.h"
+#include "src/common/xmalloc.h"
+#include "src/common/xassert.h"
+#include "src/slurmctld/slurmctld.h"
+
+
+/* Create an empty select_job_res data structure */
+extern select_job_res_t create_select_job_res(void)
+{
+	select_job_res_t select_job_res;
+
+	select_job_res = xmalloc(sizeof(struct select_job_res));
+	return select_job_res;
+}
+
+/* Set the socket and core counts associated with a set of selected
+ * nodes of a select_job_res data structure based upon slurmctld state.
+ * (sets cores_per_socket, sockets_per_node, and sock_core_rep_count based
+ * upon the value of node_bitmap, also creates core_bitmap based upon
+ * the total number of cores in the allocation). Call this ONLY from 
+ * slurmctld. Example of use:
+ *
+ * select_job_res_t select_job_res_ptr = create_select_job_res();
+ * node_name2bitmap("dummy[2,5,12,16]", true, &(select_res_ptr->node_bitmap));
+ * rc = build_select_job_res(select_job_res_ptr, node_record_table_ptr,
+ *			     slurmctld_conf.fast_schedule);
+ */
+extern int build_select_job_res(select_job_res_t select_job_res,
+				void *node_rec_table,
+				uint16_t fast_schedule)
+{
+	int i, bitmap_len;
+	int core_cnt = 0, sock_inx = -1;
+	uint32_t cores, socks;
+	struct node_record *node_ptr, *node_record_table;
+
+	if (select_job_res->node_bitmap == NULL) {
+		error("build_select_job_res: node_bitmap is NULL");
+		return SLURM_ERROR;
+	}
+
+	node_record_table = (struct node_record *) node_rec_table;
+	xfree(select_job_res->sockets_per_node);
+	xfree(select_job_res->cores_per_socket);
+	xfree(select_job_res->sock_core_rep_count);
+	select_job_res->sockets_per_node = xmalloc(sizeof(uint16_t) * 
+						   select_job_res->nhosts);
+	select_job_res->cores_per_socket = xmalloc(sizeof(uint16_t) * 
+						   select_job_res->nhosts);
+	select_job_res->sock_core_rep_count = xmalloc(sizeof(uint32_t) * 
+						      select_job_res->nhosts);
+
+	bitmap_len = bit_size(select_job_res->node_bitmap);
+	for (i=0; i<bitmap_len; i++) {
+		if (!bit_test(select_job_res->node_bitmap, i))
+			continue;
+		node_ptr = node_record_table + i;
+		if (fast_schedule) {
+			socks = node_ptr->config_ptr->sockets;
+			cores = node_ptr->config_ptr->cores;
+		} else {
+			socks = node_ptr->sockets;
+			cores = node_ptr->cores;
+		}
+		if ((sock_inx < 0) ||
+		    (socks != select_job_res->sockets_per_node[sock_inx]) ||
+		    (cores != select_job_res->cores_per_socket[sock_inx])) {
+			sock_inx++;
+			select_job_res->sockets_per_node[sock_inx] = socks;
+			select_job_res->cores_per_socket[sock_inx] = cores;
+		}
+		select_job_res->sock_core_rep_count[sock_inx]++;
+		core_cnt += (cores * socks);
+	}
+	select_job_res->core_bitmap      = bit_alloc(core_cnt);
+	select_job_res->core_bitmap_used = bit_alloc(core_cnt);
+	if ((select_job_res->core_bitmap == NULL) ||
+	    (select_job_res->core_bitmap_used == NULL))
+		fatal("bit_alloc malloc failure");
+	return SLURM_SUCCESS;
+}
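+
+/* Illustration (not executed): for an allocation of three nodes, each
+ * with 2 sockets x 4 cores, followed by one node with 1 socket x
+ * 8 cores, the function above produces
+ *	sockets_per_node    = { 2, 1 }
+ *	cores_per_socket    = { 4, 8 }
+ *	sock_core_rep_count = { 3, 1 }
+ * and allocates core_bitmap/core_bitmap_used of (2*4*3 + 1*8) = 32
+ * bits. */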
+
+/* Rebuild cpu_array_cnt, cpu_array_value, and cpu_array_reps based upon the
+ * values of nhosts and cpus in an existing data structure
+ * Return total CPU count or -1 on error */
+extern int build_select_job_res_cpu_array(select_job_res_t select_job_res_ptr)
+{
+	int cpu_count = 0, i;
+	uint32_t last_cpu_cnt = 0;
+
+	if (select_job_res_ptr->nhosts == 0)
+		return cpu_count;	/* no work to do */
+	if (select_job_res_ptr->cpus == NULL) {
+		error("build_select_job_res_cpu_array: cpus==NULL");
+		return -1;
+	}
+
+	/* clear vestigial data and create new arrays of max size */
+	select_job_res_ptr->cpu_array_cnt = 0;
+	xfree(select_job_res_ptr->cpu_array_reps);
+	select_job_res_ptr->cpu_array_reps = 
+		xmalloc(select_job_res_ptr->nhosts * sizeof(uint32_t));
+	xfree(select_job_res_ptr->cpu_array_value);
+	select_job_res_ptr->cpu_array_value = 
+		xmalloc(select_job_res_ptr->nhosts * sizeof(uint16_t));
+
+	for (i=0; i<select_job_res_ptr->nhosts; i++) {
+		if (select_job_res_ptr->cpus[i] != last_cpu_cnt) {
+			last_cpu_cnt = select_job_res_ptr->cpus[i];
+			select_job_res_ptr->cpu_array_value[
+				select_job_res_ptr->cpu_array_cnt] 
+				= last_cpu_cnt;
+			select_job_res_ptr->cpu_array_reps[
+				select_job_res_ptr->cpu_array_cnt] = 1;
+			select_job_res_ptr->cpu_array_cnt++;
+		} else {
+			select_job_res_ptr->cpu_array_reps[
+				select_job_res_ptr->cpu_array_cnt-1]++;
+		}
+		cpu_count += last_cpu_cnt;
+	}
+	return cpu_count;
+}
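+
+/* Example (sketch): with nhosts=8 and cpus = {4,4,2,2,2,2,2,2}, the
+ * function above yields cpu_array_cnt=2, cpu_array_value={4,2} and
+ * cpu_array_reps={2,6}, and returns the total CPU count of 20. */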
+
+/* Rebuild cpus array based upon the values of nhosts, cpu_array_value and
+ * cpu_array_reps in an existing data structure
+ * Return total CPU count or -1 on error */
+extern int build_select_job_res_cpus_array(select_job_res_t select_job_res_ptr)
+{
+	int cpu_count = 0, cpu_inx, i, j;
+
+	if (select_job_res_ptr->nhosts == 0)
+		return cpu_count;	/* no work to do */
+	if (select_job_res_ptr->cpu_array_cnt == 0) {
+		error("build_select_job_res_cpus_array: cpu_array_cnt==0");
+		return -1;
+	}
+	if (select_job_res_ptr->cpu_array_value == NULL) {
+		error("build_select_job_res_cpus_array: cpu_array_value==NULL");
+		return -1;
+	}
+	if (select_job_res_ptr->cpu_array_reps == NULL) {
+		error("build_select_job_res_cpus_array: cpu_array_reps==NULL");
+		return -1;
+	}
+
+	/* clear vestigial data and create new arrays of max size */
+	xfree(select_job_res_ptr->cpus);
+	select_job_res_ptr->cpus = 
+		xmalloc(select_job_res_ptr->nhosts * sizeof(uint16_t));
+
+	cpu_inx = 0;
+	for (i=0; i<select_job_res_ptr->cpu_array_cnt; i++) {
+		for (j=0; j<select_job_res_ptr->cpu_array_reps[i]; j++) {
+			if (cpu_inx >= select_job_res_ptr->nhosts) {
+				error("build_select_job_res_cpus_array: "
+				      "cpu_array is too long");
+				return -1;
+			}
+			cpu_count += select_job_res_ptr->cpu_array_value[i];
+			select_job_res_ptr->cpus[cpu_inx++] = 
+				select_job_res_ptr->cpu_array_value[i];
+		}
+	}
+	if (cpu_inx < select_job_res_ptr->nhosts) {
+		error("build_select_job_res_cpus_array: "
+		      "cpu_array is incomplete");
+		return -1;
+	}
+	return cpu_count;
+}
+
+/* Reset the node_bitmap in a select_job_res data structure
+ * This is needed after a restart/reconfiguration since nodes can 
+ * be added or removed from the system, resulting in changes to 
+ * the bitmap size or bit positions */
+extern void reset_node_bitmap(select_job_res_t select_job_res_ptr,
+			      bitstr_t *new_node_bitmap)
+{
+	if (select_job_res_ptr) {
+		if (select_job_res_ptr->node_bitmap)
+			bit_free(select_job_res_ptr->node_bitmap);
+		if (new_node_bitmap) {
+			select_job_res_ptr->node_bitmap =
+				bit_copy(new_node_bitmap);
+		}
+	}
+}
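+
+/* Example of use (sketch) after a slurmctld restart/reconfiguration;
+ * assumes the job record exposes its resources as job_ptr->select_job:
+ *
+ *	bitstr_t *new_bitmap = NULL;
+ *	if (node_name2bitmap(job_ptr->nodes, false, &new_bitmap) == 0)
+ *		reset_node_bitmap(job_ptr->select_job, new_bitmap);
+ *	if (new_bitmap)
+ *		bit_free(new_bitmap);	// reset_node_bitmap() made a copy
+ */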
+
+extern int valid_select_job_res(select_job_res_t select_job_res,
+				void *node_rec_table,
+				uint16_t fast_schedule)
+{
+	int i, bitmap_len;
+	int sock_inx = 0, sock_cnt = 0;
+	uint32_t cores, socks;
+	struct node_record *node_ptr, *node_record_table;
+
+	if (select_job_res->node_bitmap == NULL) {
+		error("valid_select_job_res: node_bitmap is NULL");
+		return SLURM_ERROR;
+	}
+	if ((select_job_res->sockets_per_node == NULL) ||
+	    (select_job_res->cores_per_socket == NULL) ||
+	    (select_job_res->sock_core_rep_count == NULL)) {
+		error("valid_select_job_res: socket/core array is NULL");
+		return SLURM_ERROR;
+	}
+
+	node_record_table = (struct node_record *) node_rec_table;
+	bitmap_len = bit_size(select_job_res->node_bitmap);
+	for (i=0; i<bitmap_len; i++) {
+		if (!bit_test(select_job_res->node_bitmap, i))
+			continue;
+		node_ptr = node_record_table + i;
+		if (fast_schedule) {
+			socks = node_ptr->config_ptr->sockets;
+			cores = node_ptr->config_ptr->cores;
+		} else {
+			socks = node_ptr->sockets;
+			cores = node_ptr->cores;
+		}
+		if (sock_cnt >= select_job_res->sock_core_rep_count[sock_inx]) {
+			sock_inx++;
+			sock_cnt = 0;
+		}
+		if ((socks != select_job_res->sockets_per_node[sock_inx]) ||
+		    (cores != select_job_res->cores_per_socket[sock_inx])) {
+			error("valid_select_job_res: "
+			      "%s sockets:%u,%u, cores %u,%u",
+			      node_ptr->name,
+			      socks, 
+			      select_job_res->sockets_per_node[sock_inx],
+			      cores, 
+			      select_job_res->cores_per_socket[sock_inx]);
+			return SLURM_ERROR;
+		}
+		sock_cnt++;
+	}
+	return SLURM_SUCCESS;
+}
+
+extern select_job_res_t copy_select_job_res(select_job_res_t
+					    select_job_res_ptr)
+{
+	int i, sock_inx = 0;
+	select_job_res_t new_layout = xmalloc(sizeof(struct select_job_res));
+
+	xassert(select_job_res_ptr);
+	new_layout->nhosts = select_job_res_ptr->nhosts;
+	new_layout->nprocs = select_job_res_ptr->nprocs;
+	new_layout->node_req = select_job_res_ptr->node_req;
+	if (select_job_res_ptr->core_bitmap) {
+		new_layout->core_bitmap = bit_copy(select_job_res_ptr->
+						   core_bitmap);
+	}
+	if (select_job_res_ptr->core_bitmap_used) {
+		new_layout->core_bitmap_used = bit_copy(select_job_res_ptr->
+							core_bitmap_used);
+	}
+	if (select_job_res_ptr->node_bitmap) {
+		new_layout->node_bitmap = bit_copy(select_job_res_ptr->
+						   node_bitmap);
+	}
+
+	new_layout->cpu_array_cnt = select_job_res_ptr->cpu_array_cnt;
+	if (select_job_res_ptr->cpu_array_reps && 
+	    select_job_res_ptr->cpu_array_cnt) {
+		new_layout->cpu_array_reps = 
+			xmalloc(sizeof(uint32_t) *
+				select_job_res_ptr->cpu_array_cnt);
+		memcpy(new_layout->cpu_array_reps, 
+		       select_job_res_ptr->cpu_array_reps, 
+		       (sizeof(uint32_t) * select_job_res_ptr->cpu_array_cnt));
+	}
+	if (select_job_res_ptr->cpu_array_value && 
+	    select_job_res_ptr->cpu_array_cnt) {
+		new_layout->cpu_array_value = 
+			xmalloc(sizeof(uint16_t) *
+				select_job_res_ptr->cpu_array_cnt);
+		memcpy(new_layout->cpu_array_value, 
+		       select_job_res_ptr->cpu_array_value, 
+		       (sizeof(uint16_t) * select_job_res_ptr->cpu_array_cnt));
+	}
+
+	if (select_job_res_ptr->cpus) {
+		new_layout->cpus = xmalloc(sizeof(uint16_t) *
+					   select_job_res_ptr->nhosts);
+		memcpy(new_layout->cpus, select_job_res_ptr->cpus, 
+		       (sizeof(uint16_t) * select_job_res_ptr->nhosts));
+	}
+	if (select_job_res_ptr->cpus_used) {
+		new_layout->cpus_used = xmalloc(sizeof(uint16_t) *
+						select_job_res_ptr->nhosts);
+		memcpy(new_layout->cpus_used, select_job_res_ptr->cpus_used, 
+		       (sizeof(uint16_t) * select_job_res_ptr->nhosts));
+	}
+
+	if (select_job_res_ptr->memory_allocated) {
+		new_layout->memory_allocated = xmalloc(sizeof(uint32_t) * 
+						       new_layout->nhosts);
+		memcpy(new_layout->memory_allocated, 
+		       select_job_res_ptr->memory_allocated, 
+		       (sizeof(uint32_t) * select_job_res_ptr->nhosts));
+	}
+	if (select_job_res_ptr->memory_used) {
+		new_layout->memory_used = xmalloc(sizeof(uint32_t) * 
+						  new_layout->nhosts);
+		memcpy(new_layout->memory_used, 
+		       select_job_res_ptr->memory_used, 
+		       (sizeof(uint32_t) * select_job_res_ptr->nhosts));
+	}
+
+	/* Copy sockets_per_node, cores_per_socket and sock_core_rep_count */
+	new_layout->sockets_per_node = xmalloc(sizeof(uint16_t) * 
+					       new_layout->nhosts);	
+	new_layout->cores_per_socket = xmalloc(sizeof(uint16_t) * 
+					       new_layout->nhosts);	
+	new_layout->sock_core_rep_count = xmalloc(sizeof(uint32_t) * 
+						  new_layout->nhosts);	
+	for (i=0; i<new_layout->nhosts; i++) {
+		if (select_job_res_ptr->sock_core_rep_count[i] ==  0) {
+			error("copy_select_job_res: sock_core_rep_count=0");
+			break;
+		}
+		sock_inx += select_job_res_ptr->sock_core_rep_count[i];
+		if (sock_inx >= select_job_res_ptr->nhosts) {
+			i++;
+			break;
+		}
+	}
+	memcpy(new_layout->sockets_per_node, 
+	       select_job_res_ptr->sockets_per_node, (sizeof(uint16_t) * i));
+	memcpy(new_layout->cores_per_socket, 
+	       select_job_res_ptr->cores_per_socket, (sizeof(uint16_t) * i));
+	memcpy(new_layout->sock_core_rep_count, 
+	       select_job_res_ptr->sock_core_rep_count, 
+	       (sizeof(uint32_t) * i));
+
+	return new_layout;
+}
+
+extern void free_select_job_res(select_job_res_t *select_job_res_pptr)
+{
+	select_job_res_t select_job_res_ptr = *select_job_res_pptr;
+
+	if (select_job_res_ptr) {
+		if (select_job_res_ptr->core_bitmap)
+			bit_free(select_job_res_ptr->core_bitmap);
+		if (select_job_res_ptr->core_bitmap_used)
+			bit_free(select_job_res_ptr->core_bitmap_used);
+		xfree(select_job_res_ptr->cores_per_socket);
+		xfree(select_job_res_ptr->cpu_array_reps);
+		xfree(select_job_res_ptr->cpu_array_value);
+		xfree(select_job_res_ptr->cpus);
+		xfree(select_job_res_ptr->cpus_used);
+		xfree(select_job_res_ptr->memory_allocated);
+		xfree(select_job_res_ptr->memory_used);
+		if (select_job_res_ptr->node_bitmap)
+			bit_free(select_job_res_ptr->node_bitmap);
+		xfree(select_job_res_ptr->sock_core_rep_count);
+		xfree(select_job_res_ptr->sockets_per_node);
+		xfree(select_job_res_ptr);
+		*select_job_res_pptr = NULL;
+	}
+}
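+
+/* Example of use (sketch): duplicating and later releasing a
+ * structure:
+ *
+ *	select_job_res_t dup = copy_select_job_res(orig);
+ *	...
+ *	free_select_job_res(&dup);	// frees members, sets dup = NULL
+ */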
+
+/* Log the contents of a select_job_res data structure using info() */
+extern void log_select_job_res(uint32_t job_id,
+			       select_job_res_t select_job_res_ptr)
+{
+	int bit_inx = 0, bit_reps, i;
+	int array_size, node_inx;
+	int sock_inx = 0, sock_reps = 0;
+
+	if (select_job_res_ptr == NULL) {
+		error("log_select_job_res: select_job_res_ptr is NULL");
+		return;
+	}
+
+	info("====================");
+	info("job_id:%u nhosts:%u nprocs:%u node_req:%u", 
+	     job_id, select_job_res_ptr->nhosts, select_job_res_ptr->nprocs,
+	     select_job_res_ptr->node_req);
+
+	if (select_job_res_ptr->cpus == NULL) {
+		error("log_select_job_res: cpus array is NULL");
+		return;
+	}
+	if (select_job_res_ptr->memory_allocated == NULL) {
+		error("log_select_job_res: memory array is NULL");
+		return;
+	}
+	if ((select_job_res_ptr->cores_per_socket == NULL) ||
+	    (select_job_res_ptr->sockets_per_node == NULL) ||
+	    (select_job_res_ptr->sock_core_rep_count == NULL)) {
+		error("log_select_job_res: socket/core array is NULL");
+		return;
+	}
+	if (select_job_res_ptr->core_bitmap == NULL) {
+		error("log_select_job_res: core_bitmap is NULL");
+		return;
+	}
+	if (select_job_res_ptr->core_bitmap_used == NULL) {
+		error("log_select_job_res: core_bitmap_used is NULL");
+		return;
+	}
+	array_size = bit_size(select_job_res_ptr->core_bitmap);
+
+	/* Can only log node_bitmap from slurmctld, so don't bother here */
+	for (node_inx=0; node_inx<select_job_res_ptr->nhosts; node_inx++) {
+		uint32_t cpus_used = 0, memory_allocated = 0, memory_used = 0;
+		info("Node[%d]:", node_inx);
+
+		if (sock_reps >= 
+		    select_job_res_ptr->sock_core_rep_count[sock_inx]) {
+			sock_inx++;
+			sock_reps = 0;
+		}
+		sock_reps++;
+
+		if (select_job_res_ptr->cpus_used)
+			cpus_used = select_job_res_ptr->cpus_used[node_inx];
+		if (select_job_res_ptr->memory_used)
+			memory_used = select_job_res_ptr->memory_used[node_inx];
+		if (select_job_res_ptr->memory_allocated)
+			memory_allocated = select_job_res_ptr->
+					   memory_allocated[node_inx];
+
+		info("  Mem(MB):%u:%u  Sockets:%u  Cores:%u  CPUs:%u:%u", 
+		     memory_allocated, memory_used,
+		     select_job_res_ptr->sockets_per_node[sock_inx],
+		     select_job_res_ptr->cores_per_socket[sock_inx],
+		     select_job_res_ptr->cpus[node_inx],
+		     cpus_used);
+
+		bit_reps = select_job_res_ptr->sockets_per_node[sock_inx] *
+			   select_job_res_ptr->cores_per_socket[sock_inx];
+		for (i=0; i<bit_reps; i++) {
+			if (bit_inx >= array_size) {
+				error("log_select_job_res: array size wrong");
+				break;
+			}
+			if (bit_test(select_job_res_ptr->core_bitmap,
+				     bit_inx)) {
+				char *core_used = "";
+				if (bit_test(select_job_res_ptr->
+					     core_bitmap_used, bit_inx))
+					core_used = " and in use";
+				info("  Socket[%d] Core[%d] is allocated%s",
+				     (i / select_job_res_ptr->
+				          cores_per_socket[sock_inx]),
+				     (i % select_job_res_ptr->
+					  cores_per_socket[sock_inx]),
+				     core_used);
+			}
+			bit_inx++;
+		}
+	}
+	for (node_inx=0; node_inx<select_job_res_ptr->cpu_array_cnt; 
+	     node_inx++) {
+		if (node_inx == 0)
+			info("--------------------");
+		info("cpu_array_value[%d]:%u reps:%u", node_inx,
+		     select_job_res_ptr->cpu_array_value[node_inx],
+		     select_job_res_ptr->cpu_array_reps[node_inx]);
+	}
+	info("====================");
+}
+
+extern void pack_select_job_res(select_job_res_t select_job_res_ptr, 
+				Buf buffer)
+{
+	int i;
+	uint32_t core_cnt = 0, sock_recs = 0;
+
+	if (select_job_res_ptr == NULL) {
+		uint32_t empty = NO_VAL;
+		pack32(empty, buffer);
+		return;
+	}
+
+	xassert(select_job_res_ptr->core_bitmap);
+	xassert(select_job_res_ptr->core_bitmap_used);
+	xassert(select_job_res_ptr->cores_per_socket);
+	xassert(select_job_res_ptr->cpus);
+	xassert(select_job_res_ptr->nhosts);
+	xassert(select_job_res_ptr->sock_core_rep_count);
+	xassert(select_job_res_ptr->sockets_per_node);
+
+	pack32(select_job_res_ptr->nhosts, buffer);
+	pack32(select_job_res_ptr->nprocs, buffer);
+	pack8(select_job_res_ptr->node_req, buffer);
+
+	if (select_job_res_ptr->cpu_array_cnt &&
+	    select_job_res_ptr->cpu_array_reps &&
+	    select_job_res_ptr->cpu_array_value) {
+		pack32(select_job_res_ptr->cpu_array_cnt, buffer);
+		pack32_array(select_job_res_ptr->cpu_array_reps,
+			     select_job_res_ptr->cpu_array_cnt, buffer);
+		pack16_array(select_job_res_ptr->cpu_array_value,
+			     select_job_res_ptr->cpu_array_cnt, buffer);
+	} else {
+		pack32((uint32_t) 0, buffer);
+	}
+
+	pack16_array(select_job_res_ptr->cpus,
+		     select_job_res_ptr->nhosts, buffer);
+	if (select_job_res_ptr->cpus_used) {
+		pack16_array(select_job_res_ptr->cpus_used,
+			     select_job_res_ptr->nhosts, buffer);
+	} else
+		pack16_array(select_job_res_ptr->cpus_used, 0, buffer);
+
+	if (select_job_res_ptr->memory_allocated) {
+		pack32_array(select_job_res_ptr->memory_allocated,  
+			     select_job_res_ptr->nhosts, buffer);
+	} else
+		pack32_array(select_job_res_ptr->memory_allocated, 0, buffer);
+	if (select_job_res_ptr->memory_used) {
+		pack32_array(select_job_res_ptr->memory_used,  
+			     select_job_res_ptr->nhosts, buffer);
+	} else
+		pack32_array(select_job_res_ptr->memory_used, 0, buffer);
+
+	for (i=0; i<select_job_res_ptr->nhosts; i++) {
+		core_cnt += select_job_res_ptr->sockets_per_node[i] *
+			    select_job_res_ptr->cores_per_socket[i] *
+			    select_job_res_ptr->sock_core_rep_count[i];
+		sock_recs += select_job_res_ptr->sock_core_rep_count[i];
+		if (sock_recs >= select_job_res_ptr->nhosts)
+			break;
+	}
+	i++;
+	pack16_array(select_job_res_ptr->sockets_per_node,
+		     (uint32_t) i, buffer);
+	pack16_array(select_job_res_ptr->cores_per_socket,
+		     (uint32_t) i, buffer);
+	pack32_array(select_job_res_ptr->sock_core_rep_count, 
+		     (uint32_t) i, buffer);
+
+	pack32(core_cnt, buffer);
+	xassert(core_cnt == bit_size(select_job_res_ptr->core_bitmap));
+	pack_bit_fmt(select_job_res_ptr->core_bitmap, buffer);
+	xassert(core_cnt == bit_size(select_job_res_ptr->core_bitmap_used));
+	pack_bit_fmt(select_job_res_ptr->core_bitmap_used, buffer);
+	/* Do not pack the node_bitmap, but rebuild it in reset_node_bitmap()
+	 * based upon job_ptr->nodes and the current node table */
+}
+
+extern int unpack_select_job_res(select_job_res_t *select_job_res_pptr, 
+				 Buf buffer)
+{
+	char *bit_fmt = NULL;
+	uint32_t core_cnt, empty, tmp32;
+	select_job_res_t select_job_res;
+
+	xassert(select_job_res_pptr);
+	safe_unpack32(&empty, buffer);
+	if (empty == NO_VAL) {
+		*select_job_res_pptr = NULL;
+		return SLURM_SUCCESS;
+	}
+
+	select_job_res = xmalloc(sizeof(struct select_job_res));
+	select_job_res->nhosts = empty;
+	safe_unpack32(&select_job_res->nprocs, buffer);
+	safe_unpack8(&select_job_res->node_req, buffer);
+
+	safe_unpack32(&select_job_res->cpu_array_cnt, buffer);
+	if (select_job_res->cpu_array_cnt) {
+		safe_unpack32_array(&select_job_res->cpu_array_reps,
+				    &tmp32, buffer);
+		if (tmp32 != select_job_res->cpu_array_cnt)
+			goto unpack_error;
+		safe_unpack16_array(&select_job_res->cpu_array_value,
+				    &tmp32, buffer);
+		if (tmp32 != select_job_res->cpu_array_cnt)
+			goto unpack_error;
+	}
+
+	safe_unpack16_array(&select_job_res->cpus, &tmp32, buffer);
+	if (tmp32 != select_job_res->nhosts)
+		goto unpack_error;
+	safe_unpack16_array(&select_job_res->cpus_used, &tmp32, buffer);
+	if (tmp32 == 0)
+		xfree(select_job_res->cpus_used);
+
+	safe_unpack32_array(&select_job_res->memory_allocated,
+			    &tmp32, buffer);
+	if (tmp32 == 0)
+		xfree(select_job_res->memory_allocated);
+	else if (tmp32 != select_job_res->nhosts)
+		goto unpack_error;
+	safe_unpack32_array(&select_job_res->memory_used, &tmp32, buffer);
+	if (tmp32 == 0)
+		xfree(select_job_res->memory_used);
+
+	safe_unpack16_array(&select_job_res->sockets_per_node, &tmp32, buffer);
+	safe_unpack16_array(&select_job_res->cores_per_socket, &tmp32, buffer);
+	safe_unpack32_array(&select_job_res->sock_core_rep_count,
+			    &tmp32, buffer);
+
+	safe_unpack32(&core_cnt, buffer);    /* NOTE: Not part of struct */
+	safe_unpackstr_xmalloc(&bit_fmt, &tmp32, buffer);
+	select_job_res->core_bitmap = bit_alloc((bitoff_t) core_cnt);
+	if (bit_unfmt(select_job_res->core_bitmap, bit_fmt))
+		goto unpack_error;
+	xfree(bit_fmt);
+	safe_unpackstr_xmalloc(&bit_fmt, &tmp32, buffer);
+	select_job_res->core_bitmap_used = bit_alloc((bitoff_t) core_cnt);
+	if (bit_unfmt(select_job_res->core_bitmap_used, bit_fmt))
+		goto unpack_error;
+	xfree(bit_fmt);
+	/* node_bitmap is not packed, but rebuilt in reset_node_bitmap()
+	 * based upon job_ptr->nodes and the current node table */
+
+	*select_job_res_pptr = select_job_res;
+	return SLURM_SUCCESS;
+
+  unpack_error:
+	free_select_job_res(&select_job_res);
+	xfree(bit_fmt);
+	*select_job_res_pptr = NULL;
+	return SLURM_ERROR;
+}
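+
+/* Example of use (sketch): a pack/unpack round trip through a buffer,
+ * using the Buf interface from src/common/pack.h:
+ *
+ *	Buf buf = init_buf(BUF_SIZE);
+ *	pack_select_job_res(select_ptr, buf);
+ *	set_buf_offset(buf, 0);		// rewind before unpacking
+ *	select_job_res_t copy = NULL;
+ *	int rc = unpack_select_job_res(&copy, buf);
+ *	free_buf(buf);
+ */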
+
+extern int get_select_job_res_offset(select_job_res_t select_job_res_ptr, 
+				     uint32_t node_id, uint16_t socket_id, 
+				     uint16_t core_id)
+{
+	int i, bit_inx = 0;
+
+	xassert(select_job_res_ptr);
+
+	for (i=0; i<select_job_res_ptr->nhosts; i++) {
+		if (select_job_res_ptr->sock_core_rep_count[i] <= node_id) {
+			bit_inx += select_job_res_ptr->sockets_per_node[i] *
+				   select_job_res_ptr->cores_per_socket[i] *
+				   select_job_res_ptr->sock_core_rep_count[i];
+			node_id -= select_job_res_ptr->sock_core_rep_count[i];
+		} else if (socket_id >= select_job_res_ptr->
+					sockets_per_node[i]) {
+			error("get_select_job_res_bit: socket_id >= socket_cnt "
+			      "(%u >= %u)", socket_id, 
+			      select_job_res_ptr->sockets_per_node[i]);
+			return -1;
+		} else if (core_id >= select_job_res_ptr->cores_per_socket[i]) {
+			error("get_select_job_res_bit: core_id >= core_cnt "
+			      "(%u >= %u)", core_id, 
+			      select_job_res_ptr->cores_per_socket[i]);
+			return -1;
+		} else {
+			bit_inx += select_job_res_ptr->sockets_per_node[i] *
+				   select_job_res_ptr->cores_per_socket[i] *
+				   node_id;
+			bit_inx += select_job_res_ptr->cores_per_socket[i] *
+				   socket_id;
+			bit_inx += core_id;
+			break;
+		}
+	}
+	i = bit_size(select_job_res_ptr->core_bitmap);
+	if (bit_inx >= i) {
+		error("get_select_job_res_bit: offset >= bitmap size "
+		      "(%d >= %d)", bit_inx, i);
+		return -1;
+	}
+
+	return bit_inx;
+}
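+
+/* Example (sketch): with the 2-node, 2-socket, 2-core core_bitmap
+ * layout shown in select_job_res.h,
+ * get_select_job_res_offset(ptr, 1, 1, 0) returns bit 6, i.e.
+ * Node_1/Sock_1/Core_0. */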
+
+extern int get_select_job_res_bit(select_job_res_t select_job_res_ptr, 
+				  uint32_t node_id, uint16_t socket_id, 
+				  uint16_t core_id)
+{
+	int bit_inx = get_select_job_res_offset(select_job_res_ptr, node_id,
+						socket_id, core_id);
+	if (bit_inx < 0)
+		return SLURM_ERROR;
+
+	return bit_test(select_job_res_ptr->core_bitmap, bit_inx);
+}
+
+extern int set_select_job_res_bit(select_job_res_t select_job_res_ptr, 
+				  uint32_t node_id, uint16_t socket_id, 
+				  uint16_t core_id)
+{
+	int bit_inx = get_select_job_res_offset(select_job_res_ptr, node_id,
+						socket_id, core_id);
+	if (bit_inx < 0)
+		return SLURM_ERROR;
+
+	bit_set(select_job_res_ptr->core_bitmap, bit_inx);
+	return SLURM_SUCCESS;
+}
+
+extern int get_select_job_res_node(select_job_res_t select_job_res_ptr, 
+				   uint32_t node_id)
+{
+	int i, bit_inx = 0, core_cnt = 0;
+
+	xassert(select_job_res_ptr);
+
+	for (i=0; i<select_job_res_ptr->nhosts; i++) {
+		if (select_job_res_ptr->sock_core_rep_count[i] <= node_id) {
+			bit_inx += select_job_res_ptr->sockets_per_node[i] *
+				   select_job_res_ptr->cores_per_socket[i] *
+				   select_job_res_ptr->sock_core_rep_count[i];
+			node_id -= select_job_res_ptr->sock_core_rep_count[i];
+		} else {
+			bit_inx += select_job_res_ptr->sockets_per_node[i] *
+				   select_job_res_ptr->cores_per_socket[i] *
+				   node_id;
+			core_cnt = select_job_res_ptr->sockets_per_node[i] *
+				   select_job_res_ptr->cores_per_socket[i];
+			break;
+		}
+	}
+	if (core_cnt < 1) {
+		error("get_select_job_res_node: core_cnt=0");
+		return 0;
+	}
+	i = bit_size(select_job_res_ptr->core_bitmap);
+	if ((bit_inx + core_cnt) > i) {
+		error("get_select_job_res_node: offset > bitmap size "
+		      "(%d >= %d)", (bit_inx + core_cnt), i);
+		return 0;
+	}
+
+	for (i=0; i<core_cnt; i++) {
+		if (bit_test(select_job_res_ptr->core_bitmap, bit_inx++))
+			return 1;
+	}
+	return 0;
+}
+
+extern int set_select_job_res_node(select_job_res_t select_job_res_ptr, 
+				   uint32_t node_id)
+{
+	int i, bit_inx = 0, core_cnt = 0;
+
+	xassert(select_job_res_ptr);
+
+	for (i=0; i<select_job_res_ptr->nhosts; i++) {
+		if (select_job_res_ptr->sock_core_rep_count[i] <= node_id) {
+			bit_inx += select_job_res_ptr->sockets_per_node[i] *
+				   select_job_res_ptr->cores_per_socket[i] *
+				   select_job_res_ptr->sock_core_rep_count[i];
+			node_id -= select_job_res_ptr->sock_core_rep_count[i];
+		} else {
+			bit_inx += select_job_res_ptr->sockets_per_node[i] *
+				   select_job_res_ptr->cores_per_socket[i] *
+				   node_id;
+			core_cnt = select_job_res_ptr->sockets_per_node[i] *
+				   select_job_res_ptr->cores_per_socket[i];
+			break;
+		}
+	}
+	if (core_cnt < 1) {
+		error("set_select_job_res_node: core_cnt=0");
+		return SLURM_ERROR;
+	}
+
+	i = bit_size(select_job_res_ptr->core_bitmap);
+	if ((bit_inx + core_cnt) > i) {
+		error("set_select_job_res_node: offset > bitmap size "
+		      "(%d >= %d)", (bit_inx + core_cnt), i);
+		return SLURM_ERROR;
+	}
+
+	for (i=0; i<core_cnt; i++)
+		bit_set(select_job_res_ptr->core_bitmap, bit_inx++);
+
+	return SLURM_SUCCESS;
+}
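+
+/* Example (sketch): set_select_job_res_node(ptr, 0) sets every core
+ * bit on the first allocated node, after which
+ * get_select_job_res_node(ptr, 0) returns 1. */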
+
+extern int get_select_job_res_cnt(select_job_res_t select_job_res_ptr, 
+				  uint32_t node_id,
+				  uint16_t *socket_cnt, 
+				  uint16_t *cores_per_socket_cnt)
+{
+	int i, node_inx = -1;
+
+	xassert(socket_cnt);
+	xassert(cores_per_socket_cnt);
+	xassert(select_job_res_ptr->cores_per_socket);
+	xassert(select_job_res_ptr->sock_core_rep_count);
+	xassert(select_job_res_ptr->sockets_per_node);
+
+	for (i=0; i<select_job_res_ptr->nhosts; i++) {
+		node_inx += select_job_res_ptr->sock_core_rep_count[i];
+		if (node_id <= node_inx) {
+			*cores_per_socket_cnt = select_job_res_ptr->
+						cores_per_socket[i];
+			*socket_cnt = select_job_res_ptr->sockets_per_node[i];
+			return SLURM_SUCCESS;
+		}	
+	}
+
+	error("get_select_job_res_cnt: invalid node_id: %u", node_id);
+	*cores_per_socket_cnt = 0;
+	*socket_cnt = 0;
+	return SLURM_ERROR;
+}
+
+/* Return 1 if the given job can fit into the given full-length core_bitmap,
+ * else return 0.
+ */
+extern int can_select_job_cores_fit(select_job_res_t select_ptr,
+				    bitstr_t *full_bitmap,
+				    const uint16_t *bits_per_node,
+				    const uint32_t *bit_rep_count)
+{
+	uint32_t i, n, count = 1, last_bit = 0;
+	uint32_t c = 0, j = 0, k = 0;
+	
+	if (!full_bitmap)
+		return 1;
+	
+	for (i = 0, n = 0; i < select_ptr->nhosts; n++) {
+		last_bit += bits_per_node[k];
+		if (++count > bit_rep_count[k]) {
+			k++;
+			count = 1;
+		}
+		if (bit_test(select_ptr->node_bitmap, n) == 0) {
+			c = last_bit;
+			continue;
+		}
+		for (; c < last_bit; c++, j++) {
+			if (bit_test(full_bitmap, c) &&
+			    bit_test(select_ptr->core_bitmap, j))
+				return 0;
+		}
+		i++;
+	}
+	return 1;
+}
+
+/* add the given job to the given full_core_bitmap */
+extern void add_select_job_to_row(select_job_res_t select_ptr,
+				  bitstr_t **full_core_bitmap,
+				  const uint16_t *cores_per_node,
+				  const uint32_t *core_rep_count)
+{
+	uint32_t i, n, count = 1, last_bit = 0;
+	uint32_t c = 0, j = 0, k = 0;
+	
+	if (!select_ptr->core_bitmap)
+		return;
+
+	/* add the job to the row_bitmap */
+	if (*full_core_bitmap == NULL) {
+		uint32_t size = 0;
+		for (i = 0; core_rep_count[i]; i++) {
+			size += cores_per_node[i] * core_rep_count[i];
+		}
+		*full_core_bitmap = bit_alloc(size);
+		if (!*full_core_bitmap)
+			fatal("add_select_job_to_row: bitmap memory error");
+	}
+
+	for (i = 0, n = 0; i < select_ptr->nhosts; n++) {
+		last_bit += cores_per_node[k];
+		if (++count > core_rep_count[k]) {
+			k++;
+			count = 1;
+		}
+		if (bit_test(select_ptr->node_bitmap, n) == 0) {
+			c = last_bit;
+			continue;
+		}
+		for (; c < last_bit; c++, j++) {
+			if (bit_test(select_ptr->core_bitmap, j))
+				bit_set(*full_core_bitmap, c);
+		}
+		i++;
+	}
+}
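+
+/* Example of use (sketch): testing, then placing, a job in one row of
+ * a gang-scheduling core map (the row_bitmap name is illustrative):
+ *
+ *	if (can_select_job_cores_fit(sel, row_bitmap, cores_per_node,
+ *				     core_rep_count)) {
+ *		add_select_job_to_row(sel, &row_bitmap, cores_per_node,
+ *				      core_rep_count);
+ *	}
+ */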
diff --git a/src/common/select_job_res.h b/src/common/select_job_res.h
new file mode 100644
index 0000000000000000000000000000000000000000..6112b696915ec90c8af3c08ead5685eff2195aa4
--- /dev/null
+++ b/src/common/select_job_res.h
@@ -0,0 +1,233 @@
+/*****************************************************************************\
+ *  select_job_res.h - functions to manage data structure identifying specific
+ *	CPUs allocated to a job, step or partition
+ *****************************************************************************
+ *  Copyright (C) 2008 Lawrence Livermore National Security.
+ *  Written by Morris Jette <jette1@llnl.gov>.
+ *  CODE-OCEC-09-009. All rights reserved.
+ *  
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
+ *  
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *  
+ *  In addition, as a special exception, the copyright holders give permission 
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and 
+ *  distribute linked combinations including the two. You must obey the GNU 
+ *  General Public License in all respects for all of the code used other than 
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this 
+ *  exception to your version of the file(s), but you are not obligated to do 
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in 
+ *  the program, then also delete it here.
+ *  
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *  
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+\*****************************************************************************/
+
+#ifndef _SELECT_JOB_RES_H
+#define _SELECT_JOB_RES_H
+
+#if HAVE_CONFIG_H
+#  include "config.h"
+#  if HAVE_INTTYPES_H
+#    include <inttypes.h>
+#  else
+#    if HAVE_STDINT_H
+#      include <stdint.h>
+#    endif
+#  endif			/* HAVE_INTTYPES_H */
+#endif
+
+#include "src/common/bitstring.h"
+#include "src/common/pack.h"
+#include "src/slurmctld/slurmctld.h"
+
+/* struct select_job_res defines exactly which resources are allocated
+ *	to a job, step, partition, etc.
+ *
+ * core_bitmap		- Bitmap of allocated cores for all nodes and sockets
+ * core_bitmap_used	- Bitmap of cores allocated to job steps
+ * cores_per_socket	- Count of cores per socket on this node, built by 
+ *			  build_select_job_res(); ensures consistent 
+ *			  interpretation of core_bitmap
+ * cpus			- Count of desired/allocated CPUs per node for job/step
+ * cpus_used		- For a job, count of CPUs per node used by job steps
+ * cpu_array_cnt	- Count of elements in cpu_array_* below
+ * cpu_array_value	- Count of allocated CPUs per node for job
+ * cpu_array_reps	- Number of consecutive nodes on which cpu_array_value
+ *			  is duplicated. See NOTES below.
+ * memory_allocated	- MB per node reserved for the job or step
+ * memory_used		- MB per node of memory consumed by job steps
+ * nhosts		- Number of nodes in the allocation
+ * node_bitmap		- Bitmap of nodes allocated to the job. Unlike the
+ *			  node_bitmap in slurmctld's job record, the bits
+ *			  here do NOT get cleared as the job completes on a
+ *			  node
+ * node_req		- NODE_CR_RESERVED|NODE_CR_ONE_ROW|NODE_CR_AVAILABLE
+ * nprocs		- Number of processors in the allocation
+ * sock_core_rep_count	- How many consecutive nodes sockets_per_node
+ *			  and cores_per_socket apply to, built by 
+ *			  build_select_job_res(); ensures consistent 
+ *			  interpretation of core_bitmap
+ * sockets_per_node	- Count of sockets on this node, built by 
+ *			  build_select_job_res(); ensures consistent 
+ *			  interpretation of core_bitmap
+ *
+ * NOTES:
+ * cpu_array_* contains the same information as "cpus", but in a more compact
+ * format. For example if cpus = {4, 4, 2, 2, 2, 2, 2, 2} then cpu_array_cnt=2
+ * cpu_array_value = {4, 2} and cpu_array_reps = {2, 6}. We do not need to 
+ * save/restore these values, but generate them by calling 
+ * build_select_job_res_cpu_array()
+ *
+ * Sample layout of core_bitmap:
+ *   |               Node_0              |               Node_1              |
+ *   |      Sock_0     |      Sock_1     |      Sock_0     |      Sock_1     |
+ *   | Core_0 | Core_1 | Core_0 | Core_1 | Core_0 | Core_1 | Core_0 | Core_1 |
+ *   | Bit_0  | Bit_1  | Bit_2  | Bit_3  | Bit_4  | Bit_5  | Bit_6  | Bit_7  |
+ */
+struct select_job_res {
+	bitstr_t *	core_bitmap;
+	bitstr_t *	core_bitmap_used;
+	uint32_t	cpu_array_cnt;
+	uint16_t *	cpu_array_value;
+	uint32_t *	cpu_array_reps;
+	uint16_t *	cpus;
+	uint16_t *	cpus_used;
+	uint16_t *	cores_per_socket;
+	uint32_t *	memory_allocated;
+	uint32_t *	memory_used;
+	uint32_t	nhosts;
+	bitstr_t *	node_bitmap;
+	uint8_t		node_req;
+	uint32_t	nprocs;
+	uint32_t *	sock_core_rep_count;
+	uint16_t *	sockets_per_node;
+};
+
+/* Create an empty select_job_res data structure, just a call to xmalloc() */
+extern select_job_res_t create_select_job_res(void);
+
+/* Set the socket and core counts associated with a set of selected
+ * nodes of a select_job_res data structure based upon slurmctld state.
+ * (sets cores_per_socket, sockets_per_node, and sock_core_rep_count based
+ * upon the value of node_bitmap, also creates core_bitmap based upon
+ * the total number of cores in the allocation). Call this ONLY from 
+ * slurmctld. Example of use:
+ *
+ * select_job_res_t select_job_res_ptr = create_select_job_res();
+ * node_name2bitmap("dummy[2,5,12,16]", true, &(select_res_ptr->node_bitmap));
+ * rc = build_select_job_res(select_job_res_ptr, node_record_table_ptr,
+ *			     slurmctld_conf.fast_schedule);
+ */
+extern int build_select_job_res(select_job_res_t select_job_res_ptr,
+				void *node_rec_table,
+				uint16_t fast_schedule);
+
+/* Rebuild cpu_array_cnt, cpu_array_value, and cpu_array_reps based upon the
+ * values of cpus in an existing data structure
+ * Return total CPU count or -1 on error */
+extern int build_select_job_res_cpu_array(select_job_res_t select_job_res_ptr);
+
+/* Rebuild cpus array based upon the values of nhosts, cpu_array_value and
+ * cpu_array_reps in an existing data structure
+ * Return total CPU count or -1 on error */
+extern int build_select_job_res_cpus_array(select_job_res_t select_job_res_ptr);
+
+/* Validate a select_job_res data structure originally built using
+ * build_select_job_res() is still valid based upon slurmctld state.
+ * NOTE: Reset the node_bitmap field before calling this function.
+ * If the sockets_per_node or cores_per_socket for any node in the allocation 
+ * changes, then return SLURM_ERROR. Otherwise return SLURM_SUCCESS. Any 
+ * change in a node's socket or core count requires that any job running on
+ * that node be killed. Example of use:
+ *
+ * rc = valid_select_job_res(select_job_res_ptr, node_record_table_ptr,
+ *			     slurmctld_conf.fast_schedule);
+ */
+extern int valid_select_job_res(select_job_res_t select_job_res_ptr,
+				void *node_rec_table,
+				uint16_t fast_schedule);
+
+/* Make a copy of a select_job_res data structure, 
+ * free using free_select_job_res() */
+extern select_job_res_t copy_select_job_res(select_job_res_t 
+					    select_job_res_ptr);
+
+/* Free select_job_res data structure created using copy_select_job_res() or
+ *	unpack_select_job_res() */
+extern void free_select_job_res(select_job_res_t *select_job_res_pptr);
+
+/* Log the contents of a select_job_res data structure using info() */
+extern void log_select_job_res(uint32_t job_id, 
+			       select_job_res_t select_job_res_ptr);
+
+/* Un/pack full select_job_res data structure */
+extern void pack_select_job_res(select_job_res_t select_job_res_ptr, 
+				Buf buffer);
+extern int unpack_select_job_res(select_job_res_t *select_job_res_pptr, 
+				 Buf buffer);
+
+/* Reset the node_bitmap in a select_job_res data structure
+ * This is needed after a restart/reconfiguration since nodes can 
+ * be added or removed from the system, resulting in changes to 
+ * the bitmap size or bit positions */
+extern void reset_node_bitmap(select_job_res_t select_job_res_ptr,
+			      bitstr_t *new_node_bitmap);
+
+/* For a given node_id, socket_id and core_id, get its offset within
+ * the core bitmap */
+extern int get_select_job_res_offset(select_job_res_t select_job_res_ptr, 
+				     uint32_t node_id, uint16_t socket_id, 
+				     uint16_t core_id);
+
+/* Get/set bit value at specified location.
+ *	node_id, socket_id and core_id are all zero origin */
+extern int get_select_job_res_bit(select_job_res_t select_job_res_ptr, 
+				  uint32_t node_id,
+				  uint16_t socket_id, uint16_t core_id);
+extern int set_select_job_res_bit(select_job_res_t select_job_res_ptr, 
+				  uint32_t node_id,
+				  uint16_t socket_id, uint16_t core_id);
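+
+/* Hypothetical example (all indices zero origin): mark core 1 of socket 0
+ * on node 2 as allocated, then read the bit back:
+ *
+ *	set_select_job_res_bit(select_job_res_ptr, 2, 0, 1);
+ *	if (get_select_job_res_bit(select_job_res_ptr, 2, 0, 1))
+ *		... the core is set in core_bitmap ...
+ */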
+
+/* Get/set bit value at specified location for whole node allocations
+ *	get is for any socket/core on the specified node
+ *	set is for all sockets/cores on the specified node
+ *	fully compatible with set/get_select_job_res_bit()
+ *	node_id is zero origin */
+extern int get_select_job_res_node(select_job_res_t select_job_res_ptr, 
+				   uint32_t node_id);
+extern int set_select_job_res_node(select_job_res_t select_job_res_ptr, 
+				   uint32_t node_id);
+
+/* Get socket and core count for a specific node_id (zero origin) */
+extern int get_select_job_res_cnt(select_job_res_t select_job_res_ptr, 
+				  uint32_t node_id,
+				  uint16_t *socket_cnt,
+ 				  uint16_t *cores_per_socket_cnt);
+
+/* Check if the given job can fit into the given full-length core_bitmap */
+extern int can_select_job_cores_fit(select_job_res_t select_ptr,
+				    bitstr_t *full_bitmap,
+				    const uint16_t *bits_per_node,
+				    const uint32_t *bit_rep_count);
+
+/* Add the given job to the given full_core_bitmap */
+extern void add_select_job_to_row(select_job_res_t select_ptr,
+				  bitstr_t **full_core_bitmap,
+				  const uint16_t *cores_per_node,
+				  const uint32_t *core_rep_count);
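+
+/* Hypothetical calling pattern: test for an overlap first, then commit the
+ * job's cores into the row bitmap:
+ *
+ *	if (can_select_job_cores_fit(sel, row_bitmap, cores_per_node, reps))
+ *		add_select_job_to_row(sel, &row_bitmap, cores_per_node, reps);
+ */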
+
+#endif /* !_SELECT_JOB_RES_H */
diff --git a/src/common/slurm_accounting_storage.c b/src/common/slurm_accounting_storage.c
index 98a50322b57da6f536820370b1dd24697ecacc72..07f1fce00a55bc6656a6d7e6a11c8b40b77e3599 100644
--- a/src/common/slurm_accounting_storage.c
+++ b/src/common/slurm_accounting_storage.c
@@ -4,13 +4,14 @@
  *  $Id: slurm_accounting_storage.c 10744 2007-01-11 20:09:18Z da $
  *****************************************************************************
  *  Copyright (C) 2002-2007 The Regents of the University of California.
- *  Copyright (C) 2008 Lawrence Livermore National Security.
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Danny Aubke <da@llnl.gov>.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -42,7 +43,12 @@
 #  include "config.h"
 #endif
 
+#ifndef   _GNU_SOURCE
+#  define _GNU_SOURCE
+#endif
+
 #include <pthread.h>
+#include <string.h>
 
 #include "src/common/list.h"
 #include "src/common/slurm_accounting_storage.h"
@@ -52,6 +58,7 @@
 #include "src/common/xstring.h"
 #include "src/slurmctld/slurmctld.h"
 #include "src/sacctmgr/sacctmgr.h"
+#include "src/common/slurm_strcasestr.h"
 
 /*
  * Local data
@@ -77,6 +84,8 @@ typedef struct slurm_acct_storage_ops {
 				    List qos_list);
 	int  (*add_wckeys)         (void *db_conn, uint32_t uid,
 				    List wckey_list);
+	int  (*add_reservation)    (void *db_conn,
+				    acct_reservation_rec_t *resv);
 	List (*modify_users)       (void *db_conn, uint32_t uid,
 				    acct_user_cond_t *user_cond,
 				    acct_user_rec_t *user);
@@ -95,6 +104,8 @@ typedef struct slurm_acct_storage_ops {
 	List (*modify_wckeys)      (void *db_conn, uint32_t uid,
 				    acct_wckey_cond_t *wckey_cond,
 				    acct_wckey_rec_t *wckey);
+	int  (*modify_reservation) (void *db_conn,
+				    acct_reservation_rec_t *resv);
 	List (*remove_users)       (void *db_conn, uint32_t uid,
 				    acct_user_cond_t *user_cond);
 	List (*remove_coord)       (void *db_conn, uint32_t uid,
@@ -110,18 +121,23 @@ typedef struct slurm_acct_storage_ops {
 				    acct_qos_cond_t *qos_cond);
 	List (*remove_wckeys)      (void *db_conn, uint32_t uid,
 				    acct_wckey_cond_t *wckey_cond);
+	int  (*remove_reservation) (void *db_conn,
+				    acct_reservation_rec_t *resv);
 	List (*get_users)          (void *db_conn, uint32_t uid,
 				    acct_user_cond_t *user_cond);
 	List (*get_accts)          (void *db_conn, uint32_t uid,
 				    acct_account_cond_t *acct_cond);
 	List (*get_clusters)       (void *db_conn, uint32_t uid,
 				    acct_cluster_cond_t *cluster_cond);
+	List (*get_config)         (void *db_conn);
 	List (*get_associations)   (void *db_conn, uint32_t uid,
 				    acct_association_cond_t *assoc_cond);
 	List (*get_qos)            (void *db_conn, uint32_t uid,
 				    acct_qos_cond_t *qos_cond);
 	List (*get_wckeys)         (void *db_conn, uint32_t uid,
 				    acct_wckey_cond_t *wckey_cond);
+	List (*get_resvs)          (void *db_conn, uint32_t uid,
+				    acct_reservation_cond_t *resv_cond);
 	List (*get_txn)            (void *db_conn, uint32_t uid,
 				    acct_txn_cond_t *txn_cond);
 	int  (*get_usage)          (void *db_conn, uint32_t uid,
@@ -129,7 +145,8 @@ typedef struct slurm_acct_storage_ops {
 				    time_t start, 
 				    time_t end);
 	int (*roll_usage)          (void *db_conn, 
-				    time_t sent_start);
+				    time_t sent_start, time_t sent_end,
+				    uint16_t archive_data);
 	int  (*node_down)          (void *db_conn,
 				    char *cluster,
 				    struct node_record *node_ptr,
@@ -140,7 +157,7 @@ typedef struct slurm_acct_storage_ops {
 				    struct node_record *node_ptr,
 				    time_t event_time);
 	int  (*cluster_procs)      (void *db_conn,
-				    char *cluster,
+				    char *cluster, char *cluster_nodes,
 				    uint32_t procs, time_t event_time);
 	int  (*c_get_usage)        (void *db_conn, uint32_t uid,
 				    void *cluster_rec, int type,
@@ -212,12 +229,14 @@ static slurm_acct_storage_ops_t * _acct_storage_get_ops(
 		"acct_storage_p_add_associations",
 		"acct_storage_p_add_qos",
 		"acct_storage_p_add_wckeys",
+		"acct_storage_p_add_reservation",
 		"acct_storage_p_modify_users",
 		"acct_storage_p_modify_accounts",
 		"acct_storage_p_modify_clusters",
 		"acct_storage_p_modify_associations",
 		"acct_storage_p_modify_qos",
 		"acct_storage_p_modify_wckeys",
+		"acct_storage_p_modify_reservation",
 		"acct_storage_p_remove_users",
 		"acct_storage_p_remove_coord",
 		"acct_storage_p_remove_accts",
@@ -225,12 +244,15 @@ static slurm_acct_storage_ops_t * _acct_storage_get_ops(
 		"acct_storage_p_remove_associations",
 		"acct_storage_p_remove_qos",
 		"acct_storage_p_remove_wckeys",
+		"acct_storage_p_remove_reservation",
 		"acct_storage_p_get_users",
 		"acct_storage_p_get_accts",
 		"acct_storage_p_get_clusters",
+		"acct_storage_p_get_config",
 		"acct_storage_p_get_associations",
 		"acct_storage_p_get_qos",
 		"acct_storage_p_get_wckeys",
+		"acct_storage_p_get_reservations",
 		"acct_storage_p_get_txn",
 		"acct_storage_p_get_usage",
 		"acct_storage_p_roll_usage",
@@ -495,8 +517,7 @@ extern void destroy_acct_cluster_rec(void *object)
 			list_destroy(acct_cluster->accounting_list);
 		xfree(acct_cluster->control_host);
 		xfree(acct_cluster->name);
-		if(acct_cluster->valid_qos_list)
-			list_destroy(acct_cluster->valid_qos_list);
+		xfree(acct_cluster->nodes);
 		destroy_acct_association_rec(acct_cluster->root_assoc);
 		xfree(acct_cluster);
 	}
@@ -521,6 +542,8 @@ extern void destroy_acct_association_rec(void *object)
 		if(acct_association->accounting_list)
 			list_destroy(acct_association->accounting_list);
 		xfree(acct_association->acct);
+		if(acct_association->childern_list)
+			list_destroy(acct_association->childern_list);
 		xfree(acct_association->cluster);
 		xfree(acct_association->parent_acct);
 		xfree(acct_association->partition);
@@ -550,6 +573,19 @@ extern void destroy_acct_qos_rec(void *object)
 	}
 }
 
+extern void destroy_acct_reservation_rec(void *object)
+{
+	acct_reservation_rec_t *acct_resv = (acct_reservation_rec_t *)object;
+	if(acct_resv) {
+		xfree(acct_resv->assocs);
+		xfree(acct_resv->cluster);
+		xfree(acct_resv->name);
+		xfree(acct_resv->nodes);
+		xfree(acct_resv->node_inx);
+		xfree(acct_resv);
+	}
+}
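+
+/* Usage sketch (not code from this patch): like the other destroy_*
+ * functions in this file, this is intended to be passed to list_create()
+ * as the delete callback, e.g.
+ *	List resv_list = list_create(destroy_acct_reservation_rec);
+ * so that list_destroy() frees each record. */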
+
 extern void destroy_acct_txn_rec(void *object)
 {
 	acct_txn_rec_t *acct_txn = (acct_txn_rec_t *)object;
@@ -703,10 +739,15 @@ extern void destroy_acct_job_cond(void *object)
 			list_destroy(job_cond->groupid_list);
 		if(job_cond->partition_list)
 			list_destroy(job_cond->partition_list);
+		if(job_cond->resv_list)
+			list_destroy(job_cond->resv_list);
+		if(job_cond->resvid_list)
+			list_destroy(job_cond->resvid_list);
 		if(job_cond->step_list)
 			list_destroy(job_cond->step_list);
 		if(job_cond->state_list)
 			list_destroy(job_cond->state_list);
+		xfree(job_cond->used_nodes);
 		if(job_cond->userid_list)
 			list_destroy(job_cond->userid_list);
 		if(job_cond->wckey_list)
@@ -727,6 +768,21 @@ extern void destroy_acct_qos_cond(void *object)
 	}
 }
 
+extern void destroy_acct_reservation_cond(void *object)
+{
+	acct_reservation_cond_t *acct_resv = (acct_reservation_cond_t *)object;
+	if(acct_resv) {
+		if(acct_resv->cluster_list) 
+			list_destroy(acct_resv->cluster_list);
+		if(acct_resv->id_list)
+			list_destroy(acct_resv->id_list);
+		if(acct_resv->name_list)
+			list_destroy(acct_resv->name_list);
+		xfree(acct_resv->nodes);
+		xfree(acct_resv);
+	}
+}
+
 extern void destroy_acct_txn_cond(void *object)
 {
 	acct_txn_cond_t *acct_txn = (acct_txn_cond_t *)object;
@@ -842,8 +898,6 @@ extern void init_acct_association_rec(acct_association_rec_t *assoc)
 
 	memset(assoc, 0, sizeof(acct_association_rec_t));
 
-	assoc->fairshare = NO_VAL;
-
 	assoc->grp_cpu_mins = NO_VAL;
 	assoc->grp_cpus = NO_VAL;
 	assoc->grp_jobs = NO_VAL;
@@ -851,12 +905,21 @@ extern void init_acct_association_rec(acct_association_rec_t *assoc)
 	assoc->grp_submit_jobs = NO_VAL;
 	assoc->grp_wall = NO_VAL;
 
+	assoc->level_shares = NO_VAL;
+
 	assoc->max_cpu_mins_pj = NO_VAL;
 	assoc->max_cpus_pj = NO_VAL;
 	assoc->max_jobs = NO_VAL;
 	assoc->max_nodes_pj = NO_VAL;
 	assoc->max_submit_jobs = NO_VAL;
 	assoc->max_wall_pj = NO_VAL;
+
+	assoc->shares_norm = (double)NO_VAL;
+	assoc->shares_raw = NO_VAL;
+
+	assoc->usage_efctv = 0;
+	assoc->usage_norm = (long double)NO_VAL;
+	assoc->usage_raw = 0;
 }
 
 extern void init_acct_qos_rec(acct_qos_rec_t *qos)
@@ -881,6 +944,8 @@ extern void init_acct_qos_rec(acct_qos_rec_t *qos)
 	qos->max_nodes_pu = NO_VAL;
 	qos->max_submit_jobs_pu = NO_VAL;
 	qos->max_wall_pu = NO_VAL;
+
+	qos->usage_factor = NO_VAL;
 }
 
 /****************************************************************************\
@@ -1230,38 +1295,6 @@ unpack_error:
 	return SLURM_ERROR;
 }
 
-
-extern void pack_update_shares_used(void *in, uint16_t rpc_version, Buf buffer)
-{
-	shares_used_object_t *object = (shares_used_object_t *)in;
-
-	if(!object) {
-		pack32(0, buffer);
-		pack32(0, buffer);
-		return;
-	}
-
-	pack32(object->assoc_id, buffer);
-	pack32(object->shares_used, buffer);
-}
-
-extern int unpack_update_shares_used(void **object, uint16_t rpc_version,
-				     Buf buffer)
-{
-	shares_used_object_t *object_ptr =
-		xmalloc(sizeof(shares_used_object_t));
-
-	*object = (void *) object_ptr;
-	safe_unpack32(&object_ptr->assoc_id, buffer);
-	safe_unpack32(&object_ptr->shares_used, buffer);
-
-	return SLURM_SUCCESS;
-
-unpack_error:
-	destroy_update_shares_rec(object_ptr);
-	*object = NULL;
-	return SLURM_ERROR;
-}
 extern void pack_acct_account_rec(void *in, uint16_t rpc_version, Buf buffer)
 {
 	acct_coord_rec_t *coord = NULL;
@@ -1485,24 +1518,47 @@ extern void pack_cluster_accounting_rec(void *in, uint16_t rpc_version,
 {
 	cluster_accounting_rec_t *object = (cluster_accounting_rec_t *)in;
 	
-	if(!object) {
-		pack64(0, buffer);
-		pack32(0, buffer);
-		pack64(0, buffer);
-		pack64(0, buffer);
-		pack64(0, buffer);
-		pack_time(0, buffer);
-		pack64(0, buffer);
-		return;
+	if(rpc_version >= 5) {
+		if(!object) {
+			pack64(0, buffer);
+			pack32(0, buffer);
+			pack64(0, buffer);
+			pack64(0, buffer);
+			pack64(0, buffer);
+			pack64(0, buffer);
+			pack_time(0, buffer);
+			pack64(0, buffer);
+			return;
+		}
+		
+		pack64(object->alloc_secs, buffer);
+		pack32(object->cpu_count, buffer);
+		pack64(object->down_secs, buffer);
+		pack64(object->idle_secs, buffer);
+		pack64(object->over_secs, buffer);
+		pack64(object->pdown_secs, buffer);
+		pack_time(object->period_start, buffer);
+		pack64(object->resv_secs, buffer);
+	} else {
+		if(!object) {
+			pack64(0, buffer);
+			pack32(0, buffer);
+			pack64(0, buffer);
+			pack64(0, buffer);
+			pack64(0, buffer);
+			pack_time(0, buffer);
+			pack64(0, buffer);
+			return;
+		}
+		
+		pack64(object->alloc_secs, buffer);
+		pack32(object->cpu_count, buffer);
+		pack64(object->down_secs, buffer);
+		pack64(object->idle_secs, buffer);
+		pack64(object->over_secs, buffer);
+		pack_time(object->period_start, buffer);
+		pack64(object->resv_secs, buffer);
 	}
-
- 	pack64(object->alloc_secs, buffer);
-	pack32(object->cpu_count, buffer);
-	pack64(object->down_secs, buffer);
-	pack64(object->idle_secs, buffer);
-	pack64(object->over_secs, buffer);
-	pack_time(object->period_start, buffer);
-	pack64(object->resv_secs, buffer);
 }
 
 extern int unpack_cluster_accounting_rec(void **object, uint16_t rpc_version,
@@ -1512,13 +1568,25 @@ extern int unpack_cluster_accounting_rec(void **object, uint16_t rpc_version,
 		xmalloc(sizeof(cluster_accounting_rec_t));
 	
 	*object = object_ptr;
-	safe_unpack64(&object_ptr->alloc_secs, buffer);
-	safe_unpack32(&object_ptr->cpu_count, buffer);
-	safe_unpack64(&object_ptr->down_secs, buffer);
-	safe_unpack64(&object_ptr->idle_secs, buffer);
-	safe_unpack64(&object_ptr->over_secs, buffer);
-	safe_unpack_time(&object_ptr->period_start, buffer);
-	safe_unpack64(&object_ptr->resv_secs, buffer);
+
+	if(rpc_version >= 5) {
+		safe_unpack64(&object_ptr->alloc_secs, buffer);
+		safe_unpack32(&object_ptr->cpu_count, buffer);
+		safe_unpack64(&object_ptr->down_secs, buffer);
+		safe_unpack64(&object_ptr->idle_secs, buffer);
+		safe_unpack64(&object_ptr->over_secs, buffer);
+		safe_unpack64(&object_ptr->pdown_secs, buffer);
+		safe_unpack_time(&object_ptr->period_start, buffer);
+		safe_unpack64(&object_ptr->resv_secs, buffer);
+	} else {
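+		/* pre-5 wire layout: identical fields except that
+		 * pdown_secs is absent */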
+		safe_unpack64(&object_ptr->alloc_secs, buffer);
+		safe_unpack32(&object_ptr->cpu_count, buffer);
+		safe_unpack64(&object_ptr->down_secs, buffer);
+		safe_unpack64(&object_ptr->idle_secs, buffer);
+		safe_unpack64(&object_ptr->over_secs, buffer);
+		safe_unpack_time(&object_ptr->period_start, buffer);
+		safe_unpack64(&object_ptr->resv_secs, buffer);
+	}
 	
 	return SLURM_SUCCESS;
 
@@ -1532,19 +1600,20 @@ extern void pack_acct_cluster_rec(void *in, uint16_t rpc_version, Buf buffer)
 {
 	cluster_accounting_rec_t *acct_info = NULL;
 	ListIterator itr = NULL;
-	char *tmp_info = NULL;
 	uint32_t count = NO_VAL;
 	acct_cluster_rec_t *object = (acct_cluster_rec_t *)in;
 
-	if(rpc_version >= 3) {
+	if(rpc_version >= 5) {
 		if(!object) {
 			pack32(NO_VAL, buffer);
+			pack16(0, buffer);
 			packnull(buffer);
 			pack32(0, buffer);
+			pack32(0, buffer);
 
+			packnull(buffer);
 			packnull(buffer);
 
-			pack32(NO_VAL, buffer);
 			pack_acct_association_rec(NULL, rpc_version, buffer);
 
 			pack16(0, buffer);
@@ -1566,25 +1635,55 @@ extern void pack_acct_cluster_rec(void *in, uint16_t rpc_version, Buf buffer)
 		}
 		count = NO_VAL;
 
+		pack16(object->classification, buffer);
 		packstr(object->control_host, buffer);
 		pack32(object->control_port, buffer);
+		pack32(object->cpu_count, buffer);
 
 		packstr(object->name, buffer);
+		packstr(object->nodes, buffer);
+
+		pack_acct_association_rec(object->root_assoc,
+					  rpc_version, buffer);
+
+		pack16(object->rpc_version, buffer);
+	} else if(rpc_version >= 3) {
+		if(!object) {
+			pack32(NO_VAL, buffer);
+			packnull(buffer);
+			pack32(0, buffer);
+
+			packnull(buffer);
+
+			pack32(NO_VAL, buffer);
+			pack_acct_association_rec(NULL, rpc_version, buffer);
 
-		if(object->valid_qos_list)
-			count = list_count(object->valid_qos_list);
+			pack16(0, buffer);
+			return;
+		}
+ 
+		if(object->accounting_list)
+			count = list_count(object->accounting_list);
 
 		pack32(count, buffer);
 
 		if(count && count != NO_VAL) {
-			itr = list_iterator_create(object->valid_qos_list);
-			while((tmp_info = list_next(itr))) {
-				packstr(tmp_info, buffer);
+			itr = list_iterator_create(object->accounting_list);
+			while((acct_info = list_next(itr))) {
+				pack_cluster_accounting_rec(
+					acct_info, rpc_version, buffer);
 			}
 			list_iterator_destroy(itr);
 		}
 		count = NO_VAL;
 
+		packstr(object->control_host, buffer);
+		pack32(object->control_port, buffer);
+
+		packstr(object->name, buffer);
+
+		pack32(count, buffer); /* for defunct valid_qos_list */
+
 		pack_acct_association_rec(object->root_assoc,
 					  rpc_version, buffer);
 
@@ -1630,7 +1729,7 @@ extern void pack_acct_cluster_rec(void *in, uint16_t rpc_version, Buf buffer)
 			pack32(NO_VAL, buffer);
 			pack32(NO_VAL, buffer);
 		} else {
-			pack32(object->root_assoc->fairshare, buffer);
+			pack32(object->root_assoc->shares_raw, buffer);
 			pack32(object->root_assoc->max_cpu_mins_pj, buffer);
 			pack32(object->root_assoc->max_jobs, buffer);
 			pack32(object->root_assoc->max_nodes_pj, buffer);
@@ -1646,14 +1745,13 @@ extern int unpack_acct_cluster_rec(void **object, uint16_t rpc_version,
 {
 	uint32_t uint32_tmp;
 	int i;
-	char *tmp_info = NULL;
 	uint32_t count;
 	acct_cluster_rec_t *object_ptr = xmalloc(sizeof(acct_cluster_rec_t));
 	cluster_accounting_rec_t *acct_info = NULL;
 
 	*object = object_ptr;
 
-	if(rpc_version >= 3) {
+	if(rpc_version >= 5) {
 		safe_unpack32(&count, buffer);
 		if(count != NO_VAL) {
 			object_ptr->accounting_list =
@@ -1667,24 +1765,44 @@ extern int unpack_acct_cluster_rec(void **object, uint16_t rpc_version,
 			}
 		}
 
+		safe_unpack16(&object_ptr->classification, buffer);
 		safe_unpackstr_xmalloc(&object_ptr->control_host,
 				       &uint32_tmp, buffer);
 		safe_unpack32(&object_ptr->control_port, buffer);
+		safe_unpack32(&object_ptr->cpu_count, buffer);
 
 		safe_unpackstr_xmalloc(&object_ptr->name, &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&object_ptr->nodes, &uint32_tmp, buffer);
+
+		if(unpack_acct_association_rec(
+			   (void **)&object_ptr->root_assoc, 
+			   rpc_version, buffer)
+		   == SLURM_ERROR)
+			goto unpack_error;
 
+		safe_unpack16(&object_ptr->rpc_version, buffer);
+	} else if(rpc_version >= 3) {
 		safe_unpack32(&count, buffer);
 		if(count != NO_VAL) {
-			object_ptr->valid_qos_list = 
-				list_create(slurm_destroy_char);
+			object_ptr->accounting_list =
+				list_create(destroy_cluster_accounting_rec);
 			for(i=0; i<count; i++) {
-				safe_unpackstr_xmalloc(&tmp_info,
-						       &uint32_tmp, buffer);
-				list_append(object_ptr->valid_qos_list,
-					    tmp_info);
+				unpack_cluster_accounting_rec(
+					(void *)&acct_info,
+					rpc_version, buffer);
+				list_append(object_ptr->accounting_list,
+					    acct_info);
 			}
 		}
 
+		safe_unpackstr_xmalloc(&object_ptr->control_host,
+				       &uint32_tmp, buffer);
+		safe_unpack32(&object_ptr->control_port, buffer);
+
+		safe_unpackstr_xmalloc(&object_ptr->name, &uint32_tmp, buffer);
+
+		safe_unpack32(&count, buffer); /* for defunct valid_qos_list */
+
 		if(unpack_acct_association_rec(
 			   (void **)&object_ptr->root_assoc, 
 			   rpc_version, buffer)
@@ -1711,7 +1829,7 @@ extern int unpack_acct_cluster_rec(void **object, uint16_t rpc_version,
 		object_ptr->root_assoc = 
 			xmalloc(sizeof(acct_association_rec_t));
 		init_acct_association_rec(object_ptr->root_assoc);
-		safe_unpack32(&object_ptr->root_assoc->fairshare, buffer);
+		safe_unpack32(&object_ptr->root_assoc->shares_raw, buffer);
 		safe_unpack32((uint32_t *)&object_ptr->root_assoc->
 			      max_cpu_mins_pj, buffer);
 		safe_unpack32(&object_ptr->root_assoc->max_jobs, buffer);
@@ -1742,10 +1860,10 @@ extern void pack_acct_accounting_rec(void *in, uint16_t rpc_version, Buf buffer)
 		pack_time(0, buffer);
 		return;
 	}
-
+	
 	pack64(object->alloc_secs, buffer);
 	pack32(object->id, buffer);
-	pack_time(object->period_start, buffer);
+	pack_time(object->period_start, buffer);		
 }
 
 extern int unpack_acct_accounting_rec(void **object, uint16_t rpc_version,
@@ -1755,10 +1873,11 @@ extern int unpack_acct_accounting_rec(void **object, uint16_t rpc_version,
 		xmalloc(sizeof(acct_accounting_rec_t));
 	
 	*object = object_ptr;
+	
 	safe_unpack64(&object_ptr->alloc_secs, buffer);
 	safe_unpack32(&object_ptr->id, buffer);
-	safe_unpack_time(&object_ptr->period_start, buffer);
-
+	safe_unpack_time(&object_ptr->period_start, buffer);	
+		
 	return SLURM_SUCCESS;
 
 unpack_error:
@@ -1832,7 +1951,10 @@ extern void pack_acct_association_rec(void *in, uint16_t rpc_version,
 		packstr(object->acct, buffer);
 		packstr(object->cluster, buffer);
 
-		pack32(object->fairshare, buffer);
+		/* This field used to be named fairshare.  To avoid
+		   reordering the pack/unpack sequence, it was simply
+		   renamed to shares_raw and left in place. */
+		pack32(object->shares_raw, buffer);
 
 		pack64(object->grp_cpu_mins, buffer);
 		pack32(object->grp_cpus, buffer);
@@ -1873,7 +1995,7 @@ extern void pack_acct_association_rec(void *in, uint16_t rpc_version,
 		pack32(object->uid, buffer);
 
 		packstr(object->user, buffer);	
-	} else if (rpc_version >= 3) {
+	} else if (rpc_version == 3) {
 		if(!object) {
 			pack32(NO_VAL, buffer);
 			packnull(buffer);
@@ -1931,7 +2053,7 @@ extern void pack_acct_association_rec(void *in, uint16_t rpc_version,
 		packstr(object->acct, buffer);
 		packstr(object->cluster, buffer);
 
-		pack32(object->fairshare, buffer);
+		pack32(object->shares_raw, buffer);
 
 		pack64(object->grp_cpu_mins, buffer);
 		pack32(object->grp_cpus, buffer);
@@ -1971,7 +2093,8 @@ extern void pack_acct_association_rec(void *in, uint16_t rpc_version,
 		pack32(object->rgt, buffer);
 		pack32(object->uid, buffer);
 
-		pack32(object->used_shares, buffer);
+		/* used_shares, which was removed in rpc_version 4 */
+		pack32(0, buffer);
 
 		packstr(object->user, buffer);	
 	} else {
@@ -2019,7 +2142,7 @@ extern void pack_acct_association_rec(void *in, uint16_t rpc_version,
 
 		packstr(object->acct, buffer);
 		packstr(object->cluster, buffer);
-		pack32(object->fairshare, buffer);
+		pack32(object->shares_raw, buffer);
 		pack32(object->id, buffer);
 		pack32(object->lft, buffer);
 		pack32(object->max_cpu_mins_pj, buffer);
@@ -2031,7 +2154,9 @@ extern void pack_acct_association_rec(void *in, uint16_t rpc_version,
 		packstr(object->partition, buffer);
 		pack32(object->rgt, buffer);
 		pack32(object->uid, buffer);
-		pack32(object->used_shares, buffer);
+		/* used_shares, which was removed in rpc_version 4 */
+		pack32(0, buffer);
+
 		packstr(object->user, buffer);	
 	} 
 }
@@ -2071,7 +2196,7 @@ extern int unpack_acct_association_rec(void **object, uint16_t rpc_version,
 		safe_unpackstr_xmalloc(&object_ptr->cluster, &uint32_tmp,
 				       buffer);
 
-		safe_unpack32(&object_ptr->fairshare, buffer);
+		safe_unpack32(&object_ptr->shares_raw, buffer);
 
 		safe_unpack64(&object_ptr->grp_cpu_mins, buffer);
 		safe_unpack32(&object_ptr->grp_cpus, buffer);
@@ -2132,7 +2257,7 @@ extern int unpack_acct_association_rec(void **object, uint16_t rpc_version,
 		safe_unpackstr_xmalloc(&object_ptr->cluster, &uint32_tmp,
 				       buffer);
 
-		safe_unpack32(&object_ptr->fairshare, buffer);
+		safe_unpack32(&object_ptr->shares_raw, buffer);
 
 		safe_unpack64(&object_ptr->grp_cpu_mins, buffer);
 		safe_unpack32(&object_ptr->grp_cpus, buffer);
@@ -2172,7 +2297,8 @@ extern int unpack_acct_association_rec(void **object, uint16_t rpc_version,
 		safe_unpack32(&object_ptr->rgt, buffer);
 		safe_unpack32(&object_ptr->uid, buffer);
 
-		safe_unpack32(&object_ptr->used_shares, buffer);
+		/* used_shares, which was removed in rpc_version 4 */
+		safe_unpack32(&uint32_tmp, buffer);
 
 		safe_unpackstr_xmalloc(&object_ptr->user, &uint32_tmp, buffer);
 	} else {
@@ -2194,7 +2320,7 @@ extern int unpack_acct_association_rec(void **object, uint16_t rpc_version,
 		safe_unpackstr_xmalloc(&object_ptr->cluster, &uint32_tmp,
 				       buffer);
 
-		safe_unpack32(&object_ptr->fairshare, buffer);
+		safe_unpack32(&object_ptr->shares_raw, buffer);
 		safe_unpack32(&object_ptr->id, buffer);
 		safe_unpack32(&object_ptr->lft, buffer);
 
@@ -2213,7 +2339,8 @@ extern int unpack_acct_association_rec(void **object, uint16_t rpc_version,
 		safe_unpack32(&object_ptr->rgt, buffer);
 		safe_unpack32(&object_ptr->uid, buffer);
 
-		safe_unpack32(&object_ptr->used_shares, buffer);
+		/* used_shares, which was removed in rpc_version 4 */
+		safe_unpack32(&uint32_tmp, buffer);
 
 		safe_unpackstr_xmalloc(&object_ptr->user, &uint32_tmp, buffer);
 	} 
@@ -2233,7 +2360,105 @@ extern void pack_acct_qos_rec(void *in, uint16_t rpc_version, Buf buffer)
 	uint32_t count = NO_VAL;
 	char *tmp_info = NULL;
 
-	if(rpc_version >= 3) {
+	if(rpc_version >= 5) {
+		if(!object) {
+			packnull(buffer);
+			pack32(0, buffer);
+			packnull(buffer);
+
+			pack64(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+
+			pack64(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+
+			packnull(buffer);
+
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+
+			pack32(0, buffer);
+
+			packdouble(NO_VAL, buffer);
+
+			pack32(NO_VAL, buffer);
+			return;
+		}
+		packstr(object->description, buffer);	
+		pack32(object->id, buffer);
+
+		pack64(object->grp_cpu_mins, buffer);
+		pack32(object->grp_cpus, buffer);
+		pack32(object->grp_jobs, buffer);
+		pack32(object->grp_nodes, buffer);
+		pack32(object->grp_submit_jobs, buffer);
+		pack32(object->grp_wall, buffer);
+
+		pack64(object->max_cpu_mins_pu, buffer);
+		pack32(object->max_cpus_pu, buffer);
+		pack32(object->max_jobs_pu, buffer);
+		pack32(object->max_nodes_pu, buffer);
+		pack32(object->max_submit_jobs_pu, buffer);
+		pack32(object->max_wall_pu, buffer);
+
+		packstr(object->name, buffer);	
+
+		if(object->preemptee_list)
+			count = list_count(object->preemptee_list);
+
+		pack32(count, buffer);
+
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->preemptee_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+		
+		if(object->preemptor_list)
+			count = list_count(object->preemptor_list);
+
+		pack32(count, buffer);
+
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->preemptor_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+		
+		pack32(object->priority, buffer);
+		
+		packdouble(object->usage_factor, buffer);
+
+		if(object->user_limit_list)
+			count = list_count(object->user_limit_list);
+
+		pack32(count, buffer);
+
+		if(count && count != NO_VAL) {
+			acct_used_limits_t *used_limits = NULL;
+			itr = list_iterator_create(object->user_limit_list);
+			while((used_limits = list_next(itr))) {
+				pack_acct_used_limits(used_limits,
+						      rpc_version, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+	} else if(rpc_version >= 3) {
 		if(!object) {
 			packnull(buffer);
 			pack32(0, buffer);
@@ -2352,7 +2577,7 @@ extern int unpack_acct_qos_rec(void **object, uint16_t rpc_version, Buf buffer)
 	
 	init_acct_qos_rec(object_ptr);
 
-	if(rpc_version >= 3) {
+	if(rpc_version >= 5) {
 		safe_unpackstr_xmalloc(&object_ptr->description,
 				       &uint32_tmp, buffer);
 		safe_unpack32(&object_ptr->id, buffer);
@@ -2399,6 +2624,8 @@ extern int unpack_acct_qos_rec(void **object, uint16_t rpc_version, Buf buffer)
 
 		safe_unpack32(&object_ptr->priority, buffer);
 
+		safe_unpackdouble(&object_ptr->usage_factor, buffer);
+
 		safe_unpack32(&count, buffer);
 		if(count != NO_VAL) {
 			void *used_limits = NULL;
@@ -2413,18 +2640,147 @@ extern int unpack_acct_qos_rec(void **object, uint16_t rpc_version, Buf buffer)
 			}
 		}
 
-	} else {
+	} else if(rpc_version >= 3) {
 		safe_unpackstr_xmalloc(&object_ptr->description,
 				       &uint32_tmp, buffer);
 		safe_unpack32(&object_ptr->id, buffer);
-		safe_unpackstr_xmalloc(&object_ptr->name, &uint32_tmp, buffer);
-	}
 
-	return SLURM_SUCCESS;
+		safe_unpack64(&object_ptr->grp_cpu_mins, buffer);
+		safe_unpack32(&object_ptr->grp_cpus, buffer);
+		safe_unpack32(&object_ptr->grp_jobs, buffer);
+		safe_unpack32(&object_ptr->grp_nodes, buffer);
+		safe_unpack32(&object_ptr->grp_submit_jobs, buffer);
+		safe_unpack32(&object_ptr->grp_wall, buffer);
 
-unpack_error:
-	destroy_acct_qos_rec(object_ptr);
-	*object = NULL;
+		safe_unpack64(&object_ptr->max_cpu_mins_pu, buffer);
+		safe_unpack32(&object_ptr->max_cpus_pu, buffer);
+		safe_unpack32(&object_ptr->max_jobs_pu, buffer);
+		safe_unpack32(&object_ptr->max_nodes_pu, buffer);
+		safe_unpack32(&object_ptr->max_submit_jobs_pu, buffer);
+		safe_unpack32(&object_ptr->max_wall_pu, buffer);
+
+		safe_unpackstr_xmalloc(&object_ptr->name, &uint32_tmp, buffer);
+
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->preemptee_list = 
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->preemptee_list,
+					    tmp_info);
+			}
+		}
+
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->preemptor_list = 
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->preemptor_list,
+					    tmp_info);
+			}
+		}
+
+		safe_unpack32(&object_ptr->priority, buffer);
+
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			void *used_limits = NULL;
+
+			object_ptr->user_limit_list = 
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				unpack_acct_used_limits(&used_limits,
+							rpc_version, buffer);
+				list_append(object_ptr->user_limit_list,
+					    used_limits);
+			}
+		}
+
+	} else {
+		safe_unpackstr_xmalloc(&object_ptr->description,
+				       &uint32_tmp, buffer);
+		safe_unpack32(&object_ptr->id, buffer);
+		safe_unpackstr_xmalloc(&object_ptr->name, &uint32_tmp, buffer);
+	}
+
+	return SLURM_SUCCESS;
+
+unpack_error:
+	destroy_acct_qos_rec(object_ptr);
+	*object = NULL;
+	return SLURM_ERROR;
+}
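+
+/* Note on the version tiers above: rpc_version >= 5 adds usage_factor to
+ * the QOS record, rpc_version >= 3 carries the grp_ and max_ limits plus
+ * the preemptee, preemptor and user-limit lists, and older peers exchange
+ * only description, id and name. */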
+
+extern void pack_acct_reservation_rec(void *in, uint16_t rpc_version,
+				      Buf buffer)
+{
+	acct_reservation_rec_t *object = (acct_reservation_rec_t *)in;
+
+	if(!object) {
+		pack64(0, buffer);
+		packnull(buffer);
+		packnull(buffer);
+		pack32((uint32_t)NO_VAL, buffer);
+		pack64(0, buffer);
+		pack16((uint16_t)NO_VAL, buffer);
+		pack32(0, buffer);
+		packnull(buffer);
+		packnull(buffer);
+		packnull(buffer);
+		pack_time(0, buffer);
+		pack_time(0, buffer);
+		pack_time(0, buffer);
+		return;
+	}
+	
+	pack64(object->alloc_secs, buffer);
+	packstr(object->assocs, buffer);
+	packstr(object->cluster, buffer);
+	pack32(object->cpus, buffer);
+	pack64(object->down_secs, buffer);
+	pack16(object->flags, buffer);
+	pack32(object->id, buffer);
+	packstr(object->name, buffer);
+	packstr(object->nodes, buffer);
+	packstr(object->node_inx, buffer);
+	pack_time(object->time_end, buffer);
+	pack_time(object->time_start, buffer);	
+	pack_time(object->time_start_prev, buffer);	
+}
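+
+/* The unpack routine below must read fields in exactly the order the pack
+ * routine above wrote them; any change to one side of the pair has to be
+ * mirrored on the other (and, once the wire format changes, gated on
+ * rpc_version as done elsewhere in this file). */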
+
+extern int unpack_acct_reservation_rec(void **object, uint16_t rpc_version,
+				      Buf buffer)
+{
+	uint32_t uint32_tmp;
+	acct_reservation_rec_t *object_ptr = 
+		xmalloc(sizeof(acct_reservation_rec_t));
+
+	*object = object_ptr;
+
+	safe_unpack64(&object_ptr->alloc_secs, buffer);
+	safe_unpackstr_xmalloc(&object_ptr->assocs, &uint32_tmp, buffer);
+	safe_unpackstr_xmalloc(&object_ptr->cluster, &uint32_tmp, buffer);
+	safe_unpack32(&object_ptr->cpus, buffer);
+	safe_unpack64(&object_ptr->down_secs, buffer);
+	safe_unpack16(&object_ptr->flags, buffer);
+	safe_unpack32(&object_ptr->id, buffer);
+	safe_unpackstr_xmalloc(&object_ptr->name, &uint32_tmp, buffer);
+	safe_unpackstr_xmalloc(&object_ptr->nodes, &uint32_tmp, buffer);
+	safe_unpackstr_xmalloc(&object_ptr->node_inx, &uint32_tmp, buffer);
+	safe_unpack_time(&object_ptr->time_end, buffer);
+	safe_unpack_time(&object_ptr->time_start, buffer);	
+	safe_unpack_time(&object_ptr->time_start_prev, buffer);	
+
+	return SLURM_SUCCESS;
+
+unpack_error:
+	destroy_acct_reservation_rec(object_ptr);
+	*object = NULL;
 	return SLURM_ERROR;
 }
 
@@ -3070,34 +3426,68 @@ extern void pack_acct_cluster_cond(void *in, uint16_t rpc_version, Buf buffer)
 	acct_cluster_cond_t *object = (acct_cluster_cond_t *)in;
 	uint32_t count = NO_VAL;
 
-	if(!object) {
-		pack32(NO_VAL, buffer);
-		pack32(0, buffer);
-		pack32(0, buffer);
-		pack16(0, buffer);
-		pack16(0, buffer);
-		return;
-	}
- 
-	if(object->cluster_list)
-		count = list_count(object->cluster_list);
-	
-	pack32(count, buffer);
-			
-	if(count && count != NO_VAL) {
-		itr = list_iterator_create(object->cluster_list);
-		while((tmp_info = list_next(itr))) {
-			packstr(tmp_info, buffer);
+	if(rpc_version >= 5) {
+		if(!object) {
+			pack16(0, buffer);
+			pack32(NO_VAL, buffer);
+			pack_time(0, buffer);
+			pack_time(0, buffer);
+			pack16(0, buffer);
+			pack16(0, buffer);
+			return;
 		}
-		list_iterator_destroy(itr);
-	}
-	count = NO_VAL;
 
-	pack32(object->usage_end, buffer);
-	pack32(object->usage_start, buffer);
-
-	pack16(object->with_usage, buffer);
-	pack16(object->with_deleted, buffer);
+		pack16(object->classification, buffer);
+		
+		if(object->cluster_list)
+			count = list_count(object->cluster_list);
+		
+		pack32(count, buffer);
+		
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->cluster_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+		
+		pack_time(object->usage_end, buffer);
+		pack_time(object->usage_start, buffer);
+		
+		pack16(object->with_usage, buffer);
+		pack16(object->with_deleted, buffer);
+	} else {
+		if(!object) {
+			pack32(NO_VAL, buffer);
+			pack32(0, buffer);
+			pack32(0, buffer);
+			pack16(0, buffer);
+			pack16(0, buffer);
+			return;
+		}
+		
+		if(object->cluster_list)
+			count = list_count(object->cluster_list);
+		
+		pack32(count, buffer);
+		
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->cluster_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+		
+		pack32(object->usage_end, buffer);
+		pack32(object->usage_start, buffer);
+		
+		pack16(object->with_usage, buffer);
+		pack16(object->with_deleted, buffer);
+	} 
 }
 
 extern int unpack_acct_cluster_cond(void **object, uint16_t rpc_version, 
@@ -3110,20 +3500,43 @@ extern int unpack_acct_cluster_cond(void **object, uint16_t rpc_version,
 	char *tmp_info = NULL;
 
 	*object = object_ptr;
-	safe_unpack32(&count, buffer);
-	if(count && count != NO_VAL) {
-		object_ptr->cluster_list = list_create(slurm_destroy_char);
-		for(i=0; i<count; i++) {
-			safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp, buffer);
-			list_append(object_ptr->cluster_list, tmp_info);
+
+	if(rpc_version >= 5) {
+		safe_unpack16(&object_ptr->classification, buffer);
+		safe_unpack32(&count, buffer);
+		if(count && count != NO_VAL) {
+			object_ptr->cluster_list =
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info,
+						       &uint32_tmp, buffer);
+				list_append(object_ptr->cluster_list, tmp_info);
+			}
 		}
-	}
-	safe_unpack32(&object_ptr->usage_end, buffer);
-	safe_unpack32(&object_ptr->usage_start, buffer);
+		safe_unpack_time(&object_ptr->usage_end, buffer);
+		safe_unpack_time(&object_ptr->usage_start, buffer);
 
-	safe_unpack16(&object_ptr->with_usage, buffer);
-	safe_unpack16(&object_ptr->with_deleted, buffer);
+		safe_unpack16(&object_ptr->with_usage, buffer);
+		safe_unpack16(&object_ptr->with_deleted, buffer);
+	} else {
+		safe_unpack32(&count, buffer);
+		if(count && count != NO_VAL) {
+			object_ptr->cluster_list =
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info,
+						       &uint32_tmp, buffer);
+				list_append(object_ptr->cluster_list, tmp_info);
+			}
+		}
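+		/* pre-5 peers sent usage_end/usage_start as 32-bit
+		 * values; widen them to time_t here */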
+		safe_unpack32(&uint32_tmp, buffer);
+		object_ptr->usage_end = uint32_tmp;
+		safe_unpack32(&uint32_tmp, buffer);
+		object_ptr->usage_start = uint32_tmp;
 
+		safe_unpack16(&object_ptr->with_usage, buffer);
+		safe_unpack16(&object_ptr->with_deleted, buffer);
+	}
 	return SLURM_SUCCESS;
 
 unpack_error:
@@ -3141,7 +3554,7 @@ extern void pack_acct_association_cond(void *in, uint16_t rpc_version,
 	ListIterator itr = NULL;
 	acct_association_cond_t *object = (acct_association_cond_t *)in;
 
-	if(rpc_version >= 3) {
+	if(rpc_version >= 5) {
 		if(!object) {
 			pack32(NO_VAL, buffer);
 			pack32(NO_VAL, buffer);
@@ -3169,8 +3582,8 @@ extern void pack_acct_association_cond(void *in, uint16_t rpc_version,
 			
 			pack32(NO_VAL, buffer);
 
-			pack32(0, buffer);
-			pack32(0, buffer);
+			pack_time(0, buffer);
+			pack_time(0, buffer);
 
 			pack32(NO_VAL, buffer);
 
@@ -3428,8 +3841,8 @@ extern void pack_acct_association_cond(void *in, uint16_t rpc_version,
 		}
 		count = NO_VAL;
 
-		pack32(object->usage_end, buffer);
-		pack32(object->usage_start, buffer);
+		pack_time(object->usage_end, buffer);
+		pack_time(object->usage_start, buffer);
 
 		if(object->user_list)
 			count = list_count(object->user_list);
@@ -3450,21 +3863,41 @@ extern void pack_acct_association_cond(void *in, uint16_t rpc_version,
 		pack16(object->with_sub_accts, buffer);
 		pack16(object->without_parent_info, buffer);
 		pack16(object->without_parent_limits, buffer);
-	} else {
+	} else if(rpc_version >= 3) {
 		if(!object) {
 			pack32(NO_VAL, buffer);
 			pack32(NO_VAL, buffer);
+
+			pack32(NO_VAL, buffer);
+
 			pack32(NO_VAL, buffer);
 			pack32(NO_VAL, buffer);
 			pack32(NO_VAL, buffer);
 			pack32(NO_VAL, buffer);
 			pack32(NO_VAL, buffer);
 			pack32(NO_VAL, buffer);
+
 			pack32(NO_VAL, buffer);
-			packnull(buffer);
+
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			
+			pack32(NO_VAL, buffer);
+
 			pack32(0, buffer);
 			pack32(0, buffer);
+
 			pack32(NO_VAL, buffer);
+
+			pack16(0, buffer);
+			pack16(0, buffer);
 			pack16(0, buffer);
 			pack16(0, buffer);
 			pack16(0, buffer);
@@ -3498,58 +3931,25 @@ extern void pack_acct_association_cond(void *in, uint16_t rpc_version,
 		}
 		count = NO_VAL;
 
-		if(object->fairshare_list 
-		   && list_count(object->fairshare_list)) 
-			pack32(atoi(list_peek(object->fairshare_list)), 
-			       buffer);
-		else 
-			pack32(count, buffer);
-	
-		if(object->id_list)
-			count = list_count(object->id_list);
+		if(object->fairshare_list)
+			count = list_count(object->fairshare_list);
 	
 		pack32(count, buffer);
 		if(count && count != NO_VAL) {
-			itr = list_iterator_create(object->id_list);
+			itr = list_iterator_create(object->fairshare_list);
 			while((tmp_info = list_next(itr))) {
 				packstr(tmp_info, buffer);
 			}
+			list_iterator_destroy(itr);
 		}
 		count = NO_VAL;
-		
-		if(object->max_cpu_mins_pj_list
-		   && list_count(object->max_cpu_mins_pj_list)) 
-			pack32(atoi(list_peek(object->max_cpu_mins_pj_list)), 
-			       buffer);
-		else 
-			pack32(count, buffer);
-		
-		if(object->max_jobs_list && list_count(object->max_jobs_list)) 
-			pack32(atoi(list_peek(object->max_jobs_list)), 
-			       buffer);
-		else 
-			pack32(count, buffer);
-
-		if(object->max_nodes_pj_list
-		   && list_count(object->max_nodes_pj_list)) 
-			pack32(atoi(list_peek(object->max_nodes_pj_list)), 
-			       buffer);
-		else 
-			pack32(count, buffer);
-
-		if(object->max_wall_pj_list 
-		   && list_count(object->max_wall_pj_list)) 
-			pack32(atoi(list_peek(object->max_wall_pj_list)), 
-			       buffer);
-		else 
-			pack32(count, buffer);
 
-		if(object->partition_list)
-			count = list_count(object->partition_list);
+		if(object->grp_cpu_mins_list)
+			count = list_count(object->grp_cpu_mins_list);
 	
 		pack32(count, buffer);
 		if(count && count != NO_VAL) {
-			itr = list_iterator_create(object->partition_list);
+			itr = list_iterator_create(object->grp_cpu_mins_list);
 			while((tmp_info = list_next(itr))) {
 				packstr(tmp_info, buffer);
 			}
@@ -3557,22 +3957,12 @@ extern void pack_acct_association_cond(void *in, uint16_t rpc_version,
 		}
 		count = NO_VAL;
 
-		if(object->parent_acct_list 
-		   && list_count(object->parent_acct_list)) 
-			packstr(list_peek(object->parent_acct_list), 
-			       buffer);
-		else 
-			packnull(buffer);
-
-		pack32(object->usage_end, buffer);
-		pack32(object->usage_start, buffer);
-
-		if(object->user_list)
-			count = list_count(object->user_list);
+		if(object->grp_cpus_list)
+			count = list_count(object->grp_cpus_list);
 	
 		pack32(count, buffer);
 		if(count && count != NO_VAL) {
-			itr = list_iterator_create(object->user_list);
+			itr = list_iterator_create(object->grp_cpus_list);
 			while((tmp_info = list_next(itr))) {
 				packstr(tmp_info, buffer);
 			}
@@ -3580,407 +3970,1997 @@ extern void pack_acct_association_cond(void *in, uint16_t rpc_version,
 		}
 		count = NO_VAL;
 
-		pack16(object->with_usage, buffer);
-		pack16(object->with_deleted, buffer);
-		pack16(object->without_parent_info, buffer);
-		pack16(object->without_parent_limits, buffer);
-	} 
-}
-
-extern int unpack_acct_association_cond(void **object, 
-					uint16_t rpc_version, Buf buffer)
-{
-	uint32_t uint32_tmp;
-	int i;
-	uint32_t count;
-	acct_association_cond_t *object_ptr =
-		xmalloc(sizeof(acct_association_cond_t));
-	char *tmp_info = NULL;
-	*object = object_ptr;
-
-	if(rpc_version >= 3) {
-		safe_unpack32(&count, buffer);
-		if(count != NO_VAL) {
-			object_ptr->acct_list =
-				list_create(slurm_destroy_char);
-			for(i=0; i<count; i++) {
-				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
-						       buffer);
-				list_append(object_ptr->acct_list, tmp_info);
+		if(object->grp_jobs_list)
+			count = list_count(object->grp_jobs_list);
+	
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->grp_jobs_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
 			}
+			list_iterator_destroy(itr);
 		}
-		safe_unpack32(&count, buffer);
-		if(count != NO_VAL) {
-			object_ptr->cluster_list = 
-				list_create(slurm_destroy_char);
-			for(i=0; i<count; i++) {
-				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
-						       buffer);
-				list_append(object_ptr->cluster_list, 
-					    tmp_info);
+		count = NO_VAL;
+
+		if(object->grp_nodes_list)
+			count = list_count(object->grp_nodes_list);
+	
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->grp_nodes_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
 			}
+			list_iterator_destroy(itr);
 		}
+		count = NO_VAL;
 
-		safe_unpack32(&count, buffer);
-		if(count != NO_VAL) {
-			object_ptr->fairshare_list = 
-				list_create(slurm_destroy_char);
-			for(i=0; i<count; i++) {
-				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
-						       buffer);
-				list_append(object_ptr->fairshare_list, 
-					    tmp_info);
+		if(object->grp_submit_jobs_list)
+			count = list_count(object->grp_submit_jobs_list);
+	
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(
+				object->grp_submit_jobs_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
 			}
+			list_iterator_destroy(itr);
 		}
+		count = NO_VAL;
 
-		safe_unpack32(&count, buffer);
-		if(count != NO_VAL) {
-			object_ptr->grp_cpu_mins_list = 
-				list_create(slurm_destroy_char);
-			for(i=0; i<count; i++) {
-				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
-						       buffer);
-				list_append(object_ptr->grp_cpu_mins_list, 
-					    tmp_info);
+		if(object->grp_wall_list)
+			count = list_count(object->grp_wall_list);
+	
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->grp_wall_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
 			}
+			list_iterator_destroy(itr);
 		}
-		safe_unpack32(&count, buffer);
-		if(count != NO_VAL) {
-			object_ptr->grp_cpus_list = 
-				list_create(slurm_destroy_char);
-			for(i=0; i<count; i++) {
-				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
-						       buffer);
-				list_append(object_ptr->grp_cpus_list, 
-					    tmp_info);
+		count = NO_VAL;
+
+		if(object->id_list)
+			count = list_count(object->id_list);
+	
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->id_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
 			}
 		}
-		safe_unpack32(&count, buffer);
-		if(count != NO_VAL) {
-			object_ptr->grp_jobs_list = 
-				list_create(slurm_destroy_char);
-			for(i=0; i<count; i++) {
-				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
-						       buffer);
-				list_append(object_ptr->grp_jobs_list, 
-					    tmp_info);
+		count = NO_VAL;
+
+		if(object->max_cpu_mins_pj_list)
+			count = list_count(object->max_cpu_mins_pj_list);
+	
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(
+				object->max_cpu_mins_pj_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
 			}
+			list_iterator_destroy(itr);
 		}
-		safe_unpack32(&count, buffer);
-		if(count != NO_VAL) {
-			object_ptr->grp_nodes_list = 
-				list_create(slurm_destroy_char);
-			for(i=0; i<count; i++) {
-				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
-						       buffer);
-				list_append(object_ptr->grp_nodes_list,
-					    tmp_info);
+		count = NO_VAL;
+		if(object->max_cpus_pj_list)
+			count = list_count(object->max_cpus_pj_list);
+	
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->max_cpus_pj_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
 			}
+			list_iterator_destroy(itr);
 		}
-		safe_unpack32(&count, buffer);
-		if(count != NO_VAL) {
-			object_ptr->grp_submit_jobs_list = 
-				list_create(slurm_destroy_char);
-			for(i=0; i<count; i++) {
-				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
-						       buffer);
-				list_append(object_ptr->grp_submit_jobs_list, 
-					    tmp_info);
+		count = NO_VAL;
+		if(object->max_jobs_list)
+			count = list_count(object->max_jobs_list);
+	
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->max_jobs_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
 			}
+			list_iterator_destroy(itr);
 		}
-		safe_unpack32(&count, buffer);
-		if(count != NO_VAL) {
-			object_ptr->grp_wall_list = 
-				list_create(slurm_destroy_char);
-			for(i=0; i<count; i++) {
-				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
-						       buffer);
-				list_append(object_ptr->grp_wall_list, 
-					    tmp_info);
+		count = NO_VAL;
+		if(object->max_nodes_pj_list)
+			count = list_count(object->max_nodes_pj_list);
+	
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->max_nodes_pj_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
 			}
+			list_iterator_destroy(itr);
 		}
-
-		safe_unpack32(&count, buffer);
-		if(count != NO_VAL) {
-			object_ptr->id_list = list_create(slurm_destroy_char);
+		count = NO_VAL;
+		if(object->max_submit_jobs_list)
+			count = list_count(object->max_submit_jobs_list);
+	
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(
+				object->max_submit_jobs_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+		if(object->max_wall_pj_list)
+			count = list_count(object->max_wall_pj_list);
+	
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->max_wall_pj_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+	
+		if(object->partition_list)
+			count = list_count(object->partition_list);
+	
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->partition_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+
+		if(object->parent_acct_list)
+			count = list_count(object->parent_acct_list);
+	
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->parent_acct_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+
+		if(object->qos_list)
+			count = list_count(object->qos_list);
+
+		pack32(count, buffer);
+
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->qos_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+
+		pack32(object->usage_end, buffer);
+		pack32(object->usage_start, buffer);
+
+		if(object->user_list)
+			count = list_count(object->user_list);
+	
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->user_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+
+		pack16(object->with_usage, buffer);
+		pack16(object->with_deleted, buffer);
+		pack16(object->with_raw_qos, buffer);
+		pack16(object->with_sub_accts, buffer);
+		pack16(object->without_parent_info, buffer);
+		pack16(object->without_parent_limits, buffer);
+	} else {
+		if(!object) {
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			packnull(buffer);
+			pack32(0, buffer);
+			pack32(0, buffer);
+			pack32(NO_VAL, buffer);
+			pack16(0, buffer);
+			pack16(0, buffer);
+			pack16(0, buffer);
+			pack16(0, buffer);
+			return;
+		}
+
+		if(object->acct_list)
+			count = list_count(object->acct_list);
+	
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->acct_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+
+		if(object->cluster_list)
+			count = list_count(object->cluster_list);
+	
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->cluster_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+
+		if(object->fairshare_list 
+		   && list_count(object->fairshare_list)) 
+			pack32(atoi(list_peek(object->fairshare_list)), 
+			       buffer);
+		else 
+			pack32(count, buffer);
+	
+		if(object->id_list)
+			count = list_count(object->id_list);
+	
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->id_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+		}
+		count = NO_VAL;
+		
+		if(object->max_cpu_mins_pj_list
+		   && list_count(object->max_cpu_mins_pj_list)) 
+			pack32(atoi(list_peek(object->max_cpu_mins_pj_list)), 
+			       buffer);
+		else 
+			pack32(count, buffer);
+		
+		if(object->max_jobs_list && list_count(object->max_jobs_list)) 
+			pack32(atoi(list_peek(object->max_jobs_list)), 
+			       buffer);
+		else 
+			pack32(count, buffer);
+
+		if(object->max_nodes_pj_list
+		   && list_count(object->max_nodes_pj_list)) 
+			pack32(atoi(list_peek(object->max_nodes_pj_list)), 
+			       buffer);
+		else 
+			pack32(count, buffer);
+
+		if(object->max_wall_pj_list 
+		   && list_count(object->max_wall_pj_list)) 
+			pack32(atoi(list_peek(object->max_wall_pj_list)), 
+			       buffer);
+		else 
+			pack32(count, buffer);
+
+		if(object->partition_list)
+			count = list_count(object->partition_list);
+	
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->partition_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+
+		if(object->parent_acct_list 
+		   && list_count(object->parent_acct_list)) 
+			packstr(list_peek(object->parent_acct_list), 
+			       buffer);
+		else 
+			packnull(buffer);
+
+		pack32(object->usage_end, buffer);
+		pack32(object->usage_start, buffer);
+
+		if(object->user_list)
+			count = list_count(object->user_list);
+	
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->user_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+
+		pack16(object->with_usage, buffer);
+		pack16(object->with_deleted, buffer);
+		pack16(object->without_parent_info, buffer);
+		pack16(object->without_parent_limits, buffer);
+	} 
+}
+
+extern int unpack_acct_association_cond(void **object, 
+					uint16_t rpc_version, Buf buffer)
+{
+	uint32_t uint32_tmp;
+	int i;
+	uint32_t count;
+	acct_association_cond_t *object_ptr =
+		xmalloc(sizeof(acct_association_cond_t));
+	char *tmp_info = NULL;
+	*object = object_ptr;
+
+	if(rpc_version >= 5) {
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->acct_list =
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->acct_list, tmp_info);
+			}
+		}
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->cluster_list = 
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->cluster_list, 
+					    tmp_info);
+			}
+		}
+
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->fairshare_list = 
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->fairshare_list, 
+					    tmp_info);
+			}
+		}
+
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->grp_cpu_mins_list = 
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->grp_cpu_mins_list, 
+					    tmp_info);
+			}
+		}
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->grp_cpus_list = 
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->grp_cpus_list, 
+					    tmp_info);
+			}
+		}
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->grp_jobs_list = 
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->grp_jobs_list, 
+					    tmp_info);
+			}
+		}
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->grp_nodes_list = 
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->grp_nodes_list,
+					    tmp_info);
+			}
+		}
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->grp_submit_jobs_list = 
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->grp_submit_jobs_list, 
+					    tmp_info);
+			}
+		}
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->grp_wall_list = 
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->grp_wall_list, 
+					    tmp_info);
+			}
+		}
+
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->id_list = list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp, 
+						       buffer);
+				list_append(object_ptr->id_list, tmp_info);
+			}
+		}
+	
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->max_cpu_mins_pj_list = 
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->max_cpu_mins_pj_list,
+					    tmp_info);
+			}
+		}
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->max_cpus_pj_list = 
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->max_cpus_pj_list,
+					    tmp_info);
+			}
+		}
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->max_jobs_list = 
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->max_jobs_list,
+					    tmp_info);
+			}
+		}
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->max_nodes_pj_list = 
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->max_nodes_pj_list,
+					    tmp_info);
+			}
+		}
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->max_submit_jobs_list = 
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->max_submit_jobs_list, 
+					    tmp_info);
+			}
+		}
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->max_wall_pj_list = 
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->max_wall_pj_list,
+					    tmp_info);
+			}
+		}
+
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->partition_list = 
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->partition_list,
+					    tmp_info);
+			}
+		}
+
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->parent_acct_list = 
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->parent_acct_list,
+					    tmp_info);
+			}
+		}
+
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->qos_list = list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->qos_list, tmp_info);
+			}
+		}
+
+		safe_unpack_time(&object_ptr->usage_end, buffer);
+		safe_unpack_time(&object_ptr->usage_start, buffer);
+
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->user_list = 
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->user_list, tmp_info);
+			}
+		}
+
+		safe_unpack16(&object_ptr->with_usage, buffer);
+		safe_unpack16(&object_ptr->with_deleted, buffer);
+		safe_unpack16(&object_ptr->with_raw_qos, buffer);
+		safe_unpack16(&object_ptr->with_sub_accts, buffer);
+		safe_unpack16(&object_ptr->without_parent_info, buffer);
+		safe_unpack16(&object_ptr->without_parent_limits, buffer);
+	} else if(rpc_version >= 3) {
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->acct_list =
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->acct_list, tmp_info);
+			}
+		}
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->cluster_list = 
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->cluster_list, 
+					    tmp_info);
+			}
+		}
+
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->fairshare_list = 
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->fairshare_list, 
+					    tmp_info);
+			}
+		}
+
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->grp_cpu_mins_list = 
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->grp_cpu_mins_list, 
+					    tmp_info);
+			}
+		}
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->grp_cpus_list = 
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->grp_cpus_list, 
+					    tmp_info);
+			}
+		}
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->grp_jobs_list = 
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->grp_jobs_list, 
+					    tmp_info);
+			}
+		}
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->grp_nodes_list = 
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->grp_nodes_list,
+					    tmp_info);
+			}
+		}
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->grp_submit_jobs_list = 
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->grp_submit_jobs_list, 
+					    tmp_info);
+			}
+		}
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->grp_wall_list = 
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->grp_wall_list, 
+					    tmp_info);
+			}
+		}
+
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->id_list = list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp, 
+						       buffer);
+				list_append(object_ptr->id_list, tmp_info);
+			}
+		}
+	
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->max_cpu_mins_pj_list = 
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->max_cpu_mins_pj_list,
+					    tmp_info);
+			}
+		}
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->max_cpus_pj_list = 
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->max_cpus_pj_list,
+					    tmp_info);
+			}
+		}
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->max_jobs_list = 
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->max_jobs_list,
+					    tmp_info);
+			}
+		}
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->max_nodes_pj_list = 
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->max_nodes_pj_list,
+					    tmp_info);
+			}
+		}
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->max_submit_jobs_list = 
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->max_submit_jobs_list, 
+					    tmp_info);
+			}
+		}
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->max_wall_pj_list = 
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->max_wall_pj_list,
+					    tmp_info);
+			}
+		}
+
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->partition_list = 
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->partition_list,
+					    tmp_info);
+			}
+		}
+
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->parent_acct_list = 
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->parent_acct_list,
+					    tmp_info);
+			}
+		}
+
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->qos_list = list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->qos_list, tmp_info);
+			}
+		}
+
+		safe_unpack32(&uint32_tmp, buffer);
+		object_ptr->usage_end = uint32_tmp;
+		safe_unpack32(&uint32_tmp, buffer);
+		object_ptr->usage_start = uint32_tmp;
+
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->user_list = 
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->user_list, tmp_info);
+			}
+		}
+
+		safe_unpack16(&object_ptr->with_usage, buffer);
+		safe_unpack16(&object_ptr->with_deleted, buffer);
+		safe_unpack16(&object_ptr->with_raw_qos, buffer);
+		safe_unpack16(&object_ptr->with_sub_accts, buffer);
+		safe_unpack16(&object_ptr->without_parent_info, buffer);
+		safe_unpack16(&object_ptr->without_parent_limits, buffer);
+	} else {
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->acct_list = 
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->acct_list, tmp_info);
+			}
+		}
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->cluster_list = 
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->cluster_list,
+					    tmp_info);
+			}
+		}
+		/* We have to check for 0 here because of a bug in
+		   version 2 that sent 0's when it should have sent
+		   NO_VAL
+		*/
+		safe_unpack32(&count, buffer);
+		if(count && count != NO_VAL) {
+			object_ptr->fairshare_list = 
+				list_create(slurm_destroy_char);
+			list_append(object_ptr->fairshare_list,
+				    xstrdup_printf("%u", count));
+		}
+
+		safe_unpack32(&count, buffer);
+		if(count && count != NO_VAL) {
+			object_ptr->id_list = list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp, 
+						       buffer);
+				list_append(object_ptr->id_list, tmp_info);
+			}
+		}
+	
+		safe_unpack32(&count, buffer);
+		if(count && count != NO_VAL) {
+			object_ptr->max_cpu_mins_pj_list = 
+				list_create(slurm_destroy_char);
+			list_append(object_ptr->max_cpu_mins_pj_list,
+				    xstrdup_printf("%u", count));
+		}
+
+		safe_unpack32(&count, buffer);
+		if(count && count != NO_VAL) {
+			object_ptr->max_jobs_list = 
+				list_create(slurm_destroy_char);
+			list_append(object_ptr->max_jobs_list,
+				    xstrdup_printf("%u", count));
+		}
+
+		safe_unpack32(&count, buffer);
+		if(count && count != NO_VAL) {
+			object_ptr->max_nodes_pj_list = 
+				list_create(slurm_destroy_char);
+			list_append(object_ptr->max_nodes_pj_list,
+				    xstrdup_printf("%u", count));
+		}
+
+		safe_unpack32(&count, buffer);
+		if(count && count != NO_VAL) {
+			object_ptr->max_wall_pj_list = 
+				list_create(slurm_destroy_char);
+			list_append(object_ptr->max_wall_pj_list,
+				    xstrdup_printf("%u", count));
+		}
+
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->partition_list = 
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->partition_list,
+					    tmp_info);
+			}
+		}
+
+		safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp, buffer);
+		if(tmp_info) {
+			object_ptr->parent_acct_list = 
+				list_create(slurm_destroy_char);
+			list_append(object_ptr->parent_acct_list, tmp_info);
+		}
+
+		safe_unpack32(&uint32_tmp, buffer);
+		object_ptr->usage_end = uint32_tmp;
+		safe_unpack32(&uint32_tmp, buffer);
+		object_ptr->usage_start = uint32_tmp;
+
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->user_list = 
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->user_list, tmp_info);
+			}
+		}
+
+		safe_unpack16(&object_ptr->with_usage, buffer);
+		safe_unpack16(&object_ptr->with_deleted, buffer);
+		safe_unpack16(&object_ptr->without_parent_info, buffer);
+		safe_unpack16(&object_ptr->without_parent_limits, buffer);
+	} 
+
+	return SLURM_SUCCESS;
+
+unpack_error:
+	destroy_acct_association_cond(object_ptr);
+	*object = NULL;
+	return SLURM_ERROR;
+}
+
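+/*
+ * The safe_unpack* macros jump to the local unpack_error label when the
+ * buffer runs short, so a partially filled condition is always released
+ * through its destroy_* function before SLURM_ERROR is returned.  The
+ * rpc_version branches keep older peers readable: the oldest
+ * association layout above lacks the with_raw_qos and with_sub_accts
+ * flags and encodes several limits as single packed numbers rather
+ * than lists (see the 0-vs-NO_VAL comment in that branch).
+ */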
+extern void pack_acct_job_cond(void *in, uint16_t rpc_version, Buf buffer)
+{
+	char *tmp_info = NULL;
+	jobacct_selected_step_t *job = NULL;
+	uint32_t count = NO_VAL;
+
+	ListIterator itr = NULL;
+	acct_job_cond_t *object = (acct_job_cond_t *)in;
+
+	if(rpc_version >= 5) {
+		if(!object) {
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack16(0, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack_time(0, buffer);
+			pack_time(0, buffer);
+			packnull(buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack16(0, buffer);
+			pack16(0, buffer);
+			return;
+		}
+
+		if(object->acct_list)
+			count = list_count(object->acct_list);
+	
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->acct_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+
+		if(object->associd_list)
+			count = list_count(object->associd_list);
+	
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->associd_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+
+		if(object->cluster_list)
+			count = list_count(object->cluster_list);
+	
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->cluster_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+
+		pack16(object->duplicates, buffer);
+
+		if(object->groupid_list)
+			count = list_count(object->groupid_list);
+	
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->groupid_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+	
+		if(object->partition_list)
+			count = list_count(object->partition_list);
+	
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->partition_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+
+		if(object->resv_list)
+			count = list_count(object->resv_list);
+	
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->resv_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+
+		if(object->resvid_list)
+			count = list_count(object->resvid_list);
+	
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->resvid_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+
+		if(object->step_list)
+			count = list_count(object->step_list);
+	
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->step_list);
+			while((job = list_next(itr))) {
+				pack_jobacct_selected_step(job, rpc_version, 
+							   buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+
+		if(object->state_list)
+			count = list_count(object->state_list);
+	
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->state_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+
+		pack_time(object->usage_end, buffer);
+		pack_time(object->usage_start, buffer);
+
+		packstr(object->used_nodes, buffer);
+
+		if(object->userid_list)
+			count = list_count(object->userid_list);
+	
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->userid_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+
+		if(object->wckey_list)
+			count = list_count(object->wckey_list);
+
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->wckey_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+
+		pack16(object->without_steps, buffer);
+		pack16(object->without_usage_truncation, buffer);
+	} else if(rpc_version >= 4) {
+		if(!object) {
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack16(0, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(0, buffer);
+			pack32(0, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack16(0, buffer);
+			return;
+		}
+
+		if(object->acct_list)
+			count = list_count(object->acct_list);
+	
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->acct_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+
+		if(object->associd_list)
+			count = list_count(object->associd_list);
+	
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->associd_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+
+		if(object->cluster_list)
+			count = list_count(object->cluster_list);
+	
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->cluster_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+
+		pack16(object->duplicates, buffer);
+
+		if(object->groupid_list)
+			count = list_count(object->groupid_list);
+	
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->groupid_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+	
+		if(object->partition_list)
+			count = list_count(object->partition_list);
+	
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->partition_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+
+		if(object->step_list)
+			count = list_count(object->step_list);
+	
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->step_list);
+			while((job = list_next(itr))) {
+				pack_jobacct_selected_step(job, rpc_version, 
+							   buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+
+		if(object->state_list)
+			count = list_count(object->state_list);
+	
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->state_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+
+		pack32(object->usage_end, buffer);
+		pack32(object->usage_start, buffer);
+
+		if(object->userid_list)
+			count = list_count(object->userid_list);
+	
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->userid_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+
+		if(object->wckey_list)
+			count = list_count(object->wckey_list);
+
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->wckey_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+
+		pack16(object->without_steps, buffer);
+	} else {
+		if(!object) {
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack16(0, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(0, buffer);
+			pack32(0, buffer);
+			pack32(NO_VAL, buffer);
+			pack16(0, buffer);
+			return;
+		}
+
+		if(object->acct_list)
+			count = list_count(object->acct_list);
+	
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->acct_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+
+		if(object->associd_list)
+			count = list_count(object->associd_list);
+	
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->associd_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+
+		if(object->cluster_list)
+			count = list_count(object->cluster_list);
+	
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->cluster_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+
+		pack16(object->duplicates, buffer);
+
+		if(object->groupid_list)
+			count = list_count(object->groupid_list);
+	
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->groupid_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+	
+		if(object->partition_list)
+			count = list_count(object->partition_list);
+	
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->partition_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+
+		if(object->step_list)
+			count = list_count(object->step_list);
+	
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->step_list);
+			while((job = list_next(itr))) {
+				pack_jobacct_selected_step(job, rpc_version,
+							   buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+
+		if(object->state_list)
+			count = list_count(object->state_list);
+	
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->state_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+
+		pack32(object->usage_end, buffer);
+		pack32(object->usage_start, buffer);
+
+		if(object->userid_list)
+			count = list_count(object->userid_list);
+	
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->userid_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+
+		pack16(object->without_steps, buffer);
+	}
+}
+
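+/*
+ * Every list above repeats the same count-then-strings sequence.  A
+ * minimal sketch of how the writer side could be factored --
+ * _pack_str_list() is a hypothetical helper shown for illustration,
+ * not part of this patch:
+ *
+ *	static void _pack_str_list(List str_list, Buf buffer)
+ *	{
+ *		uint32_t count = str_list ?
+ *			list_count(str_list) : NO_VAL;
+ *		ListIterator itr = NULL;
+ *		char *tmp_info = NULL;
+ *
+ *		pack32(count, buffer);
+ *		if(count && (count != NO_VAL)) {
+ *			itr = list_iterator_create(str_list);
+ *			while((tmp_info = list_next(itr)))
+ *				packstr(tmp_info, buffer);
+ *			list_iterator_destroy(itr);
+ *		}
+ *	}
+ */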
+extern int unpack_acct_job_cond(void **object, uint16_t rpc_version, Buf buffer)
+{
+	uint32_t uint32_tmp;
+	int i;
+	uint32_t count;
+	acct_job_cond_t *object_ptr = xmalloc(sizeof(acct_job_cond_t));
+	char *tmp_info = NULL;
+	jobacct_selected_step_t *job = NULL;
+
+	*object = object_ptr;
+
+	if(rpc_version >= 5) {
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->acct_list = list_create(slurm_destroy_char);
 			for(i=0; i<count; i++) {
-				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp, 
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
 						       buffer);
-				list_append(object_ptr->id_list, tmp_info);
+				list_append(object_ptr->acct_list, tmp_info);
 			}
 		}
-	
+
 		safe_unpack32(&count, buffer);
 		if(count != NO_VAL) {
-			object_ptr->max_cpu_mins_pj_list = 
+			object_ptr->associd_list =
 				list_create(slurm_destroy_char);
 			for(i=0; i<count; i++) {
 				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
 						       buffer);
-				list_append(object_ptr->max_cpu_mins_pj_list,
-					    tmp_info);
+				list_append(object_ptr->associd_list, tmp_info);
 			}
 		}
+
 		safe_unpack32(&count, buffer);
 		if(count != NO_VAL) {
-			object_ptr->max_cpus_pj_list = 
+			object_ptr->cluster_list =
 				list_create(slurm_destroy_char);
 			for(i=0; i<count; i++) {
 				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
 						       buffer);
-				list_append(object_ptr->max_cpus_pj_list,
-					    tmp_info);
+				list_append(object_ptr->cluster_list, tmp_info);
 			}
 		}
+
+		safe_unpack16(&object_ptr->duplicates, buffer);
+
 		safe_unpack32(&count, buffer);
 		if(count != NO_VAL) {
-			object_ptr->max_jobs_list = 
+			object_ptr->groupid_list = 
 				list_create(slurm_destroy_char);
 			for(i=0; i<count; i++) {
-				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp, 
 						       buffer);
-				list_append(object_ptr->max_jobs_list,
+				list_append(object_ptr->groupid_list, tmp_info);
+			}
+		}
+	
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->partition_list =
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info,
+						       &uint32_tmp, buffer);
+				list_append(object_ptr->partition_list, 
 					    tmp_info);
 			}
 		}
+
 		safe_unpack32(&count, buffer);
 		if(count != NO_VAL) {
-			object_ptr->max_nodes_pj_list = 
+			object_ptr->resv_list =
 				list_create(slurm_destroy_char);
 			for(i=0; i<count; i++) {
-				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
-						       buffer);
-				list_append(object_ptr->max_nodes_pj_list,
+				safe_unpackstr_xmalloc(&tmp_info,
+						       &uint32_tmp, buffer);
+				list_append(object_ptr->resv_list, 
 					    tmp_info);
 			}
 		}
+
 		safe_unpack32(&count, buffer);
 		if(count != NO_VAL) {
-			object_ptr->max_submit_jobs_list = 
+			object_ptr->resvid_list =
 				list_create(slurm_destroy_char);
 			for(i=0; i<count; i++) {
-				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
-						       buffer);
-				list_append(object_ptr->max_submit_jobs_list, 
+				safe_unpackstr_xmalloc(&tmp_info,
+						       &uint32_tmp, buffer);
+				list_append(object_ptr->resvid_list, 
 					    tmp_info);
 			}
 		}
+
 		safe_unpack32(&count, buffer);
 		if(count != NO_VAL) {
-			object_ptr->max_wall_pj_list = 
+			object_ptr->step_list =
+				list_create(destroy_jobacct_selected_step);
+			for(i=0; i<count; i++) {
+				if(unpack_jobacct_selected_step(
+					   &job, rpc_version, buffer)
+				   != SLURM_SUCCESS)
+					goto unpack_error;
+				list_append(object_ptr->step_list, job);
+			}
+		}
+
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->state_list =
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info,
+						       &uint32_tmp, buffer);
+				list_append(object_ptr->state_list, tmp_info);
+			}
+		}
+	
+		safe_unpack_time(&object_ptr->usage_end, buffer);
+		safe_unpack_time(&object_ptr->usage_start, buffer);
+
+		safe_unpackstr_xmalloc(&object_ptr->used_nodes,
+				       &uint32_tmp, buffer);
+
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->userid_list = 
 				list_create(slurm_destroy_char);
 			for(i=0; i<count; i++) {
 				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
 						       buffer);
-				list_append(object_ptr->max_wall_pj_list,
-					    tmp_info);
+				list_append(object_ptr->userid_list, tmp_info);
 			}
 		}
 
 		safe_unpack32(&count, buffer);
 		if(count != NO_VAL) {
-			object_ptr->partition_list = 
+			object_ptr->wckey_list = 
 				list_create(slurm_destroy_char);
 			for(i=0; i<count; i++) {
 				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
 						       buffer);
-				list_append(object_ptr->partition_list,
-					    tmp_info);
+				list_append(object_ptr->wckey_list, tmp_info);
 			}
 		}
 
+		safe_unpack16(&object_ptr->without_steps, buffer);
+		safe_unpack16(&object_ptr->without_usage_truncation, buffer);
+	} else if(rpc_version >= 4) {
 		safe_unpack32(&count, buffer);
 		if(count != NO_VAL) {
-			object_ptr->parent_acct_list = 
+			object_ptr->acct_list = list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->acct_list, tmp_info);
+			}
+		}
+
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->associd_list =
 				list_create(slurm_destroy_char);
 			for(i=0; i<count; i++) {
 				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
 						       buffer);
-				list_append(object_ptr->parent_acct_list,
-					    tmp_info);
+				list_append(object_ptr->associd_list, tmp_info);
 			}
 		}
 
 		safe_unpack32(&count, buffer);
 		if(count != NO_VAL) {
-			object_ptr->qos_list = list_create(slurm_destroy_char);
+			object_ptr->cluster_list =
+				list_create(slurm_destroy_char);
 			for(i=0; i<count; i++) {
 				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
 						       buffer);
-				list_append(object_ptr->qos_list, tmp_info);
+				list_append(object_ptr->cluster_list, tmp_info);
 			}
 		}
 
-		safe_unpack32(&object_ptr->usage_end, buffer);
-		safe_unpack32(&object_ptr->usage_start, buffer);
+		safe_unpack16(&object_ptr->duplicates, buffer);
 
 		safe_unpack32(&count, buffer);
 		if(count != NO_VAL) {
-			object_ptr->user_list = 
+			object_ptr->groupid_list = 
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp, 
+						       buffer);
+				list_append(object_ptr->groupid_list, tmp_info);
+			}
+		}
+	
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->partition_list =
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info,
+						       &uint32_tmp, buffer);
+				list_append(object_ptr->partition_list, 
+					    tmp_info);
+			}
+		}
+
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->step_list =
+				list_create(destroy_jobacct_selected_step);
+			for(i=0; i<count; i++) {
+				if(unpack_jobacct_selected_step(
+					   &job, rpc_version, buffer)
+				   != SLURM_SUCCESS)
+					goto unpack_error;
+				list_append(object_ptr->step_list, job);
+			}
+		}
+
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->state_list =
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info,
+						       &uint32_tmp, buffer);
+				list_append(object_ptr->state_list, tmp_info);
+			}
+		}
+	
+		safe_unpack32(&uint32_tmp, buffer);
+		object_ptr->usage_end = uint32_tmp;
+		safe_unpack32(&uint32_tmp, buffer);
+		object_ptr->usage_start = uint32_tmp;
+
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->userid_list = 
 				list_create(slurm_destroy_char);
 			for(i=0; i<count; i++) {
 				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
 						       buffer);
-				list_append(object_ptr->user_list, tmp_info);
+				list_append(object_ptr->userid_list, tmp_info);
 			}
 		}
 
-		safe_unpack16(&object_ptr->with_usage, buffer);
-		safe_unpack16(&object_ptr->with_deleted, buffer);
-		safe_unpack16(&object_ptr->with_raw_qos, buffer);
-		safe_unpack16(&object_ptr->with_sub_accts, buffer);
-		safe_unpack16(&object_ptr->without_parent_info, buffer);
-		safe_unpack16(&object_ptr->without_parent_limits, buffer);
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->wckey_list = 
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->wckey_list, tmp_info);
+			}
+		}
+
+		safe_unpack16(&object_ptr->without_steps, buffer);
 	} else {
 		safe_unpack32(&count, buffer);
 		if(count != NO_VAL) {
-			object_ptr->acct_list = 
-				list_create(slurm_destroy_char);
+			object_ptr->acct_list = list_create(slurm_destroy_char);
 			for(i=0; i<count; i++) {
 				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
 						       buffer);
 				list_append(object_ptr->acct_list, tmp_info);
 			}
 		}
+
 		safe_unpack32(&count, buffer);
 		if(count != NO_VAL) {
-			object_ptr->cluster_list = 
+			object_ptr->associd_list =
 				list_create(slurm_destroy_char);
 			for(i=0; i<count; i++) {
 				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
 						       buffer);
-				list_append(object_ptr->cluster_list,
-					    tmp_info);
+				list_append(object_ptr->associd_list, tmp_info);
 			}
 		}
-		/* We have to check for 0 here because of a bug in
-		   version 2 that sent 0's when it should had sent
-		   NO_VAL
-		*/
+
 		safe_unpack32(&count, buffer);
-		if(count && count != NO_VAL) {
-			object_ptr->fairshare_list = 
+		if(count != NO_VAL) {
+			object_ptr->cluster_list =
 				list_create(slurm_destroy_char);
-			list_append(object_ptr->fairshare_list,
-				    xstrdup_printf("%u", count));
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->cluster_list, tmp_info);
+			}
 		}
 
+		safe_unpack16(&object_ptr->duplicates, buffer);
+
 		safe_unpack32(&count, buffer);
-		if(count && count != NO_VAL) {
-			object_ptr->id_list = list_create(slurm_destroy_char);
+		if(count != NO_VAL) {
+			object_ptr->groupid_list = 
+				list_create(slurm_destroy_char);
 			for(i=0; i<count; i++) {
 				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp, 
 						       buffer);
-				list_append(object_ptr->id_list, tmp_info);
+				list_append(object_ptr->groupid_list, tmp_info);
 			}
 		}
 	
 		safe_unpack32(&count, buffer);
-		if(count && count != NO_VAL) {
-			object_ptr->max_cpu_mins_pj_list = 
-				list_create(slurm_destroy_char);
-			list_append(object_ptr->max_cpu_mins_pj_list,
-				    xstrdup_printf("%u", count));
-		}
-
-		safe_unpack32(&count, buffer);
-		if(count && count != NO_VAL) {
-			object_ptr->max_jobs_list = 
+		if(count != NO_VAL) {
+			object_ptr->partition_list =
 				list_create(slurm_destroy_char);
-			list_append(object_ptr->max_jobs_list,
-				    xstrdup_printf("%u", count));
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info,
+						       &uint32_tmp, buffer);
+				list_append(object_ptr->partition_list, 
+					    tmp_info);
+			}
 		}
 
 		safe_unpack32(&count, buffer);
-		if(count && count != NO_VAL) {
-			object_ptr->max_nodes_pj_list = 
-				list_create(slurm_destroy_char);
-			list_append(object_ptr->max_nodes_pj_list,
-				    xstrdup_printf("%u", count));
+		if(count != NO_VAL) {
+			object_ptr->step_list =
+				list_create(destroy_jobacct_selected_step);
+			for(i=0; i<count; i++) {
+				if(unpack_jobacct_selected_step(
+					   &job, rpc_version, buffer)
+				   != SLURM_SUCCESS)
+					goto unpack_error;
+				list_append(object_ptr->step_list, job);
+			}
 		}
 
 		safe_unpack32(&count, buffer);
-		if(count && count != NO_VAL) {
-			object_ptr->max_wall_pj_list = 
+		if(count != NO_VAL) {
+			object_ptr->state_list =
 				list_create(slurm_destroy_char);
-			list_append(object_ptr->max_wall_pj_list,
-				    xstrdup_printf("%u", count));
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info,
+						       &uint32_tmp, buffer);
+				list_append(object_ptr->state_list, tmp_info);
+			}
 		}
+	
+		safe_unpack32(&uint32_tmp, buffer);
+		object_ptr->usage_end = uint32_tmp;
+		safe_unpack32(&uint32_tmp, buffer);
+		object_ptr->usage_start = uint32_tmp;
 
 		safe_unpack32(&count, buffer);
 		if(count != NO_VAL) {
-			object_ptr->partition_list = 
+			object_ptr->userid_list = 
 				list_create(slurm_destroy_char);
 			for(i=0; i<count; i++) {
 				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
 						       buffer);
-				list_append(object_ptr->partition_list,
-					    tmp_info);
+				list_append(object_ptr->userid_list, tmp_info);
 			}
 		}
 
-		safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp, buffer);
-		if(tmp_info) {
-			object_ptr->parent_acct_list = 
-				list_create(slurm_destroy_char);
-			list_append(object_ptr->parent_acct_list, tmp_info);
+		safe_unpack16(&object_ptr->without_steps, buffer);
+	}
+
+	return SLURM_SUCCESS;
+
+unpack_error:
+	destroy_acct_job_cond(object_ptr);
+	*object = NULL;
+	return SLURM_ERROR;
+}
+
+extern void pack_acct_qos_cond(void *in, uint16_t rpc_version, Buf buffer)
+{
+	uint32_t count = NO_VAL;
+	char *tmp_info = NULL;
+	ListIterator itr = NULL;
+	acct_qos_cond_t *object = (acct_qos_cond_t *)in;
+
+	if(!object) {
+		pack32(NO_VAL, buffer);
+		pack32(NO_VAL, buffer);
+		pack32(NO_VAL, buffer);
+		pack16(0, buffer);
+		return;
+	}
+
+	if(object->description_list)
+		count = list_count(object->description_list);
+	
+	pack32(count, buffer);
+	if(count && count != NO_VAL) {
+		itr = list_iterator_create(object->description_list);
+		while((tmp_info = list_next(itr))) {
+			packstr(tmp_info, buffer);
+		}
+		list_iterator_destroy(itr);
+	}
+	count = NO_VAL;
+
+	if(object->id_list)
+		count = list_count(object->id_list);
+	
+	pack32(count, buffer);
+	if(count && count != NO_VAL) {
+		itr = list_iterator_create(object->id_list);
+		while((tmp_info = list_next(itr))) {
+			packstr(tmp_info, buffer);
+		}
+		list_iterator_destroy(itr);
+	}
+	count = NO_VAL;
+
+	if(object->name_list) 
+		count = list_count(object->name_list);
+
+	pack32(count, buffer);
+	if(count && count != NO_VAL) {
+		itr = list_iterator_create(object->name_list);
+		while((tmp_info = list_next(itr))) {
+			packstr(tmp_info, buffer);
+		}
+		list_iterator_destroy(itr); 
+	}
+	count = NO_VAL;
+
+	pack16(object->with_deleted, buffer);
+}
+
+extern int unpack_acct_qos_cond(void **object, uint16_t rpc_version, Buf buffer)
+{
+	uint32_t uint32_tmp;
+	int i;
+	uint32_t count;
+	acct_qos_cond_t *object_ptr = xmalloc(sizeof(acct_qos_cond_t));
+	char *tmp_info = NULL;
+
+	*object = object_ptr;
+
+	safe_unpack32(&count, buffer);
+	if(count != NO_VAL) {
+		object_ptr->description_list = list_create(slurm_destroy_char);
+		for(i=0; i<count; i++) {
+			safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp, buffer);
+			list_append(object_ptr->description_list, tmp_info);
+		}
+	}
+
+	safe_unpack32(&count, buffer);
+	if(count != NO_VAL) {
+		object_ptr->id_list = list_create(slurm_destroy_char);
+		for(i=0; i<count; i++) {
+			safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp, buffer);
+			list_append(object_ptr->id_list, tmp_info);
+		}
+	}
+
+	safe_unpack32(&count, buffer);
+	if(count != NO_VAL) {
+		object_ptr->name_list = list_create(slurm_destroy_char);
+		for(i=0; i<count; i++) {
+			safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp, buffer);
+			list_append(object_ptr->name_list, tmp_info);
+		}
+	}
+
+	safe_unpack16(&object_ptr->with_deleted, buffer);
+	return SLURM_SUCCESS;
+
+unpack_error:
+	destroy_acct_qos_cond(object_ptr);
+	*object = NULL;
+	return SLURM_ERROR;
+}
+
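+/*
+ * The matching reader for the hypothetical _pack_str_list() sketched
+ * after pack_acct_job_cond() would look roughly like this (again an
+ * illustration only; the safe_unpack* macros branch to the local
+ * unpack_error label on failure):
+ *
+ *	static int _unpack_str_list(List *str_list, Buf buffer)
+ *	{
+ *		uint32_t count, uint32_tmp;
+ *		char *tmp_info = NULL;
+ *		int i;
+ *
+ *		safe_unpack32(&count, buffer);
+ *		if(count != NO_VAL) {
+ *			*str_list = list_create(slurm_destroy_char);
+ *			for(i=0; i<count; i++) {
+ *				safe_unpackstr_xmalloc(&tmp_info,
+ *						       &uint32_tmp, buffer);
+ *				list_append(*str_list, tmp_info);
+ *			}
+ *		}
+ *		return SLURM_SUCCESS;
+ *
+ *	unpack_error:
+ *		return SLURM_ERROR;
+ *	}
+ */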
+extern void pack_acct_reservation_cond(void *in, uint16_t rpc_version,
+				      Buf buffer)
+{
+	acct_reservation_cond_t *object = (acct_reservation_cond_t *)in;
+	uint32_t count = NO_VAL;
+	ListIterator itr = NULL;
+	char *tmp_info = NULL;
+
+	if(!object) {
+		pack32((uint32_t)NO_VAL, buffer);
+		pack16(0, buffer);
+		pack32((uint32_t)NO_VAL, buffer);
+		pack32((uint32_t)NO_VAL, buffer);
+		packnull(buffer);
+		pack_time(0, buffer);
+		pack_time(0, buffer);
+		pack16(0, buffer);
+		return;
+	}
+	
+	if(object->cluster_list)
+		count = list_count(object->cluster_list);
+	
+	pack32(count, buffer);
+	if(count && count != NO_VAL) {
+		itr = list_iterator_create(object->cluster_list);
+		while((tmp_info = list_next(itr))) {
+			packstr(tmp_info, buffer);
+		}
+		list_iterator_destroy(itr);
+	}
+	count = NO_VAL;
+
+	pack16(object->flags, buffer);
+	
+	if(object->id_list)
+		count = list_count(object->id_list);
+	
+	pack32(count, buffer);
+	if(count && count != NO_VAL) {
+		itr = list_iterator_create(object->id_list);
+		while((tmp_info = list_next(itr))) {
+			packstr(tmp_info, buffer);
+		}
+		list_iterator_destroy(itr);
+	}
+	count = NO_VAL;
+
+	if(object->name_list)
+		count = list_count(object->name_list);
+	
+	pack32(count, buffer);
+	if(count && count != NO_VAL) {
+		itr = list_iterator_create(object->name_list);
+		while((tmp_info = list_next(itr))) {
+			packstr(tmp_info, buffer);
+		}
+		list_iterator_destroy(itr);
+	}
+	count = NO_VAL;
+
+	packstr(object->nodes, buffer);
+	pack_time(object->time_end, buffer);
+	pack_time(object->time_start, buffer);	
+	pack16(object->with_usage, buffer);	
+}
+
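+/*
+ * Reservation conditions have a single wire layout: rpc_version is
+ * accepted for symmetry with the other packers but never branched on.
+ * Field order is fixed as cluster list, flags, id list, name list,
+ * nodes, time_end, time_start, with_usage, and the unpacker below must
+ * read in exactly that order.
+ */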
+extern int unpack_acct_reservation_cond(void **object, uint16_t rpc_version,
+				      Buf buffer)
+{
+	uint32_t uint32_tmp, count;
+	int i = 0;
+	char *tmp_info = NULL;
+	acct_reservation_cond_t *object_ptr = 
+		xmalloc(sizeof(acct_reservation_cond_t));
+
+	*object = object_ptr;
+
+	safe_unpack32(&count, buffer);
+	if(count != NO_VAL) {
+		object_ptr->cluster_list = list_create(slurm_destroy_char);
+		for(i=0; i<count; i++) {
+			safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp, buffer);
+			list_append(object_ptr->cluster_list, tmp_info);
 		}
+	}
 
-		safe_unpack32(&object_ptr->usage_end, buffer);
-		safe_unpack32(&object_ptr->usage_start, buffer);
+	safe_unpack16(&object_ptr->flags, buffer);
 
-		safe_unpack32(&count, buffer);
-		if(count != NO_VAL) {
-			object_ptr->user_list = 
-				list_create(slurm_destroy_char);
-			for(i=0; i<count; i++) {
-				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
-						       buffer);
-				list_append(object_ptr->user_list, tmp_info);
-			}
+	safe_unpack32(&count, buffer);
+	if(count != NO_VAL) {
+		object_ptr->id_list = list_create(slurm_destroy_char);
+		for(i=0; i<count; i++) {
+			safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp, buffer);
+			list_append(object_ptr->id_list, tmp_info);
 		}
+	}
 
-		safe_unpack16(&object_ptr->with_usage, buffer);
-		safe_unpack16(&object_ptr->with_deleted, buffer);
-		safe_unpack16(&object_ptr->without_parent_info, buffer);
-		safe_unpack16(&object_ptr->without_parent_limits, buffer);
-	} 
+	safe_unpack32(&count, buffer);
+	if(count != NO_VAL) {
+		object_ptr->name_list = list_create(slurm_destroy_char);
+		for(i=0; i<count; i++) {
+			safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp, buffer);
+			list_append(object_ptr->name_list, tmp_info);
+		}
+	}
+
+	safe_unpackstr_xmalloc(&object_ptr->nodes, &uint32_tmp, buffer);
+	safe_unpack_time(&object_ptr->time_end, buffer);
+	safe_unpack_time(&object_ptr->time_start, buffer);	
+	safe_unpack16(&object_ptr->with_usage, buffer);	
 
 	return SLURM_SUCCESS;
 
 unpack_error:
-	destroy_acct_association_cond(object_ptr);
+	destroy_acct_reservation_cond(object_ptr);
 	*object = NULL;
 	return SLURM_ERROR;
 }
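+/*
+ * As in the other unpackers, the unpack_error path hands a partially
+ * filled struct to destroy_acct_reservation_cond(); this is safe
+ * because xmalloc() returns zeroed memory, so members that were never
+ * reached are still NULL.
+ */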
 
-extern void pack_acct_job_cond(void *in, uint16_t rpc_version, Buf buffer)
+extern void pack_acct_txn_cond(void *in, uint16_t rpc_version, Buf buffer)
 {
-	char *tmp_info = NULL;
-	jobacct_selected_step_t *job = NULL;
 	uint32_t count = NO_VAL;
-
+	char *tmp_info = NULL;
 	ListIterator itr = NULL;
-	acct_job_cond_t *object = (acct_job_cond_t *)in;
+	acct_txn_cond_t *object = (acct_txn_cond_t *)in;
 
-	if(rpc_version >= 4) {
+	if(rpc_version >= 5) {
 		if(!object) {
 			pack32(NO_VAL, buffer);
 			pack32(NO_VAL, buffer);
 			pack32(NO_VAL, buffer);
-			pack16(0, buffer);
-			pack32(NO_VAL, buffer);
-			pack32(NO_VAL, buffer);
 			pack32(NO_VAL, buffer);
 			pack32(NO_VAL, buffer);
-			pack32(0, buffer);
-			pack32(0, buffer);
 			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack_time(0, buffer);
+			pack_time(0, buffer);
 			pack32(NO_VAL, buffer);
 			pack16(0, buffer);
 			return;
 		}
-
 		if(object->acct_list)
 			count = list_count(object->acct_list);
 	
@@ -3994,24 +5974,12 @@ extern void pack_acct_job_cond(void *in, uint16_t rpc_version, Buf buffer)
 		}
 		count = NO_VAL;
 
-		if(object->associd_list)
-			count = list_count(object->associd_list);
-	
-		pack32(count, buffer);
-		if(count && count != NO_VAL) {
-			itr = list_iterator_create(object->associd_list);
-			while((tmp_info = list_next(itr))) {
-				packstr(tmp_info, buffer);
-			}
-		}
-		count = NO_VAL;
-
-		if(object->cluster_list)
-			count = list_count(object->cluster_list);
+		if(object->action_list)
+			count = list_count(object->action_list);
 	
 		pack32(count, buffer);
 		if(count && count != NO_VAL) {
-			itr = list_iterator_create(object->cluster_list);
+			itr = list_iterator_create(object->action_list);
 			while((tmp_info = list_next(itr))) {
 				packstr(tmp_info, buffer);
 			}
@@ -4019,94 +5987,92 @@ extern void pack_acct_job_cond(void *in, uint16_t rpc_version, Buf buffer)
 		}
 		count = NO_VAL;
 
-		pack16(object->duplicates, buffer);
+		if(object->actor_list) 
+			count = list_count(object->actor_list);
 
-		if(object->groupid_list)
-			count = list_count(object->groupid_list);
-	
 		pack32(count, buffer);
 		if(count && count != NO_VAL) {
-			itr = list_iterator_create(object->groupid_list);
+			itr = list_iterator_create(object->actor_list);
 			while((tmp_info = list_next(itr))) {
 				packstr(tmp_info, buffer);
 			}
+			list_iterator_destroy(itr); 
 		}
 		count = NO_VAL;
-	
-		if(object->partition_list)
-			count = list_count(object->partition_list);
-	
+
+		if(object->cluster_list)
+			count = list_count(object->cluster_list);
+	 
 		pack32(count, buffer);
 		if(count && count != NO_VAL) {
-			itr = list_iterator_create(object->partition_list);
+			itr = list_iterator_create(object->cluster_list);
 			while((tmp_info = list_next(itr))) {
 				packstr(tmp_info, buffer);
-			}
+			} 
 			list_iterator_destroy(itr);
 		}
 		count = NO_VAL;
 
-		if(object->step_list)
-			count = list_count(object->step_list);
-	
+		if(object->id_list)
+			count = list_count(object->id_list);
+	 
 		pack32(count, buffer);
 		if(count && count != NO_VAL) {
-			itr = list_iterator_create(object->step_list);
-			while((job = list_next(itr))) {
-				pack_jobacct_selected_step(job, rpc_version, 
-							   buffer);
-			}
+			itr = list_iterator_create(object->id_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			} 
 			list_iterator_destroy(itr);
 		}
 		count = NO_VAL;
 
-		if(object->state_list)
-			count = list_count(object->state_list);
-	
+		if(object->info_list)
+			count = list_count(object->info_list);
+	 
 		pack32(count, buffer);
 		if(count && count != NO_VAL) {
-			itr = list_iterator_create(object->state_list);
+			itr = list_iterator_create(object->info_list);
 			while((tmp_info = list_next(itr))) {
 				packstr(tmp_info, buffer);
-			}
+			} 
 			list_iterator_destroy(itr);
 		}
 		count = NO_VAL;
 
-		pack32(object->usage_end, buffer);
-		pack32(object->usage_start, buffer);
-
-		if(object->userid_list)
-			count = list_count(object->userid_list);
-	
+		if(object->name_list)
+			count = list_count(object->name_list);
+	 
 		pack32(count, buffer);
 		if(count && count != NO_VAL) {
-			itr = list_iterator_create(object->userid_list);
+			itr = list_iterator_create(object->name_list);
 			while((tmp_info = list_next(itr))) {
 				packstr(tmp_info, buffer);
-			}
+			} 
 			list_iterator_destroy(itr);
 		}
 		count = NO_VAL;
 
+		pack_time(object->time_end, buffer);
+		pack_time(object->time_start, buffer);
+		if(object->user_list)
+			count = list_count(object->user_list);
+	 
 		pack32(count, buffer);
 		if(count && count != NO_VAL) {
-			itr = list_iterator_create(object->wckey_list);
+			itr = list_iterator_create(object->user_list);
 			while((tmp_info = list_next(itr))) {
 				packstr(tmp_info, buffer);
-			}
+			} 
 			list_iterator_destroy(itr);
 		}
 		count = NO_VAL;
-
-		pack16(object->without_steps, buffer);
-	} else {
+		
+		pack16(object->with_assoc_info, buffer);
+	} else if(rpc_version >= 3) {
 		if(!object) {
 			pack32(NO_VAL, buffer);
 			pack32(NO_VAL, buffer);
 			pack32(NO_VAL, buffer);
-			pack16(0, buffer);
-			pack32(NO_VAL, buffer);
 			pack32(NO_VAL, buffer);
 			pack32(NO_VAL, buffer);
 			pack32(NO_VAL, buffer);
@@ -4116,7 +6082,6 @@ extern void pack_acct_job_cond(void *in, uint16_t rpc_version, Buf buffer)
 			pack16(0, buffer);
 			return;
 		}
-
 		if(object->acct_list)
 			count = list_count(object->acct_list);
 	
@@ -4130,573 +6095,496 @@ extern void pack_acct_job_cond(void *in, uint16_t rpc_version, Buf buffer)
 		}
 		count = NO_VAL;
 
-		if(object->associd_list)
-			count = list_count(object->associd_list);
+		if(object->action_list)
+			count = list_count(object->action_list);
 	
 		pack32(count, buffer);
 		if(count && count != NO_VAL) {
-			itr = list_iterator_create(object->associd_list);
+			itr = list_iterator_create(object->action_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+
+		if(object->actor_list) 
+			count = list_count(object->actor_list);
+
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->actor_list);
 			while((tmp_info = list_next(itr))) {
 				packstr(tmp_info, buffer);
 			}
+			list_iterator_destroy(itr); 
 		}
 		count = NO_VAL;
 
 		if(object->cluster_list)
 			count = list_count(object->cluster_list);
-	
+	 
 		pack32(count, buffer);
 		if(count && count != NO_VAL) {
 			itr = list_iterator_create(object->cluster_list);
 			while((tmp_info = list_next(itr))) {
 				packstr(tmp_info, buffer);
-			}
+			} 
 			list_iterator_destroy(itr);
 		}
 		count = NO_VAL;
 
-		pack16(object->duplicates, buffer);
+		if(object->id_list)
+			count = list_count(object->id_list);
+	 
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->id_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			} 
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
 
-		if(object->groupid_list)
-			count = list_count(object->groupid_list);
-	
+		if(object->info_list)
+			count = list_count(object->info_list);
+	 
 		pack32(count, buffer);
 		if(count && count != NO_VAL) {
-			itr = list_iterator_create(object->groupid_list);
+			itr = list_iterator_create(object->info_list);
 			while((tmp_info = list_next(itr))) {
 				packstr(tmp_info, buffer);
-			}
+			} 
+			list_iterator_destroy(itr);
 		}
 		count = NO_VAL;
-	
-		if(object->partition_list)
-			count = list_count(object->partition_list);
-	
+
+		if(object->name_list)
+			count = list_count(object->name_list);
+	 
 		pack32(count, buffer);
 		if(count && count != NO_VAL) {
-			itr = list_iterator_create(object->partition_list);
+			itr = list_iterator_create(object->name_list);
 			while((tmp_info = list_next(itr))) {
 				packstr(tmp_info, buffer);
-			}
+			} 
 			list_iterator_destroy(itr);
 		}
 		count = NO_VAL;
 
-		if(object->step_list)
-			count = list_count(object->step_list);
-	
+		pack32(object->time_end, buffer);
+		pack32(object->time_start, buffer);
+		if(object->user_list)
+			count = list_count(object->user_list);
+	 
 		pack32(count, buffer);
 		if(count && count != NO_VAL) {
-			itr = list_iterator_create(object->step_list);
-			while((job = list_next(itr))) {
-				pack_jobacct_selected_step(job, rpc_version,
-							   buffer);
+			itr = list_iterator_create(object->user_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			} 
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+		
+		pack16(object->with_assoc_info, buffer);
+	} else {
+		if(!object) {
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(0, buffer);
+			pack32(0, buffer);
+			return;
+		}
+		if(object->action_list)
+			count = list_count(object->action_list);
+	
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->action_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
 			}
 			list_iterator_destroy(itr);
 		}
 		count = NO_VAL;
 
-		if(object->state_list)
-			count = list_count(object->state_list);
-	
+		if(object->actor_list) 
+			count = list_count(object->actor_list);
+
 		pack32(count, buffer);
 		if(count && count != NO_VAL) {
-			itr = list_iterator_create(object->state_list);
+			itr = list_iterator_create(object->actor_list);
 			while((tmp_info = list_next(itr))) {
 				packstr(tmp_info, buffer);
 			}
-			list_iterator_destroy(itr);
+			list_iterator_destroy(itr); 
 		}
 		count = NO_VAL;
 
-		pack32(object->usage_end, buffer);
-		pack32(object->usage_start, buffer);
-
-		if(object->userid_list)
-			count = list_count(object->userid_list);
-	
+		if(object->id_list)
+			count = list_count(object->id_list);
+	 
 		pack32(count, buffer);
 		if(count && count != NO_VAL) {
-			itr = list_iterator_create(object->userid_list);
+			itr = list_iterator_create(object->id_list);
 			while((tmp_info = list_next(itr))) {
 				packstr(tmp_info, buffer);
-			}
+			} 
 			list_iterator_destroy(itr);
 		}
 		count = NO_VAL;
 
-		pack16(object->without_steps, buffer);
-	}
+		pack32(object->time_end, buffer);
+		pack32(object->time_start, buffer);
+	} 
 }
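+/*
+ * Transaction conditions carry three layouts: rpc_version >= 5 sends
+ * the times with pack_time() and includes the acct, action, actor,
+ * cluster, id, info, name and user lists plus with_assoc_info;
+ * rpc_version >= 3 is the same field set with 32-bit times; anything
+ * older understands only the action, actor and id lists followed by
+ * two 32-bit times.
+ */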
 
-extern int unpack_acct_job_cond(void **object, uint16_t rpc_version, Buf buffer)
+extern int unpack_acct_txn_cond(void **object, uint16_t rpc_version, Buf buffer)
 {
 	uint32_t uint32_tmp;
 	int i;
 	uint32_t count;
-	acct_job_cond_t *object_ptr = xmalloc(sizeof(acct_job_cond_t));
+	acct_txn_cond_t *object_ptr = xmalloc(sizeof(acct_txn_cond_t));
 	char *tmp_info = NULL;
-	jobacct_selected_step_t *job = NULL;
 
 	*object = object_ptr;
-
-	if(rpc_version >= 4) {
+	if (rpc_version >= 5) {
 		safe_unpack32(&count, buffer);
 		if(count != NO_VAL) {
-			object_ptr->acct_list = list_create(slurm_destroy_char);
+			object_ptr->acct_list =
+				list_create(slurm_destroy_char);
 			for(i=0; i<count; i++) {
-				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
-						       buffer);
+				safe_unpackstr_xmalloc(&tmp_info,
+						       &uint32_tmp, buffer);
 				list_append(object_ptr->acct_list, tmp_info);
 			}
 		}
 
 		safe_unpack32(&count, buffer);
 		if(count != NO_VAL) {
-			object_ptr->associd_list =
+			object_ptr->action_list =
 				list_create(slurm_destroy_char);
 			for(i=0; i<count; i++) {
-				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
-						       buffer);
-				list_append(object_ptr->associd_list, tmp_info);
+				safe_unpackstr_xmalloc(&tmp_info,
+						       &uint32_tmp, buffer);
+				list_append(object_ptr->action_list, tmp_info);
 			}
 		}
 
 		safe_unpack32(&count, buffer);
 		if(count != NO_VAL) {
-			object_ptr->cluster_list =
+			object_ptr->actor_list = 
 				list_create(slurm_destroy_char);
 			for(i=0; i<count; i++) {
-				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
-						       buffer);
-				list_append(object_ptr->cluster_list, tmp_info);
+				safe_unpackstr_xmalloc(&tmp_info,
+						       &uint32_tmp, buffer);
+				list_append(object_ptr->actor_list, tmp_info);
 			}
 		}
 
-		safe_unpack16(&object_ptr->duplicates, buffer);
-
-		safe_unpack32(&count, buffer);
-		if(count != NO_VAL) {
-			object_ptr->groupid_list = 
-				list_create(slurm_destroy_char);
-			for(i=0; i<count; i++) {
-				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp, 
-						       buffer);
-				list_append(object_ptr->groupid_list, tmp_info);
-			}
-		}
-	
 		safe_unpack32(&count, buffer);
 		if(count != NO_VAL) {
-			object_ptr->partition_list =
+			object_ptr->cluster_list =
 				list_create(slurm_destroy_char);
 			for(i=0; i<count; i++) {
 				safe_unpackstr_xmalloc(&tmp_info,
 						       &uint32_tmp, buffer);
-				list_append(object_ptr->partition_list, 
-					    tmp_info);
+				list_append(object_ptr->cluster_list, tmp_info);
 			}
 		}
 
 		safe_unpack32(&count, buffer);
 		if(count != NO_VAL) {
-			object_ptr->step_list =
-				list_create(destroy_jobacct_selected_step);
+			object_ptr->id_list = list_create(slurm_destroy_char);
 			for(i=0; i<count; i++) {
-				unpack_jobacct_selected_step(&job, rpc_version,
-							     buffer);
-				list_append(object_ptr->step_list, job);
+				safe_unpackstr_xmalloc(&tmp_info,
+						       &uint32_tmp, buffer);
+				list_append(object_ptr->id_list, tmp_info);
 			}
 		}
 
 		safe_unpack32(&count, buffer);
 		if(count != NO_VAL) {
-			object_ptr->state_list =
+			object_ptr->info_list =
 				list_create(slurm_destroy_char);
 			for(i=0; i<count; i++) {
 				safe_unpackstr_xmalloc(&tmp_info,
 						       &uint32_tmp, buffer);
-				list_append(object_ptr->state_list, tmp_info);
+				list_append(object_ptr->info_list, tmp_info);
 			}
 		}
-	
-		safe_unpack32(&object_ptr->usage_end, buffer);
-		safe_unpack32(&object_ptr->usage_start, buffer);
 
 		safe_unpack32(&count, buffer);
 		if(count != NO_VAL) {
-			object_ptr->userid_list = 
+			object_ptr->name_list =
 				list_create(slurm_destroy_char);
 			for(i=0; i<count; i++) {
-				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
-						       buffer);
-				list_append(object_ptr->userid_list, tmp_info);
+				safe_unpackstr_xmalloc(&tmp_info,
+						       &uint32_tmp, buffer);
+				list_append(object_ptr->name_list, tmp_info);
 			}
 		}
 
+		safe_unpack_time(&object_ptr->time_end, buffer);
+		safe_unpack_time(&object_ptr->time_start, buffer);
+
 		safe_unpack32(&count, buffer);
 		if(count != NO_VAL) {
-			object_ptr->wckey_list = 
+			object_ptr->user_list =
 				list_create(slurm_destroy_char);
 			for(i=0; i<count; i++) {
-				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
-						       buffer);
-				list_append(object_ptr->wckey_list, tmp_info);
+				safe_unpackstr_xmalloc(&tmp_info,
+						       &uint32_tmp, buffer);
+				list_append(object_ptr->user_list, tmp_info);
 			}
 		}
 
-		safe_unpack16(&object_ptr->without_steps, buffer);
-	} else {
+		safe_unpack16(&object_ptr->with_assoc_info, buffer);
+	} else if (rpc_version >= 3) {
 		safe_unpack32(&count, buffer);
 		if(count != NO_VAL) {
-			object_ptr->acct_list = list_create(slurm_destroy_char);
+			object_ptr->acct_list =
+				list_create(slurm_destroy_char);
 			for(i=0; i<count; i++) {
-				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
-						       buffer);
+				safe_unpackstr_xmalloc(&tmp_info,
+						       &uint32_tmp, buffer);
 				list_append(object_ptr->acct_list, tmp_info);
 			}
 		}
 
 		safe_unpack32(&count, buffer);
 		if(count != NO_VAL) {
-			object_ptr->associd_list =
+			object_ptr->action_list =
 				list_create(slurm_destroy_char);
 			for(i=0; i<count; i++) {
-				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
-						       buffer);
-				list_append(object_ptr->associd_list, tmp_info);
+				safe_unpackstr_xmalloc(&tmp_info,
+						       &uint32_tmp, buffer);
+				list_append(object_ptr->action_list, tmp_info);
 			}
 		}
 
 		safe_unpack32(&count, buffer);
 		if(count != NO_VAL) {
-			object_ptr->cluster_list =
+			object_ptr->actor_list = 
 				list_create(slurm_destroy_char);
 			for(i=0; i<count; i++) {
-				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
-						       buffer);
-				list_append(object_ptr->cluster_list, tmp_info);
+				safe_unpackstr_xmalloc(&tmp_info,
+						       &uint32_tmp, buffer);
+				list_append(object_ptr->actor_list, tmp_info);
 			}
 		}
 
-		safe_unpack16(&object_ptr->duplicates, buffer);
-
-		safe_unpack32(&count, buffer);
-		if(count != NO_VAL) {
-			object_ptr->groupid_list = 
-				list_create(slurm_destroy_char);
-			for(i=0; i<count; i++) {
-				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp, 
-						       buffer);
-				list_append(object_ptr->groupid_list, tmp_info);
-			}
-		}
-	
 		safe_unpack32(&count, buffer);
 		if(count != NO_VAL) {
-			object_ptr->partition_list =
+			object_ptr->cluster_list =
 				list_create(slurm_destroy_char);
 			for(i=0; i<count; i++) {
 				safe_unpackstr_xmalloc(&tmp_info,
 						       &uint32_tmp, buffer);
-				list_append(object_ptr->partition_list, 
-					    tmp_info);
+				list_append(object_ptr->cluster_list, tmp_info);
 			}
 		}
 
 		safe_unpack32(&count, buffer);
 		if(count != NO_VAL) {
-			object_ptr->step_list =
-				list_create(destroy_jobacct_selected_step);
+			object_ptr->id_list = list_create(slurm_destroy_char);
 			for(i=0; i<count; i++) {
-				unpack_jobacct_selected_step(&job, rpc_version,
-							     buffer);
-				list_append(object_ptr->step_list, job);
+				safe_unpackstr_xmalloc(&tmp_info,
+						       &uint32_tmp, buffer);
+				list_append(object_ptr->id_list, tmp_info);
 			}
 		}
 
 		safe_unpack32(&count, buffer);
 		if(count != NO_VAL) {
-			object_ptr->state_list =
+			object_ptr->info_list =
 				list_create(slurm_destroy_char);
 			for(i=0; i<count; i++) {
 				safe_unpackstr_xmalloc(&tmp_info,
 						       &uint32_tmp, buffer);
-				list_append(object_ptr->state_list, tmp_info);
+				list_append(object_ptr->info_list, tmp_info);
 			}
 		}
-	
-		safe_unpack32(&object_ptr->usage_end, buffer);
-		safe_unpack32(&object_ptr->usage_start, buffer);
 
 		safe_unpack32(&count, buffer);
 		if(count != NO_VAL) {
-			object_ptr->userid_list = 
+			object_ptr->name_list =
 				list_create(slurm_destroy_char);
 			for(i=0; i<count; i++) {
-				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
-						       buffer);
-				list_append(object_ptr->userid_list, tmp_info);
+				safe_unpackstr_xmalloc(&tmp_info,
+						       &uint32_tmp, buffer);
+				list_append(object_ptr->name_list, tmp_info);
 			}
 		}
 
-		safe_unpack16(&object_ptr->without_steps, buffer);
-	}
-
-	return SLURM_SUCCESS;
-
-unpack_error:
-	destroy_acct_job_cond(object_ptr);
-	*object = NULL;
-	return SLURM_ERROR;
-}
-
-extern void pack_acct_qos_cond(void *in, uint16_t rpc_version, Buf buffer)
-{
-	uint32_t count = NO_VAL;
-	char *tmp_info = NULL;
-	ListIterator itr = NULL;
-	acct_qos_cond_t *object = (acct_qos_cond_t *)in;
-
-	if(!object) {
-		pack32(NO_VAL, buffer);
-		pack32(NO_VAL, buffer);
-		pack32(NO_VAL, buffer);
-		pack16(0, buffer);
-		return;
-	}
-
-	if(object->description_list)
-		count = list_count(object->description_list);
-	
-	pack32(count, buffer);
-	if(count && count != NO_VAL) {
-		itr = list_iterator_create(object->description_list);
-		while((tmp_info = list_next(itr))) {
-			packstr(tmp_info, buffer);
-		}
-		list_iterator_destroy(itr);
-	}
-	count = NO_VAL;
-
-	if(object->id_list)
-		count = list_count(object->id_list);
-	
-	pack32(count, buffer);
-	if(count && count != NO_VAL) {
-		itr = list_iterator_create(object->id_list);
-		while((tmp_info = list_next(itr))) {
-			packstr(tmp_info, buffer);
-		}
-		list_iterator_destroy(itr);
-	}
-	count = NO_VAL;
-
-	if(object->name_list) 
-		count = list_count(object->name_list);
-
-	pack32(count, buffer);
-	if(count && count != NO_VAL) {
-		itr = list_iterator_create(object->name_list);
-		while((tmp_info = list_next(itr))) {
-			packstr(tmp_info, buffer);
-		}
-		list_iterator_destroy(itr); 
-	}
-	count = NO_VAL;
-
-	pack16(object->with_deleted, buffer);
-}
-
-extern int unpack_acct_qos_cond(void **object, uint16_t rpc_version, Buf buffer)
-{
-	uint32_t uint32_tmp;
-	int i;
-	uint32_t count;
-	acct_qos_cond_t *object_ptr = xmalloc(sizeof(acct_qos_cond_t));
-	char *tmp_info = NULL;
-
-	*object = object_ptr;
+		safe_unpack32(&uint32_tmp, buffer);
+		object_ptr->time_end = uint32_tmp;
+		safe_unpack32(&uint32_tmp, buffer);
+		object_ptr->time_start = uint32_tmp;
 
-	safe_unpack32(&count, buffer);
-	if(count != NO_VAL) {
-		object_ptr->description_list = list_create(slurm_destroy_char);
-		for(i=0; i<count; i++) {
-			safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp, buffer);
-			list_append(object_ptr->description_list, tmp_info);
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->user_list =
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info,
+						       &uint32_tmp, buffer);
+				list_append(object_ptr->user_list, tmp_info);
+			}
 		}
-	}
 
-	safe_unpack32(&count, buffer);
-	if(count != NO_VAL) {
-		object_ptr->id_list = list_create(slurm_destroy_char);
-		for(i=0; i<count; i++) {
-			safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp, buffer);
-			list_append(object_ptr->id_list, tmp_info);
+		safe_unpack16(&object_ptr->with_assoc_info, buffer);
+	} else {
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->action_list =
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info,
+						       &uint32_tmp, buffer);
+				list_append(object_ptr->action_list, tmp_info);
+			}
 		}
-	}
 
-	safe_unpack32(&count, buffer);
-	if(count != NO_VAL) {
-		object_ptr->name_list = list_create(slurm_destroy_char);
-		for(i=0; i<count; i++) {
-			safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp, buffer);
-			list_append(object_ptr->name_list, tmp_info);
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->actor_list = 
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info,
+						       &uint32_tmp, buffer);
+				list_append(object_ptr->actor_list, tmp_info);
+			}
+		}
+
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->id_list = list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info,
+						       &uint32_tmp, buffer);
+				list_append(object_ptr->id_list, tmp_info);
+			}
 		}
+
+		safe_unpack32(&uint32_tmp, buffer);
+		object_ptr->time_end = uint32_tmp;
+		safe_unpack32(&uint32_tmp, buffer);
+		object_ptr->time_start = uint32_tmp;
 	}
 
-	safe_unpack16(&object_ptr->with_deleted, buffer);
 	return SLURM_SUCCESS;
 
 unpack_error:
-	destroy_acct_qos_cond(object_ptr);
+	destroy_acct_txn_cond(object_ptr);
 	*object = NULL;
 	return SLURM_ERROR;
 }
 
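The unpack side must consume fields in exactly the order the sender's rpc_version packed them, which is why each version branch above mirrors a pack branch field for field. A hedged sketch of the matching string-list decode as a hypothetical helper (it assumes the safe_unpack* macros' goto-unpack_error convention from src/common/pack.h):

static int _unpack_str_list(List *l, Buf buffer)
{
	uint32_t count, uint32_tmp;
	char *item = NULL;
	int i;

	safe_unpack32(&count, buffer);
	if (count != NO_VAL) {
		*l = list_create(slurm_destroy_char);
		for (i = 0; i < count; i++) {
			safe_unpackstr_xmalloc(&item, &uint32_tmp, buffer);
			list_append(*l, item);
		}
	}
	return SLURM_SUCCESS;

unpack_error:
	return SLURM_ERROR;
}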
-extern void pack_acct_txn_cond(void *in, uint16_t rpc_version, Buf buffer)
+extern void pack_acct_wckey_cond(void *in, uint16_t rpc_version, Buf buffer)
 {
-	uint32_t count = NO_VAL;
 	char *tmp_info = NULL;
-	ListIterator itr = NULL;
-	acct_txn_cond_t *object = (acct_txn_cond_t *)in;
+	uint32_t count = NO_VAL;
 
-	if(rpc_version >= 3) {
+	ListIterator itr = NULL;
+	acct_wckey_cond_t *object = (acct_wckey_cond_t *)in;
+	if(rpc_version >= 5) {
 		if(!object) {
 			pack32(NO_VAL, buffer);
 			pack32(NO_VAL, buffer);
 			pack32(NO_VAL, buffer);
-			pack32(NO_VAL, buffer);
-			pack32(NO_VAL, buffer);
-			pack32(NO_VAL, buffer);
-			pack32(0, buffer);
-			pack32(0, buffer);
-			pack32(NO_VAL, buffer);
-			pack16(0, buffer);
-			return;
-		}
-		if(object->acct_list)
-			count = list_count(object->acct_list);
-	
-		pack32(count, buffer);
-		if(count && count != NO_VAL) {
-			itr = list_iterator_create(object->acct_list);
-			while((tmp_info = list_next(itr))) {
-				packstr(tmp_info, buffer);
-			}
-			list_iterator_destroy(itr);
-		}
-		count = NO_VAL;
 
-		if(object->action_list)
-			count = list_count(object->action_list);
-	
-		pack32(count, buffer);
-		if(count && count != NO_VAL) {
-			itr = list_iterator_create(object->action_list);
-			while((tmp_info = list_next(itr))) {
-				packstr(tmp_info, buffer);
-			}
-			list_iterator_destroy(itr);
-		}
-		count = NO_VAL;
+			pack_time(0, buffer);
+			pack_time(0, buffer);
 
-		if(object->actor_list) 
-			count = list_count(object->actor_list);
+			pack32(NO_VAL, buffer);
 
-		pack32(count, buffer);
-		if(count && count != NO_VAL) {
-			itr = list_iterator_create(object->actor_list);
-			while((tmp_info = list_next(itr))) {
-				packstr(tmp_info, buffer);
-			}
-			list_iterator_destroy(itr); 
+			pack16(0, buffer);
+			pack16(0, buffer);
+			return;
 		}
-		count = NO_VAL;
 
 		if(object->cluster_list)
 			count = list_count(object->cluster_list);
-	 
+	
 		pack32(count, buffer);
 		if(count && count != NO_VAL) {
 			itr = list_iterator_create(object->cluster_list);
 			while((tmp_info = list_next(itr))) {
 				packstr(tmp_info, buffer);
-			} 
+			}
 			list_iterator_destroy(itr);
 		}
 		count = NO_VAL;
 
 		if(object->id_list)
 			count = list_count(object->id_list);
-	 
+	
 		pack32(count, buffer);
 		if(count && count != NO_VAL) {
 			itr = list_iterator_create(object->id_list);
 			while((tmp_info = list_next(itr))) {
 				packstr(tmp_info, buffer);
-			} 
-			list_iterator_destroy(itr);
-		}
-		count = NO_VAL;
-
-		if(object->info_list)
-			count = list_count(object->info_list);
-	 
-		pack32(count, buffer);
-		if(count && count != NO_VAL) {
-			itr = list_iterator_create(object->info_list);
-			while((tmp_info = list_next(itr))) {
-				packstr(tmp_info, buffer);
-			} 
-			list_iterator_destroy(itr);
+			}
 		}
 		count = NO_VAL;
 
 		if(object->name_list)
 			count = list_count(object->name_list);
-	 
+	
 		pack32(count, buffer);
 		if(count && count != NO_VAL) {
 			itr = list_iterator_create(object->name_list);
 			while((tmp_info = list_next(itr))) {
 				packstr(tmp_info, buffer);
-			} 
+			}
 			list_iterator_destroy(itr);
 		}
 		count = NO_VAL;
 
-		pack32(object->time_end, buffer);
-		pack32(object->time_start, buffer);
+		pack_time(object->usage_end, buffer);
+		pack_time(object->usage_start, buffer);
+
 		if(object->user_list)
 			count = list_count(object->user_list);
-	 
+	
 		pack32(count, buffer);
 		if(count && count != NO_VAL) {
 			itr = list_iterator_create(object->user_list);
 			while((tmp_info = list_next(itr))) {
 				packstr(tmp_info, buffer);
-			} 
+			}
 			list_iterator_destroy(itr);
 		}
 		count = NO_VAL;
-		
-		pack16(object->with_assoc_info, buffer);
+
+		pack16(object->with_usage, buffer);
+		pack16(object->with_deleted, buffer);
 	} else {
 		if(!object) {
 			pack32(NO_VAL, buffer);
 			pack32(NO_VAL, buffer);
 			pack32(NO_VAL, buffer);
+
 			pack32(0, buffer);
 			pack32(0, buffer);
+
+			pack32(NO_VAL, buffer);
+
+			pack16(0, buffer);
+			pack16(0, buffer);
 			return;
 		}
-		if(object->action_list)
-			count = list_count(object->action_list);
+
+		if(object->cluster_list)
+			count = list_count(object->cluster_list);
 	
 		pack32(count, buffer);
 		if(count && count != NO_VAL) {
-			itr = list_iterator_create(object->action_list);
+			itr = list_iterator_create(object->cluster_list);
 			while((tmp_info = list_next(itr))) {
 				packstr(tmp_info, buffer);
 			}
@@ -4704,88 +6592,73 @@ extern void pack_acct_txn_cond(void *in, uint16_t rpc_version, Buf buffer)
 		}
 		count = NO_VAL;
 
-		if(object->actor_list) 
-			count = list_count(object->actor_list);
+		if(object->id_list)
+			count = list_count(object->id_list);
+	
+		pack32(count, buffer);
+		if(count && count != NO_VAL) {
+			itr = list_iterator_create(object->id_list);
+			while((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
 
+		if(object->name_list)
+			count = list_count(object->name_list);
+	
 		pack32(count, buffer);
 		if(count && count != NO_VAL) {
-			itr = list_iterator_create(object->actor_list);
+			itr = list_iterator_create(object->name_list);
 			while((tmp_info = list_next(itr))) {
 				packstr(tmp_info, buffer);
 			}
-			list_iterator_destroy(itr); 
+			list_iterator_destroy(itr);
 		}
 		count = NO_VAL;
 
-		if(object->id_list)
-			count = list_count(object->id_list);
-	 
+		pack32(object->usage_end, buffer);
+		pack32(object->usage_start, buffer);
+
+		if(object->user_list)
+			count = list_count(object->user_list);
+	
 		pack32(count, buffer);
 		if(count && count != NO_VAL) {
-			itr = list_iterator_create(object->id_list);
+			itr = list_iterator_create(object->user_list);
 			while((tmp_info = list_next(itr))) {
 				packstr(tmp_info, buffer);
-			} 
+			}
 			list_iterator_destroy(itr);
 		}
 		count = NO_VAL;
 
-		pack32(object->time_end, buffer);
-		pack32(object->time_start, buffer);
-	} 
+		pack16(object->with_usage, buffer);
+		pack16(object->with_deleted, buffer);
+	}
 }
 
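Because pack and unpack must stay symmetric, a quick round-trip check is a useful sanity test whenever a condition struct changes shape. A hypothetical sketch (it assumes BUF_SIZE from src/common/pack.h and a current protocol version constant such as SLURMDBD_VERSION):

static void _check_wckey_cond_roundtrip(acct_wckey_cond_t *cond)
{
	Buf buf = init_buf(BUF_SIZE);
	acct_wckey_cond_t *copy = NULL;

	pack_acct_wckey_cond(cond, SLURMDBD_VERSION, buf);
	set_buf_offset(buf, 0);		/* rewind for unpacking */
	if (unpack_acct_wckey_cond((void **)&copy, SLURMDBD_VERSION, buf)
	    != SLURM_SUCCESS)
		error("wckey cond round-trip failed");
	destroy_acct_wckey_cond(copy);
	free_buf(buf);
}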
-extern int unpack_acct_txn_cond(void **object, uint16_t rpc_version, Buf buffer)
+extern int unpack_acct_wckey_cond(void **object, uint16_t rpc_version,
+				  Buf buffer)
 {
 	uint32_t uint32_tmp;
 	int i;
 	uint32_t count;
-	acct_txn_cond_t *object_ptr = xmalloc(sizeof(acct_txn_cond_t));
-	char *tmp_info = NULL;
-
-	*object = object_ptr;
-	if (rpc_version >= 3) {
-		safe_unpack32(&count, buffer);
-		if(count != NO_VAL) {
-			object_ptr->acct_list =
-				list_create(slurm_destroy_char);
-			for(i=0; i<count; i++) {
-				safe_unpackstr_xmalloc(&tmp_info,
-						       &uint32_tmp, buffer);
-				list_append(object_ptr->acct_list, tmp_info);
-			}
-		}
-
-		safe_unpack32(&count, buffer);
-		if(count != NO_VAL) {
-			object_ptr->action_list =
-				list_create(slurm_destroy_char);
-			for(i=0; i<count; i++) {
-				safe_unpackstr_xmalloc(&tmp_info,
-						       &uint32_tmp, buffer);
-				list_append(object_ptr->action_list, tmp_info);
-			}
-		}
-
-		safe_unpack32(&count, buffer);
-		if(count != NO_VAL) {
-			object_ptr->actor_list = 
-				list_create(slurm_destroy_char);
-			for(i=0; i<count; i++) {
-				safe_unpackstr_xmalloc(&tmp_info,
-						       &uint32_tmp, buffer);
-				list_append(object_ptr->actor_list, tmp_info);
-			}
-		}
+	acct_wckey_cond_t *object_ptr =	xmalloc(sizeof(acct_wckey_cond_t));
+	char *tmp_info = NULL;
+
+	*object = object_ptr;
 
+	if(rpc_version >= 5) {
 		safe_unpack32(&count, buffer);
 		if(count != NO_VAL) {
-			object_ptr->cluster_list =
+			object_ptr->cluster_list = 
 				list_create(slurm_destroy_char);
 			for(i=0; i<count; i++) {
-				safe_unpackstr_xmalloc(&tmp_info,
-						       &uint32_tmp, buffer);
-				list_append(object_ptr->cluster_list, tmp_info);
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->cluster_list, 
+					    tmp_info);
 			}
 		}
 
@@ -4793,236 +6666,92 @@ extern int unpack_acct_txn_cond(void **object, uint16_t rpc_version, Buf buffer)
 		if(count != NO_VAL) {
 			object_ptr->id_list = list_create(slurm_destroy_char);
 			for(i=0; i<count; i++) {
-				safe_unpackstr_xmalloc(&tmp_info,
-						       &uint32_tmp, buffer);
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp, 
+						       buffer);
 				list_append(object_ptr->id_list, tmp_info);
 			}
 		}
-
-		safe_unpack32(&count, buffer);
-		if(count != NO_VAL) {
-			object_ptr->info_list =
-				list_create(slurm_destroy_char);
-			for(i=0; i<count; i++) {
-				safe_unpackstr_xmalloc(&tmp_info,
-						       &uint32_tmp, buffer);
-				list_append(object_ptr->info_list, tmp_info);
-			}
-		}
-
+	
 		safe_unpack32(&count, buffer);
 		if(count != NO_VAL) {
-			object_ptr->name_list =
+			object_ptr->name_list = 
 				list_create(slurm_destroy_char);
 			for(i=0; i<count; i++) {
-				safe_unpackstr_xmalloc(&tmp_info,
-						       &uint32_tmp, buffer);
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
 				list_append(object_ptr->name_list, tmp_info);
 			}
 		}
 
-		safe_unpack32(&object_ptr->time_end, buffer);
-		safe_unpack32(&object_ptr->time_start, buffer);
+		safe_unpack_time(&object_ptr->usage_end, buffer);
+		safe_unpack_time(&object_ptr->usage_start, buffer);
 
 		safe_unpack32(&count, buffer);
 		if(count != NO_VAL) {
-			object_ptr->user_list =
+			object_ptr->user_list = 
 				list_create(slurm_destroy_char);
 			for(i=0; i<count; i++) {
-				safe_unpackstr_xmalloc(&tmp_info,
-						       &uint32_tmp, buffer);
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
 				list_append(object_ptr->user_list, tmp_info);
 			}
 		}
 
-		safe_unpack16(&object_ptr->with_assoc_info, buffer);
+		safe_unpack16(&object_ptr->with_usage, buffer);
+		safe_unpack16(&object_ptr->with_deleted, buffer);
 	} else {
 		safe_unpack32(&count, buffer);
 		if(count != NO_VAL) {
-			object_ptr->action_list =
+			object_ptr->cluster_list = 
 				list_create(slurm_destroy_char);
 			for(i=0; i<count; i++) {
-				safe_unpackstr_xmalloc(&tmp_info,
-						       &uint32_tmp, buffer);
-				list_append(object_ptr->action_list, tmp_info);
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->cluster_list, 
+					    tmp_info);
 			}
 		}
 
 		safe_unpack32(&count, buffer);
 		if(count != NO_VAL) {
-			object_ptr->actor_list = 
-				list_create(slurm_destroy_char);
+			object_ptr->id_list = list_create(slurm_destroy_char);
 			for(i=0; i<count; i++) {
-				safe_unpackstr_xmalloc(&tmp_info,
-						       &uint32_tmp, buffer);
-				list_append(object_ptr->actor_list, tmp_info);
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp, 
+						       buffer);
+				list_append(object_ptr->id_list, tmp_info);
 			}
 		}
-
+	
 		safe_unpack32(&count, buffer);
 		if(count != NO_VAL) {
-			object_ptr->id_list = list_create(slurm_destroy_char);
+			object_ptr->name_list = 
+				list_create(slurm_destroy_char);
 			for(i=0; i<count; i++) {
-				safe_unpackstr_xmalloc(&tmp_info,
-						       &uint32_tmp, buffer);
-				list_append(object_ptr->id_list, tmp_info);
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->name_list, tmp_info);
 			}
 		}
 
-		safe_unpack32(&object_ptr->time_end, buffer);
-		safe_unpack32(&object_ptr->time_start, buffer);
-	} 
-
-	return SLURM_SUCCESS;
-
-unpack_error:
-	destroy_acct_txn_cond(object_ptr);
-	*object = NULL;
-	return SLURM_ERROR;
-}
-
-extern void pack_acct_wckey_cond(void *in, uint16_t rpc_version, Buf buffer)
-{
-	char *tmp_info = NULL;
-	uint32_t count = NO_VAL;
-
-	ListIterator itr = NULL;
-	acct_wckey_cond_t *object = (acct_wckey_cond_t *)in;
-
-	if(!object) {
-		pack32(NO_VAL, buffer);
-		pack32(NO_VAL, buffer);
-
-		pack32(NO_VAL, buffer);
-		pack32(NO_VAL, buffer);
-
-		pack32(NO_VAL, buffer);
-		pack32(NO_VAL, buffer);
-
-		pack16(0, buffer);
-		pack16(0, buffer);
-		return;
-	}
-
-	if(object->cluster_list)
-		count = list_count(object->cluster_list);
-	
-	pack32(count, buffer);
-	if(count && count != NO_VAL) {
-		itr = list_iterator_create(object->cluster_list);
-		while((tmp_info = list_next(itr))) {
-			packstr(tmp_info, buffer);
-		}
-		list_iterator_destroy(itr);
-	}
-	count = NO_VAL;
-
-	if(object->id_list)
-		count = list_count(object->id_list);
-	
-	pack32(count, buffer);
-	if(count && count != NO_VAL) {
-		itr = list_iterator_create(object->id_list);
-		while((tmp_info = list_next(itr))) {
-			packstr(tmp_info, buffer);
-		}
-	}
-	count = NO_VAL;
-
-	if(object->name_list)
-		count = list_count(object->name_list);
-	
-	pack32(count, buffer);
-	if(count && count != NO_VAL) {
-		itr = list_iterator_create(object->name_list);
-		while((tmp_info = list_next(itr))) {
-			packstr(tmp_info, buffer);
-		}
-		list_iterator_destroy(itr);
-	}
-	count = NO_VAL;
-
-	pack32(object->usage_end, buffer);
-	pack32(object->usage_start, buffer);
-
-	if(object->user_list)
-		count = list_count(object->user_list);
-	
-	pack32(count, buffer);
-	if(count && count != NO_VAL) {
-		itr = list_iterator_create(object->user_list);
-		while((tmp_info = list_next(itr))) {
-			packstr(tmp_info, buffer);
-		}
-		list_iterator_destroy(itr);
-	}
-	count = NO_VAL;
-
-	pack16(object->with_usage, buffer);
-	pack16(object->with_deleted, buffer);
-}
-
-extern int unpack_acct_wckey_cond(void **object, uint16_t rpc_version,
-				  Buf buffer)
-{
-	uint32_t uint32_tmp;
-	int i;
-	uint32_t count;
-	acct_wckey_cond_t *object_ptr =	xmalloc(sizeof(acct_wckey_cond_t));
-	char *tmp_info = NULL;
-
-	*object = object_ptr;
-
-	safe_unpack32(&count, buffer);
-	if(count != NO_VAL) {
-		object_ptr->cluster_list = 
-			list_create(slurm_destroy_char);
-		for(i=0; i<count; i++) {
-			safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
-					       buffer);
-			list_append(object_ptr->cluster_list, 
-				    tmp_info);
-		}
-	}
+		safe_unpack32(&uint32_tmp, buffer);
+		object_ptr->usage_end = uint32_tmp;
+		safe_unpack32(&uint32_tmp, buffer);
+		object_ptr->usage_start = uint32_tmp;
 
-	safe_unpack32(&count, buffer);
-	if(count != NO_VAL) {
-		object_ptr->id_list = list_create(slurm_destroy_char);
-		for(i=0; i<count; i++) {
-			safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp, 
-					       buffer);
-			list_append(object_ptr->id_list, tmp_info);
-		}
-	}
-	
-	safe_unpack32(&count, buffer);
-	if(count != NO_VAL) {
-		object_ptr->name_list = 
-			list_create(slurm_destroy_char);
-		for(i=0; i<count; i++) {
-			safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
-					       buffer);
-			list_append(object_ptr->name_list, tmp_info);
+		safe_unpack32(&count, buffer);
+		if(count != NO_VAL) {
+			object_ptr->user_list = 
+				list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->user_list, tmp_info);
+			}
 		}
-	}
-
-	safe_unpack32(&object_ptr->usage_end, buffer);
-	safe_unpack32(&object_ptr->usage_start, buffer);
 
-	safe_unpack32(&count, buffer);
-	if(count != NO_VAL) {
-		object_ptr->user_list = 
-			list_create(slurm_destroy_char);
-		for(i=0; i<count; i++) {
-			safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
-					       buffer);
-			list_append(object_ptr->user_list, tmp_info);
-		}
+		safe_unpack16(&object_ptr->with_usage, buffer);
+		safe_unpack16(&object_ptr->with_deleted, buffer);
 	}
-
-	safe_unpack16(&object_ptr->with_usage, buffer);
-	safe_unpack16(&object_ptr->with_deleted, buffer);
-
 	return SLURM_SUCCESS;
 
 unpack_error:
@@ -5038,21 +6767,29 @@ extern void pack_acct_archive_cond(void *in, uint16_t rpc_version, Buf buffer)
 	if(!object) {
 		packnull(buffer);
 		pack16((uint16_t)NO_VAL, buffer);
+		pack16((uint16_t)NO_VAL, buffer);
 		packnull(buffer);
 		pack16((uint16_t)NO_VAL, buffer);
+		pack16((uint16_t)NO_VAL, buffer);
 		pack_acct_job_cond(NULL, rpc_version, buffer);
 		pack16((uint16_t)NO_VAL, buffer);
 		pack16((uint16_t)NO_VAL, buffer);
+		pack16((uint16_t)NO_VAL, buffer);
+		pack16((uint16_t)NO_VAL, buffer);
 		return;
 	}
 
 	packstr(object->archive_dir, buffer);
+	pack16(object->archive_events, buffer);
 	pack16(object->archive_jobs, buffer);
 	packstr(object->archive_script, buffer);
 	pack16(object->archive_steps, buffer);
+	pack16(object->archive_suspend, buffer);
 	pack_acct_job_cond(object->job_cond, rpc_version, buffer);
-	pack16(object->job_purge, buffer);
-	pack16(object->step_purge, buffer);
+	pack16(object->purge_event, buffer);
+	pack16(object->purge_job, buffer);
+	pack16(object->purge_step, buffer);
+	pack16(object->purge_suspend, buffer);
 }
 
 extern int unpack_acct_archive_cond(void **object, uint16_t rpc_version,
@@ -5065,15 +6802,19 @@ extern int unpack_acct_archive_cond(void **object, uint16_t rpc_version,
 	*object = object_ptr;
 
 	safe_unpackstr_xmalloc(&object_ptr->archive_dir, &uint32_tmp, buffer);
+	safe_unpack16(&object_ptr->archive_events, buffer);
 	safe_unpack16(&object_ptr->archive_jobs, buffer);
 	safe_unpackstr_xmalloc(&object_ptr->archive_script,
 			       &uint32_tmp, buffer);
 	safe_unpack16(&object_ptr->archive_steps, buffer);
+	safe_unpack16(&object_ptr->archive_suspend, buffer);
 	if(unpack_acct_job_cond((void *)&object_ptr->job_cond,
 				rpc_version, buffer) == SLURM_ERROR)
 		goto unpack_error;
-	safe_unpack16(&object_ptr->job_purge, buffer);
-	safe_unpack16(&object_ptr->step_purge, buffer);
+	safe_unpack16(&object_ptr->purge_event, buffer);
+	safe_unpack16(&object_ptr->purge_job, buffer);
+	safe_unpack16(&object_ptr->purge_step, buffer);
+	safe_unpack16(&object_ptr->purge_suspend, buffer);
 
 	return SLURM_SUCCESS;
 
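The archive condition now carries separate archive/purge knobs for events, jobs, steps, and suspend data, packed in the fixed order shown above. A hedged usage sketch (the directory and month values are examples only):

static void _pack_example_archive_cond(Buf buffer, uint16_t rpc_version)
{
	acct_archive_cond_t arch;

	memset(&arch, 0, sizeof(arch));
	arch.archive_dir    = "/var/slurm/archive";	/* example path */
	arch.archive_events = 1;
	arch.archive_jobs   = 1;
	arch.purge_event    = 12;	/* months */
	arch.purge_job      = 12;
	arch.purge_step     = 6;
	arch.purge_suspend  = 6;
	pack_acct_archive_cond(&arch, rpc_version, buffer);
}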
@@ -5401,8 +7142,7 @@ extern List get_acct_hierarchical_rec_list(List assoc_list)
 }
 
 /* IN/OUT: tree_list a list of acct_print_tree_t's */ 
-extern char *get_tree_acct_name(char *name, char *parent, char *cluster, 
-				List tree_list)
+extern char *get_tree_acct_name(char *name, char *parent, List tree_list)
 {
 	ListIterator itr = NULL;
 	acct_print_tree_t *acct_print_tree = NULL;
@@ -5499,22 +7239,86 @@ extern char *get_qos_complete_str(List qos_list, List num_qos_list)
 	return print_this;
 }
 
+extern char *get_classification_str(uint16_t class)
+{
+	bool classified = class & ACCT_CLASSIFIED_FLAG;
+	acct_classification_type_t type = class & ACCT_CLASS_BASE;
+
+	switch(type) {
+	case ACCT_CLASS_NONE:
+		return NULL;
+		break;
+	case ACCT_CLASS_CAPACITY:
+		if(classified)
+			return "*Capacity";
+		else
+			return "Capacity";
+		break;
+	case ACCT_CLASS_CAPABILITY:
+		if(classified)
+			return "*Capability";
+		else
+			return "Capability";
+		break;
+	case ACCT_CLASS_CAPAPACITY:
+		if(classified)
+			return "*Capapacity";
+		else
+			return "Capapacity";
+		break;
+	default:
+		if(classified)
+			return "*Unknown";
+		else
+			return "Unknown";
+		break;
+	}
+}
+
+extern uint16_t str_2_classification(char *class)
+{
+	uint16_t type = 0;
+	if(!class)
+		return type;
+
+	if(slurm_strcasestr(class, "capac"))
+		type = ACCT_CLASS_CAPACITY;
+	else if(slurm_strcasestr(class, "capab"))
+		type = ACCT_CLASS_CAPABILITY;
+	else if(slurm_strcasestr(class, "capap"))
+		type = ACCT_CLASS_CAPAPACITY;
+	
+	if(slurm_strcasestr(class, "*")) 
+		type |= ACCT_CLASSIFIED_FLAG; 
+	else if(slurm_strcasestr(class, "class")) 
+		type |= ACCT_CLASSIFIED_FLAG;
+	
+	return type;
+}
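The classification value packs a base type in the low byte (ACCT_CLASS_BASE) with the ACCT_CLASSIFIED_FLAG bit on top, so the two functions above round-trip as in this illustrative snippet:

static void _classification_demo(void)
{
	/* a "*" (or the word "class") in the input sets the classified bit */
	uint16_t class = str_2_classification("*capability");
	/* class == (ACCT_CLASS_CAPABILITY | ACCT_CLASSIFIED_FLAG) */
	debug2("classification: %s", get_classification_str(class));
	/* prints "*Capability" */
}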
 
 extern void log_assoc_rec(acct_association_rec_t *assoc_ptr, List qos_list)
 {
+	xassert(assoc_ptr);
+
 	debug2("association rec id : %u", assoc_ptr->id);
 	debug2("  acct             : %s", assoc_ptr->acct);
 	debug2("  cluster          : %s", assoc_ptr->cluster);
 
-	if(assoc_ptr->fairshare == INFINITE)
-		debug2("  Fairshare        : NONE");
-	else if(assoc_ptr->fairshare != NO_VAL) 
-		debug2("  Fairshare        : %u", assoc_ptr->fairshare);
+	if(assoc_ptr->shares_raw == INFINITE)
+		debug2("  RawShares        : NONE");
+	else if(assoc_ptr->shares_raw != NO_VAL) 
+		debug2("  RawShares        : %u", assoc_ptr->shares_raw);
+
+	if(assoc_ptr->shares_norm != (double)NO_VAL) 
+		debug2("  NormalizedShares : %f", assoc_ptr->shares_norm);
+
+	if(assoc_ptr->level_shares != NO_VAL) 
+		debug2("  LevelShares      : %u", assoc_ptr->level_shares);
 
 	if(assoc_ptr->grp_cpu_mins == INFINITE)
-		debug2("  GrpCPUMins      : NONE");
+		debug2("  GrpCPUMins       : NONE");
 	else if(assoc_ptr->grp_cpu_mins != NO_VAL) 
-		debug2("  GrpCPUMins      : %llu", assoc_ptr->grp_cpu_mins);
+		debug2("  GrpCPUMins       : %llu", assoc_ptr->grp_cpu_mins);
 		
 	if(assoc_ptr->grp_cpus == INFINITE)
 		debug2("  GrpCPUs          : NONE");
@@ -5591,14 +7395,14 @@ extern void log_assoc_rec(acct_association_rec_t *assoc_ptr, List qos_list)
 	}
 
 	if(assoc_ptr->parent_acct)
-		debug2("  parent_acct      : %s", assoc_ptr->parent_acct);
+		debug2("  ParentAccount    : %s", assoc_ptr->parent_acct);
 	if(assoc_ptr->partition)
-		debug2("  partition        : %s", assoc_ptr->partition);
+		debug2("  Partition        : %s", assoc_ptr->partition);
 	if(assoc_ptr->user)
-		debug2("  user             : %s(%u)",
+		debug2("  User             : %s(%u)",
 		       assoc_ptr->user, assoc_ptr->uid);
-	debug2("  used_jobs        : %u", assoc_ptr->used_jobs);
-	debug2("  used_shares      : %u", assoc_ptr->used_shares);
+	debug2("  UsedJobs         : %u", assoc_ptr->used_jobs);
+	debug2("  RawUsage         : %Lf", assoc_ptr->usage_raw);
 }
 
 /*
@@ -5741,6 +7545,15 @@ extern int acct_storage_g_add_wckeys(void *db_conn, uint32_t uid,
 		(db_conn, uid, wckey_list);
 }
 
+extern int acct_storage_g_add_reservation(void *db_conn,
+					   acct_reservation_rec_t *resv)
+{
+	if (slurm_acct_storage_init(NULL) < 0)
+		return NO_VAL;
+	return (*(g_acct_storage_context->ops.add_reservation))
+		(db_conn, resv);
+}
+
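A hedged usage sketch for the new reservation hook (the cluster, node, and id values are invented for illustration):

static int _record_resv_example(void *db_conn)
{
	acct_reservation_rec_t resv;

	memset(&resv, 0, sizeof(resv));
	resv.cluster    = "snowflake";		/* example cluster name */
	resv.id         = 42;			/* example reservation id */
	resv.name       = "maint";
	resv.nodes      = "snow[000-015]";
	resv.cpus       = 128;
	resv.time_start = time(NULL);
	resv.time_end   = resv.time_start + 3600;
	return acct_storage_g_add_reservation(db_conn, &resv);
}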
 extern List acct_storage_g_modify_users(void *db_conn, uint32_t uid,
 					acct_user_cond_t *user_cond,
 					acct_user_rec_t *user)
@@ -5802,6 +7615,15 @@ extern List acct_storage_g_modify_wckeys(void *db_conn, uint32_t uid,
 		(db_conn, uid, wckey_cond, wckey);
 }
 
+extern int acct_storage_g_modify_reservation(void *db_conn,
+					   acct_reservation_rec_t *resv)
+{
+	if (slurm_acct_storage_init(NULL) < 0)
+		return NO_VAL;
+	return (*(g_acct_storage_context->ops.modify_reservation))
+		(db_conn, resv);
+}
+
 extern List acct_storage_g_remove_users(void *db_conn, uint32_t uid,
 					acct_user_cond_t *user_cond)
 {
@@ -5867,6 +7689,15 @@ extern List acct_storage_g_remove_wckeys(void *db_conn, uint32_t uid,
 		(db_conn, uid, wckey_cond);
 }
 
+extern int acct_storage_g_remove_reservation(void *db_conn,
+					     acct_reservation_rec_t *resv)
+{
+	if (slurm_acct_storage_init(NULL) < 0)
+		return NO_VAL;
+	return (*(g_acct_storage_context->ops.remove_reservation))
+		(db_conn, resv);
+}
+
 extern List acct_storage_g_get_users(void *db_conn, uint32_t uid,
 				     acct_user_cond_t *user_cond)
 {
@@ -5894,6 +7725,13 @@ extern List acct_storage_g_get_clusters(void *db_conn, uint32_t uid,
 		(db_conn, uid, cluster_cond);
 }
 
+extern List acct_storage_g_get_config(void *db_conn)
+{
+	if (slurm_acct_storage_init(NULL) < 0)
+		return NULL;
+	return (*(g_acct_storage_context->ops.get_config))(db_conn);
+}
+
 extern List acct_storage_g_get_associations(void *db_conn, uint32_t uid,
 					    acct_association_cond_t *assoc_cond)
 {
@@ -5920,6 +7758,15 @@ extern List acct_storage_g_get_wckeys(void *db_conn, uint32_t uid,
 							   wckey_cond);
 }
 
+extern List acct_storage_g_get_reservations(void *db_conn, uint32_t uid, 
+				      acct_reservation_cond_t *resv_cond)
+{
+	if (slurm_acct_storage_init(NULL) < 0)
+		return NULL;
+	return (*(g_acct_storage_context->ops.get_resvs))(db_conn, uid,
+							  resv_cond);
+}
+
 extern List acct_storage_g_get_txn(void *db_conn,  uint32_t uid, 
 				   acct_txn_cond_t *txn_cond)
 {
@@ -5939,11 +7786,13 @@ extern int acct_storage_g_get_usage(void *db_conn,  uint32_t uid,
 }
 
 extern int acct_storage_g_roll_usage(void *db_conn, 
-				     time_t sent_start)
+				     time_t sent_start, time_t sent_end,
+				     uint16_t archive_data)
 {
 	if (slurm_acct_storage_init(NULL) < 0)
 		return SLURM_ERROR;
-	return (*(g_acct_storage_context->ops.roll_usage))(db_conn, sent_start);
+	return (*(g_acct_storage_context->ops.roll_usage))
+		(db_conn, sent_start, sent_end, archive_data);
 }
 
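With the extended signature, callers can bound the rollup window and control archiving; a minimal sketch (the one-day window is an arbitrary example):

static int _reroll_last_day(void *db_conn)
{
	time_t end   = time(NULL);
	time_t start = end - (24 * 60 * 60);

	/* archive_data=1: allow the monthly rollup to archive old data */
	return acct_storage_g_roll_usage(db_conn, start, end, 1);
}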
 extern int clusteracct_storage_g_node_down(void *db_conn,
@@ -5972,13 +7821,14 @@ extern int clusteracct_storage_g_node_up(void *db_conn,
 
 extern int clusteracct_storage_g_cluster_procs(void *db_conn,
 					       char *cluster,
+					       char *cluster_nodes,
 					       uint32_t procs,
 					       time_t event_time)
 {
 	if (slurm_acct_storage_init(NULL) < 0)
 		return SLURM_ERROR;
  	return (*(g_acct_storage_context->ops.cluster_procs))
-		(db_conn, cluster, procs, event_time);
+		(db_conn, cluster, cluster_nodes, procs, event_time);
 }
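The new cluster_nodes argument carries the ranged node list alongside the processor count; an illustrative call (all values hypothetical):

static int _report_procs(void *db_conn)
{
	return clusteracct_storage_g_cluster_procs(db_conn, "snowflake",
						   "snow[000-015]", 128,
						   time(NULL));
}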
 
 
diff --git a/src/common/slurm_accounting_storage.h b/src/common/slurm_accounting_storage.h
index af73d80e51803e71f3f7a93afaeda71e51e6d6eb..24b1df9c845868bef5b852da4f028987c96ea5c8 100644
--- a/src/common/slurm_accounting_storage.h
+++ b/src/common/slurm_accounting_storage.h
@@ -2,13 +2,14 @@
  *  slurm_accounting_storage.h - Define accounting storage plugin functions.
  *****************************************************************************
  *  Copyright (C) 2004-2007 The Regents of the University of California.
- *  Copyright (C) 2008 Lawrence Livermore National Security.
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Danny Auble <da@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -71,6 +72,17 @@ typedef enum {
 	ACCT_MODIFY_WCKEY,
 } acct_update_type_t;
 
+typedef enum {
+	ACCT_CLASS_NONE, /* no class given */
+	ACCT_CLASS_CAPABILITY, /* capability cluster */
+	ACCT_CLASS_CAPACITY, /* capacity cluster */
+	ACCT_CLASS_CAPAPACITY, /* a cluster that is both capability
+				* and capacity */
+} acct_classification_type_t;
+
+#define ACCT_CLASSIFIED_FLAG 0x0100
+#define ACCT_CLASS_BASE      0x00ff
+
 /* Association conditions used for queries of the database */
 typedef struct {
 	List acct_list;		/* list of char * */
@@ -99,8 +111,8 @@ typedef struct {
 
 	List qos_list; /* list of char * */	
 
-	uint32_t usage_end; 
-	uint32_t usage_start; 
+	time_t usage_end; 
+	time_t usage_start; 
 
 	List user_list;		/* list of char * */
 
@@ -134,51 +146,47 @@ typedef struct {
 typedef struct {
 	uint64_t alloc_secs; /* number of cpu seconds allocated */
 	uint32_t id;	/* association/wckey ID		*/
-	time_t period_start; 
+	time_t period_start; /* when this record was started */
 } acct_accounting_rec_t;
 
 typedef struct acct_association_rec {
 	List accounting_list; 	/* list of acct_accounting_rec_t *'s */
 	char *acct;		/* account/project associated to association */
+	List childern_list;     /* list of children associations
+				 * (DON'T PACK) */
 	char *cluster;		/* cluster associated to association
 				 * */
 
-	uint32_t fairshare;	/* fairshare number */
-
-	uint64_t grp_cpu_mins; /* max number of cpu hours the
-				     * underlying group of
-				     * associations can run for */
+	uint64_t grp_cpu_mins; /* max number of cpu minutes the
+				* underlying group of
+				* associations can run for */
 	uint32_t grp_cpus; /* max number of cpus the
-				* underlying group of 
-				* associations can allocate at one time */
+			    * underlying group of 
+			    * associations can allocate at one time */
 	uint32_t grp_jobs;	/* max number of jobs the
 				 * underlying group of associations can run
 				 * at one time */
 	uint32_t grp_nodes; /* max number of nodes the
-				 * underlying group of
-				 * associations can allocate at once */
+			     * underlying group of
+			     * associations can allocate at once */
 	uint32_t grp_submit_jobs; /* max number of jobs the
-				       * underlying group of
-				       * associations can submit at
-				       * one time */
+				   * underlying group of
+				   * associations can submit at
+				   * one time */
 	uint32_t grp_wall; /* total time in hours the 
 			    * underlying group of
 			    * associations can run for */
-
-	uint32_t grp_used_cpu_mins; /* cpu mins the
-				      * underlying group of
-				      * associations has ran for 
-				      * (DON'T PACK) */
+	
 	uint32_t grp_used_cpus; /* count of active jobs in the group
 				 * (DON'T PACK) */
 	uint32_t grp_used_nodes; /* count of active jobs in the group
 				  * (DON'T PACK) */
-	uint32_t grp_used_wall; /* group count of time used in
-				     * running jobs (DON'T PACK) */
+	double grp_used_wall;   /* group count of time used in
+				 * running jobs (DON'T PACK) */
 	
 	uint32_t id;		/* id identifing a combination of
 				 * user-account-cluster(-partition) */
-
+	
 	uint32_t level_shares;  /* number of shares on this level of
 				 * the tree (DON'T PACK) */
 	
@@ -189,11 +197,11 @@ typedef struct acct_association_rec {
 	uint64_t max_cpu_mins_pj; /* max number of cpu seconds this 
 				   * association can have per job */
 	uint32_t max_cpus_pj; /* max number of cpus this 
-				    * association can allocate per job */
+			       * association can allocate per job */
 	uint32_t max_jobs;	/* max number of jobs this association can run
 				 * at one time */
 	uint32_t max_nodes_pj; /* max number of nodes this
-				     * association can allocate per job */
+				* association can allocate per job */
 	uint32_t max_submit_jobs; /* max number of jobs that can be
 				     submitted by association */
 	uint32_t max_wall_pj; /* longest time this
@@ -207,16 +215,23 @@ typedef struct acct_association_rec {
 	uint32_t parent_id;	/* id of parent account */
 	char *partition;	/* optional partition in a cluster 
 				 * associated to association */
-
+	
 	List qos_list;          /* list of char * */
-
+	
 	uint32_t rgt;		/* rgt used for grouping sub
 				 * associations and jobs as a right
 				 * most container used with lft */
+
+	double shares_norm;     /* normalized shares (DON'T PACK) */
+	uint32_t shares_raw;	/* number of shares allocated to association */
+
 	uint32_t uid;		/* user ID */
 	
+	long double usage_efctv;/* effective, normalized usage (DON'T PACK) */
+	long double usage_norm;	/* normalized usage (DON'T PACK) */
+	long double usage_raw;	/* measure of resource usage (DON'T PACK) */
+
 	uint32_t used_jobs;	/* count of active jobs (DON'T PACK) */
-	uint32_t used_shares;	/* measure of resource usage */
 	uint32_t used_submit_jobs; /* count of jobs pending or running
 				    * (DON'T PACK) */
 	
@@ -224,22 +239,23 @@ typedef struct acct_association_rec {
 } acct_association_rec_t;
 
 typedef struct {
+	uint16_t classification; /* how this machine is classified */
 	List cluster_list; /* list of char * */
-	uint32_t usage_end; 
-	uint32_t usage_start; 
+	time_t usage_end; 
+	time_t usage_start; 
 	uint16_t with_deleted; 
 	uint16_t with_usage; 
 } acct_cluster_cond_t;
 
 typedef struct {
 	List accounting_list; /* list of cluster_accounting_rec_t *'s */
+	uint16_t classification; /* how this machine is classified */
 	char *control_host;
 	uint32_t control_port;
+	uint32_t cpu_count;
 	char *name;
-
-	List valid_qos_list;
+	char *nodes;
 	acct_association_rec_t *root_assoc; /* root association for cluster */
-
 	uint16_t rpc_version; /* version of rpc this cluster is running */
 } acct_cluster_rec_t;
 
@@ -255,13 +271,20 @@ typedef struct {
 	uint16_t duplicates;    /* report duplicate job entries */
 	List groupid_list;	/* list of char * */
 	List partition_list;	/* list of char * */
+	List resv_list;		/* list of char * */
+	List resvid_list;	/* list of char * */
 	List step_list;         /* list of jobacct_selected_step_t */
 	List state_list;        /* list of char * */
-	uint32_t usage_end; 
-	uint32_t usage_start; 
+	time_t usage_end; 
+	time_t usage_start; 
+	char *used_nodes;       /* a ranged node string where jobs ran */
 	List userid_list;		/* list of char * */
 	List wckey_list;		/* list of char * */
 	uint16_t without_steps; /* don't give me step info */
+	uint16_t without_usage_truncation; /* give me the information
+					    * without truncating the
+					    * time to the usage_start
+					    * and usage_end */
 } acct_job_cond_t;
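The new reservation-aware job condition fields compose as in this hedged sketch (the reservation id and node range are examples only):

static acct_job_cond_t *_resv_job_cond(void)
{
	acct_job_cond_t *jc = xmalloc(sizeof(acct_job_cond_t));

	jc->resvid_list = list_create(slurm_destroy_char);
	list_append(jc->resvid_list, xstrdup("42"));
	jc->used_nodes = xstrdup("snow[000-003]");
	jc->without_usage_truncation = 1;
	return jc;	/* release with destroy_acct_job_cond() */
}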
 
 typedef struct {
@@ -284,8 +307,6 @@ typedef struct {
 				   * one time */
 	uint32_t grp_wall; /* total time in hours this qos can run for */
 
-	uint32_t grp_used_cpu_mins; /* cpu hours this qos has ran for 
-				      * (DON'T PACK) */
 	uint32_t grp_used_cpus; /* count of cpus in use in this qos
 				 * (DON'T PACK) */
 	uint32_t grp_used_jobs;	/* count of active jobs (DON'T PACK) */
@@ -293,7 +314,7 @@ typedef struct {
 				  * (DON'T PACK) */
 	uint32_t grp_used_submit_jobs; /* count of jobs pending or running
 				    * (DON'T PACK) */
-	uint32_t grp_used_wall; /* group count of time (minutes) used in
+	double grp_used_wall;   /* group count of time (minutes) used in
 				 * running jobs (DON'T PACK) */
 
 	uint64_t max_cpu_mins_pu; /* max number of cpu mins a user can
@@ -310,12 +331,16 @@ typedef struct {
 			       * qos can run a job */
 
 	char *name;
+	double norm_priority;/* normalized priority (DON'T PACK) */
 	List preemptee_list; /* list of char * list of qos's that this
 				qos can preempt */
 	List preemptor_list; /* list of char * list of qos's that this
 			      * qos is preempted by */
 	uint32_t priority;  /* ranged int needs to be a unint for
 			     * heterogeneous systems */
+	double usage_factor; /* factor to apply to usage in this qos */
+	long double usage_raw;	/* measure of resource usage (DON'T PACK) */
+
 	List user_limit_list; /* acct_used_limits_t's */
 } acct_qos_rec_t;
 
@@ -326,6 +351,36 @@ typedef struct {
 	uint16_t with_deleted; 
 } acct_qos_cond_t;
 
+typedef struct {
+	List cluster_list; /* cluster reservations are on list of
+			    * char * */
+	uint16_t flags; /* flags for reservation. */
+	List id_list;   /* ids of reservations. list of char * */
+	List name_list; /* name of reservations. list of char * */
+	char *nodes; /* list of nodes in reservation */
+	time_t time_end; /* end time of reservation */
+	time_t time_start; /* start time of reservation */
+	uint16_t with_usage; /* send usage for reservation */
+} acct_reservation_cond_t;
+
+typedef struct {
+	uint64_t alloc_secs; /* number of cpu seconds allocated */
+	char *assocs; /* comma-separated list of associations */
+	char *cluster; /* cluster reservation is for */
+	uint32_t cpus; /* how many cpus are in reservation */
+	uint64_t down_secs; /* number of cpu seconds down */
+	uint16_t flags; /* flags for reservation. */
+	uint32_t id;   /* id of reservation. */
+	char *name; /* name of reservation */
+	char *nodes; /* list of nodes in reservation */
+	char *node_inx; /* node index of nodes in reservation */
+	time_t time_end; /* end time of reservation */
+	time_t time_start; /* start time of reservation */
+	time_t time_start_prev; /* If start time was changed this is
+				 * the previous start time.  Needed
+				 * for accounting */
+} acct_reservation_rec_t;
+
 /* Right now this is used in the acct_qos_rec_t structure.  In the
  * user_limit_list. */
 typedef struct {
@@ -376,8 +431,8 @@ typedef struct {
 	List id_list; /* list of char * */
 	List info_list; /* list of char * */
 	List name_list; /* list of char * */
-	uint32_t time_end; 
-	uint32_t time_start; 
+	time_t time_end; 
+	time_t time_start; 
 	List user_list; /* list of char * */
 	uint16_t with_assoc_info;
 } acct_txn_cond_t;
@@ -407,8 +462,8 @@ typedef struct {
 
 	List name_list;        /* list of char * */
 
-	uint32_t usage_end; 
-	uint32_t usage_start; 
+	time_t usage_end; 
+	time_t usage_start; 
 
 	List user_list;		/* list of char * */
 
@@ -440,16 +495,17 @@ typedef struct {
 	uint64_t down_secs; /* number of cpu seconds down */
 	uint64_t idle_secs; /* number of cpu seconds idle */
 	uint64_t over_secs; /* number of cpu seconds overcommitted */
+	uint64_t pdown_secs; /* number of cpu seconds planned down */
 	time_t period_start; /* when this record was started */
 	uint64_t resv_secs; /* number of cpu seconds reserved */	
 } cluster_accounting_rec_t;
 
-
 typedef struct {
 	char *name;
 	char *print_name;
 	char *spaces;
-	uint16_t user;
+	uint16_t user; /* set to 1 if it is a user i.e. if name[0] is
+			* '|' */
 } acct_print_tree_t;
 
 typedef struct {
@@ -460,6 +516,9 @@ typedef struct {
 
 typedef struct {
 	char *archive_dir;     /* location to place archive file */
+	uint16_t archive_events; /* whether or not to keep an archive
+				    file of events that can be loaded
+				    later */
 	uint16_t archive_jobs; /* whether or not to keep an archive
 				  file of jobs that can be loaded
 				  later */
@@ -468,9 +527,15 @@ typedef struct {
 	uint16_t archive_steps; /* whether or not to keep an archive
 				  file of steps that can be loaded
 				  later */
+	uint16_t archive_suspend; /* whether or not to keep an archive
+				     file of suspend data that can be loaded
+				     later */
 	acct_job_cond_t *job_cond; /* conditions for the jobs to archive */
-	uint16_t job_purge; /* purge jobs older than this in months */
-	uint16_t step_purge; /* purge steps older than this in months */
+	uint16_t purge_event; /* purge events older than this in months */
+	uint16_t purge_job; /* purge jobs older than this in months */
+	uint16_t purge_step; /* purge steps older than this in months */
+	uint16_t purge_suspend; /* purge suspend data older than this
+				 * in months */
 } acct_archive_cond_t;
 
 typedef struct {
@@ -480,6 +545,8 @@ typedef struct {
 			     insert of jobs since past */
 } acct_archive_rec_t;
 
+extern uint32_t qos_max_priority; /* max priority in all qos's */
+
 extern void destroy_acct_user_rec(void *object);
 extern void destroy_acct_account_rec(void *object);
 extern void destroy_acct_coord_rec(void *object);
@@ -488,6 +555,7 @@ extern void destroy_acct_cluster_rec(void *object);
 extern void destroy_acct_accounting_rec(void *object);
 extern void destroy_acct_association_rec(void *object);
 extern void destroy_acct_qos_rec(void *object);
+extern void destroy_acct_reservation_rec(void *object);
 extern void destroy_acct_txn_rec(void *object);
 extern void destroy_acct_wckey_rec(void *object);
 extern void destroy_acct_archive_rec(void *object);
@@ -498,6 +566,7 @@ extern void destroy_acct_cluster_cond(void *object);
 extern void destroy_acct_association_cond(void *object);
 extern void destroy_acct_job_cond(void *object);
 extern void destroy_acct_qos_cond(void *object);
+extern void destroy_acct_reservation_cond(void *object);
 extern void destroy_acct_txn_cond(void *object);
 extern void destroy_acct_wckey_cond(void *object);
 extern void destroy_acct_archive_cond(void *object);
@@ -538,6 +607,10 @@ extern int unpack_acct_association_rec(void **object, uint16_t rpc_version,
 				       Buf buffer);
 extern void pack_acct_qos_rec(void *in, uint16_t rpc_version, Buf buffer);
 extern int unpack_acct_qos_rec(void **object, uint16_t rpc_version, Buf buffer);
+extern void pack_acct_reservation_rec(void *in, uint16_t rpc_version,
+				      Buf buffer);
+extern int unpack_acct_reservation_rec(void **object, uint16_t rpc_version,
+				       Buf buffer);
 extern void pack_acct_txn_rec(void *in, uint16_t rpc_version, Buf buffer);
 extern int unpack_acct_txn_rec(void **object, uint16_t rpc_version, Buf buffer);
 extern void pack_acct_wckey_rec(void *in, uint16_t rpc_version, Buf buffer);
@@ -566,6 +639,10 @@ extern int unpack_acct_job_cond(void **object, uint16_t rpc_version,
 extern void pack_acct_qos_cond(void *in, uint16_t rpc_version, Buf buffer);
 extern int unpack_acct_qos_cond(void **object, uint16_t rpc_version,
 				Buf buffer);
+extern void pack_acct_reservation_cond(void *in, uint16_t rpc_version,
+				       Buf buffer);
+extern int unpack_acct_reservation_cond(void **object, uint16_t rpc_version,
+					Buf buffer);
 extern void pack_acct_txn_cond(void *in, uint16_t rpc_version, Buf buffer);
 extern int unpack_acct_txn_cond(void **object, uint16_t rpc_version,
 				Buf buffer);
@@ -602,11 +679,13 @@ extern List get_hierarchical_sorted_assoc_list(List assoc_list);
 extern List get_acct_hierarchical_rec_list(List assoc_list);
 
 /* IN/OUT: tree_list a list of acct_print_tree_t's */ 
-extern char *get_tree_acct_name(char *name, char *parent, char *cluster, 
-				List tree_list);
+extern char *get_tree_acct_name(char *name, char *parent, List tree_list);
 
 extern char *get_qos_complete_str(List qos_list, List num_qos_list);
 
+extern char *get_classification_str(uint16_t class);
+extern uint16_t str_2_classification(char *class);
+
 extern void log_assoc_rec(acct_association_rec_t *assoc_ptr, List qos_list);
 
 extern int slurm_acct_storage_init(char *loc); /* load the plugin */
@@ -698,6 +777,14 @@ extern int acct_storage_g_add_qos(void *db_conn, uint32_t uid,
 extern int acct_storage_g_add_wckeys(void *db_conn, uint32_t uid, 
 				     List wckey_list);
 
+/* 
+ * add a reservation to the accounting system
+ * IN:  acct_reservation_rec_t *resv reservation to be added.
+ * RET: SLURM_SUCCESS on success, SLURM_ERROR otherwise
+ */
+extern int acct_storage_g_add_reservation(void *db_conn, 
+					  acct_reservation_rec_t *resv);
+
 /* 
  * modify existing users in the accounting system 
  * IN:  acct_user_cond_t *user_cond
@@ -759,6 +846,13 @@ extern List acct_storage_g_modify_wckeys(void *db_conn, uint32_t uid,
 					 acct_wckey_cond_t *wckey_cond,
 					 acct_wckey_rec_t *wckey);
 
+/* 
+ * modify a reservation in the accounting system
+ * IN:  acct_reservation_rec_t *resv
+ * RET: SLURM_SUCCESS on success, SLURM_ERROR otherwise
+ */
+extern int acct_storage_g_modify_reservation(void *db_conn, 
+					     acct_reservation_rec_t *resv);
 /* 
  * remove users from accounting system 
  * IN:  acct_user_cond_t *user_cond
@@ -817,6 +911,13 @@ extern List acct_storage_g_remove_qos(
 extern List acct_storage_g_remove_wckeys(
 	void *db_conn, uint32_t uid, acct_wckey_cond_t *wckey_cond);
 
+/* 
+ * remove a reservation from the accounting system
+ * IN:  acct_reservation_rec_t *resv
+ * RET: SLURM_SUCCESS on success, SLURM_ERROR otherwise
+ */
+extern int acct_storage_g_remove_reservation(void *db_conn, 
+					     acct_reservation_rec_t *resv);
 /* 
  * get info from the storage 
  * IN:  acct_user_cond_t *
@@ -847,6 +948,14 @@ extern List acct_storage_g_get_accounts(void *db_conn,  uint32_t uid,
 extern List acct_storage_g_get_clusters(
 	void *db_conn, uint32_t uid, acct_cluster_cond_t *cluster_cond);
 
+
+/* 
+ * get info from the storage 
+ * RET: List of config_key_pairs_t *
+ * note: the returned List must be freed by the caller
+ */
+extern List acct_storage_g_get_config(void *db_conn);
+
 /* 
  * get info from the storage 
  * IN:  acct_association_cond_t *
@@ -856,7 +965,6 @@ extern List acct_storage_g_get_clusters(
 extern List acct_storage_g_get_associations(
 	void *db_conn, uint32_t uid, acct_association_cond_t *assoc_cond);
 
-
 /* 
  * get info from the storage 
  * IN:  acct_qos_cond_t *
@@ -875,6 +983,15 @@ extern List acct_storage_g_get_qos(void *db_conn, uint32_t uid,
 extern List acct_storage_g_get_wckeys(void *db_conn, uint32_t uid,
 				      acct_wckey_cond_t *wckey_cond);
 
+/* 
+ * get info from the storage 
+ * IN:  acct_reservation_cond_t *
+ * RET: List of acct_reservation_rec_t *
+ * note: the returned List must be freed by the caller
+ */
+extern List acct_storage_g_get_reservations(void *db_conn, uint32_t uid,
+					    acct_reservation_cond_t *resv_cond);
+
 /* 
  * get info from the storage 
  * IN:  acct_txn_cond_t *
@@ -899,10 +1016,13 @@ extern int acct_storage_g_get_usage(
 /* 
  * roll up data in the storage 
  * IN: sent_start (optional time to do a re-roll or start from this point)
 + * IN: sent_end (optional time to do a re-roll or end at this point)
+ * IN: archive_data (if 0 old data is not archived in a monthly rollup)
  * RET: SLURM_SUCCESS on success SLURM_ERROR else
  */
 extern int acct_storage_g_roll_usage(void *db_conn, 
-				     time_t sent_start);
+				     time_t sent_start, time_t sent_end,
+				     uint16_t archive_data);
 /* 
  * record shares used information for backup in case slurmctld restarts 
  * IN:  account_list List of shares_used_object_t *
@@ -936,6 +1056,7 @@ extern int clusteracct_storage_g_node_up(void *db_conn,
 
 extern int clusteracct_storage_g_cluster_procs(void *db_conn, 
 					       char *cluster,
+					       char *cluster_nodes,
 					       uint32_t procs,
 					       time_t event_time);
 
diff --git a/src/common/slurm_auth.c b/src/common/slurm_auth.c
index 11d4b7f5e14acc59a30425a5ea0e7ddaa1390225..c139575fa2bbfef1b15cbd83c02e85a38189a5eb 100644
--- a/src/common/slurm_auth.c
+++ b/src/common/slurm_auth.c
@@ -4,10 +4,11 @@
  *  Copyright (C) 2002-2006 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Jay Windley <jwindley@lnxi.com>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/common/slurm_auth.h b/src/common/slurm_auth.h
index 3c5bcdee4a207b84979e9df675b397c857cf6584..2e706955dadc146142ad0fc8be796e9a863400a4 100644
--- a/src/common/slurm_auth.h
+++ b/src/common/slurm_auth.h
@@ -4,10 +4,11 @@
  *  Copyright (C) 2002 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Kevin Tew <tew1@llnl.gov> et. al.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/common/slurm_cred.c b/src/common/slurm_cred.c
index 8163a9d127f2f44bb5fb46bd0bae2a8ae94c5921..530008d5c2d4edb9b8863866dfb8cdb7f9a996aa 100644
--- a/src/common/slurm_cred.c
+++ b/src/common/slurm_cred.c
@@ -1,15 +1,15 @@
 /*****************************************************************************\
  *  src/common/slurm_cred.c - SLURM job credential functions
- *  $Id: slurm_cred.c 17005 2009-03-24 21:57:43Z da $
  *****************************************************************************
  *  Copyright (C) 2002-2007 The Regents of the University of California.
- *  Copyright (C) 2008 Lawrence Livermore National Security.
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -52,12 +52,14 @@
 #  include <pthread.h>
 #endif /* WITH_PTHREADS */
 
+#include "src/common/bitstring.h"
 #include "src/common/io_hdr.h"
 #include "src/common/list.h"
 #include "src/common/log.h"
 #include "src/common/macros.h"
 #include "src/common/plugin.h"
 #include "src/common/plugrack.h"
+#include "src/common/select_job_res.h"
 #include "src/common/slurm_protocol_api.h"
 #include "src/common/xmalloc.h"
 #include "src/common/xassert.h"
@@ -80,9 +82,10 @@
  * 
  */
 typedef struct {
-	uint32_t jobid;		/* SLURM job id for this credential         */
-	uint32_t stepid;	/* SLURM step id for this credential        */
-	time_t   expiration;    /* Time at which cred is no longer good     */
+	uint32_t jobid;		/* SLURM job id for this credential	*/
+	uint32_t stepid;	/* SLURM step id for this credential	*/
+	time_t   ctime;		/* Time that the cred was created	*/
+	time_t   expiration;    /* Time at which cred is no longer good	*/
 } cred_state_t;
 
 /*
@@ -143,13 +146,20 @@ struct slurm_job_credential {
 	uint32_t  jobid;	/* Job ID associated with this cred	*/
 	uint32_t  stepid;	/* Job step ID for this credential	*/
 	uid_t     uid;		/* user for which this cred is valid	*/
-	uint32_t  job_mem;	/* MB of memory reserved for job	*/
-	uint32_t  task_mem;	/* MB of memory reserved per task	*/
+	uint32_t  job_mem;	/* MB of memory reserved per node OR
+				 * real memory per CPU | MEM_PER_CPU,
+				 * default=0 (no limit) */
 	time_t    ctime;	/* time of credential creation		*/
 	char     *nodes;	/* hostnames for which the cred is ok	*/
-	uint32_t  alloc_lps_cnt;/* Number of hosts in the list above	*/
-	uint32_t *alloc_lps;	/* Number of tasks on each host		*/
-
+#ifndef HAVE_BG
+	bitstr_t *core_bitmap;
+	uint16_t  core_array_size;	/* core/socket array size */
+	uint16_t *cores_per_socket;
+	uint16_t *sockets_per_node;
+	uint32_t *sock_core_rep_count;
+	uint32_t  job_nhosts;	/* count of nodes allocated to JOB */
+	char     *job_hostlist;	/* list of nodes allocated to JOB */
+#endif
 	char     *signature; 	/* credential signature			*/
 	unsigned int siglen;	/* signature length in bytes		*/
 };
@@ -171,7 +181,7 @@ typedef struct slurm_crypto_ops {
 						 unsigned int buf_size, 
 						 char *signature, 
 						 unsigned int sig_size);
-	char *(*crypto_str_error)		(void);
+	const char *(*crypto_str_error)		(int);
 } slurm_crypto_ops_t;
 
 /*
@@ -485,6 +495,8 @@ slurm_cred_ctx_destroy(slurm_cred_ctx_t ctx)
 	slurm_mutex_lock(&ctx->mutex);
 	xassert(ctx->magic == CRED_CTX_MAGIC);
 
+	if (ctx->exkey)
+		(*(g_crypto_context->ops.crypto_destroy_key))(ctx->exkey);
 	if (ctx->key)
 		(*(g_crypto_context->ops.crypto_destroy_key))(ctx->key);
 	if (ctx->job_list)
@@ -604,15 +616,32 @@ slurm_cred_create(slurm_cred_ctx_t ctx, slurm_cred_arg_t *arg)
 	cred->stepid = arg->stepid;
 	cred->uid    = arg->uid;
 	cred->job_mem = arg->job_mem;
-	cred->task_mem = arg->task_mem;
 	cred->nodes  = xstrdup(arg->hostlist);
-        cred->alloc_lps_cnt = arg->alloc_lps_cnt;
-        cred->alloc_lps  = NULL;
-        if (cred->alloc_lps_cnt > 0) {
-                cred->alloc_lps =  xmalloc(cred->alloc_lps_cnt * sizeof(uint32_t));
-                memcpy(cred->alloc_lps, arg->alloc_lps, 
-			cred->alloc_lps_cnt * sizeof(uint32_t));
-        }
+#ifndef HAVE_BG
+{
+	int i, sock_recs = 0;
+	xassert(arg->job_nhosts);
+	for (i=0; i<arg->job_nhosts; i++) {
+		sock_recs += arg->sock_core_rep_count[i];
+		if (sock_recs >= arg->job_nhosts)
+			break;
+	}
+	i++;
+	cred->core_bitmap = bit_copy(arg->core_bitmap);
+	cred->core_array_size = i;
+	cred->cores_per_socket = xmalloc(sizeof(uint16_t) * i);
+	memcpy(cred->cores_per_socket, arg->cores_per_socket,
+	       (sizeof(uint16_t) * i));
+	cred->sockets_per_node = xmalloc(sizeof(uint16_t) * i);
+	memcpy(cred->sockets_per_node, arg->sockets_per_node,
+	       (sizeof(uint16_t) * i));
+	cred->sock_core_rep_count = xmalloc(sizeof(uint32_t) * i);
+	memcpy(cred->sock_core_rep_count, arg->sock_core_rep_count,
+	       (sizeof(uint32_t) * i));
+	cred->job_nhosts = arg->job_nhosts;
+	cred->job_hostlist = xstrdup(arg->job_hostlist);
+}
+#endif
 	cred->ctime  = time(NULL);
 
 	if (_slurm_cred_sign(ctx, cred) < 0) 
@@ -651,15 +680,25 @@ slurm_cred_copy(slurm_cred_t cred)
 	rcred->stepid = cred->stepid;
 	rcred->uid    = cred->uid;
 	rcred->job_mem = cred->job_mem;
-	rcred->task_mem = cred->task_mem;
 	rcred->nodes  = xstrdup(cred->nodes);
-	rcred->alloc_lps_cnt = cred->alloc_lps_cnt;
-	rcred->alloc_lps  = NULL;
-	if (rcred->alloc_lps_cnt > 0) {
-		rcred->alloc_lps =  xmalloc(rcred->alloc_lps_cnt * sizeof(uint32_t));
-		memcpy(rcred->alloc_lps, cred->alloc_lps, 
-		rcred->alloc_lps_cnt * sizeof(uint32_t));
-	}
+#ifndef HAVE_BG
+	rcred->core_bitmap = bit_copy(cred->core_bitmap);
+	rcred->core_array_size = cred->core_array_size;
+	rcred->cores_per_socket = xmalloc(sizeof(uint16_t) * 
+					  rcred->core_array_size);
+	memcpy(rcred->cores_per_socket, cred->cores_per_socket,
+	       (sizeof(uint16_t) * rcred->core_array_size));
+	rcred->sockets_per_node = xmalloc(sizeof(uint16_t) * 
+					  rcred->core_array_size);
+	memcpy(rcred->sockets_per_node, cred->sockets_per_node,
+	       (sizeof(uint16_t) * rcred->core_array_size));
+	rcred->sock_core_rep_count = xmalloc(sizeof(uint32_t) *
+					    rcred->core_array_size);
+	memcpy(rcred->sock_core_rep_count, cred->sock_core_rep_count,
+	       (sizeof(uint32_t) * rcred->core_array_size));
+	rcred->job_nhosts = cred->job_nhosts;
+	rcred->job_hostlist = xstrdup(cred->job_hostlist);
+#endif
 	rcred->ctime  = cred->ctime;
 	rcred->siglen = cred->siglen;
 	/* Assumes signature is a string,
@@ -688,15 +727,31 @@ slurm_cred_faker(slurm_cred_arg_t *arg)
 	cred->stepid   = arg->stepid;
 	cred->uid      = arg->uid;
 	cred->job_mem  = arg->job_mem;
-	cred->task_mem = arg->task_mem;
 	cred->nodes    = xstrdup(arg->hostlist);
-	cred->alloc_lps_cnt = arg->alloc_lps_cnt;
-	cred->alloc_lps  = NULL;
-	if (cred->alloc_lps_cnt > 0) {
-		cred->alloc_lps =  xmalloc(cred->alloc_lps_cnt * sizeof(uint32_t));
-		memcpy(cred->alloc_lps, arg->alloc_lps, 
-		       cred->alloc_lps_cnt * sizeof(uint32_t));
+#ifndef HAVE_BG
+{
+	int i, sock_recs = 0;
+	for (i=0; i<arg->job_nhosts; i++) {
+		sock_recs += arg->sock_core_rep_count[i];
+		if (sock_recs >= arg->job_nhosts)
+			break;
 	}
+	i++;
+	cred->core_bitmap = bit_copy(arg->core_bitmap);
+	cred->core_array_size = i;
+	cred->cores_per_socket = xmalloc(sizeof(uint16_t) * i);
+	memcpy(cred->cores_per_socket, arg->cores_per_socket,
+	       (sizeof(uint16_t) * i));
+	cred->sockets_per_node = xmalloc(sizeof(uint16_t) * i);
+	memcpy(cred->sockets_per_node, arg->sockets_per_node,
+	       (sizeof(uint16_t) * i));
+	cred->sock_core_rep_count = xmalloc(sizeof(uint32_t) * i);
+	memcpy(cred->sock_core_rep_count, arg->sock_core_rep_count,
+	       (sizeof(uint32_t) * i));
+	cred->job_nhosts = arg->job_nhosts;
+	cred->job_hostlist = xstrdup(arg->job_hostlist);
+}
+#endif
 	cred->ctime  = time(NULL);
 	cred->siglen = SLURM_IO_KEY_SIZE;
 
@@ -724,9 +779,15 @@ slurm_cred_faker(slurm_cred_arg_t *arg)
 
 void slurm_cred_free_args(slurm_cred_arg_t *arg)
 {
+	if (arg->core_bitmap) {
+		bit_free(arg->core_bitmap);
+		arg->core_bitmap = NULL;
+	}
+	xfree(arg->cores_per_socket);
 	xfree(arg->hostlist);
-	xfree(arg->alloc_lps);
-	arg->alloc_lps_cnt = 0;
+	xfree(arg->job_hostlist);
+	xfree(arg->sock_core_rep_count);
+	xfree(arg->sockets_per_node);
 }
 
 int
@@ -743,15 +804,31 @@ slurm_cred_get_args(slurm_cred_t cred, slurm_cred_arg_t *arg)
 	arg->stepid   = cred->stepid;
 	arg->uid      = cred->uid;
 	arg->job_mem  = cred->job_mem;
-	arg->task_mem = cred->task_mem;
 	arg->hostlist = xstrdup(cred->nodes);
-	arg->alloc_lps_cnt = cred->alloc_lps_cnt;
-	if (arg->alloc_lps_cnt > 0) {
-		arg->alloc_lps = xmalloc(arg->alloc_lps_cnt * sizeof(uint32_t));
-		memcpy(arg->alloc_lps, cred->alloc_lps, 
-		       arg->alloc_lps_cnt * sizeof(uint32_t));
-	} else
-		arg->alloc_lps = NULL;
+#ifdef HAVE_BG
+	arg->core_bitmap = NULL;
+	arg->cores_per_socket = NULL;
+	arg->sockets_per_node = NULL;
+	arg->sock_core_rep_count = NULL;
+	arg->job_nhosts = 0;
+	arg->job_hostlist = NULL;
+#else
+	arg->core_bitmap = bit_copy(cred->core_bitmap);
+	arg->cores_per_socket = xmalloc(sizeof(uint16_t) * 
+					cred->core_array_size);
+	memcpy(arg->cores_per_socket, cred->cores_per_socket,
+	       (sizeof(uint16_t) * cred->core_array_size));
+	arg->sockets_per_node = xmalloc(sizeof(uint16_t) * 
+					cred->core_array_size);
+	memcpy(arg->sockets_per_node, cred->sockets_per_node,
+	       (sizeof(uint16_t) * cred->core_array_size));
+	arg->sock_core_rep_count = xmalloc(sizeof(uint32_t) * 
+					   cred->core_array_size);
+	memcpy(arg->sock_core_rep_count, cred->sock_core_rep_count,
+	       (sizeof(uint32_t) * cred->core_array_size));
+	arg->job_nhosts = cred->job_nhosts;
+	arg->job_hostlist = xstrdup(cred->job_hostlist);
+#endif
 	slurm_mutex_unlock(&cred->mutex);
 
 	return SLURM_SUCCESS;
@@ -808,16 +885,32 @@ slurm_cred_verify(slurm_cred_ctx_t ctx, slurm_cred_t cred,
 	arg->stepid   = cred->stepid;
 	arg->uid      = cred->uid;
 	arg->job_mem  = cred->job_mem;
-	arg->task_mem = cred->task_mem;
 	arg->hostlist = xstrdup(cred->nodes);
-	arg->alloc_lps_cnt = cred->alloc_lps_cnt;
-	if (arg->alloc_lps_cnt > 0) {
-		arg->alloc_lps = xmalloc(arg->alloc_lps_cnt * sizeof(uint32_t));
-		memcpy(arg->alloc_lps, cred->alloc_lps, 
-		       arg->alloc_lps_cnt * sizeof(uint32_t));
-	} else
-		arg->alloc_lps = NULL;
 
+#ifdef HAVE_BG
+	arg->core_bitmap = NULL;
+	arg->cores_per_socket = NULL;
+	arg->sockets_per_node = NULL;
+	arg->sock_core_rep_count = NULL;
+	arg->job_nhosts = 0;
+	arg->job_hostlist = NULL;
+#else
+	arg->core_bitmap = bit_copy(cred->core_bitmap);
+	arg->cores_per_socket = xmalloc(sizeof(uint16_t) * 
+					cred->core_array_size);
+	memcpy(arg->cores_per_socket, cred->cores_per_socket,
+	       (sizeof(uint16_t) * cred->core_array_size));
+	arg->sockets_per_node = xmalloc(sizeof(uint16_t) * 
+					cred->core_array_size);
+	memcpy(arg->sockets_per_node, cred->sockets_per_node,
+	       (sizeof(uint16_t) * cred->core_array_size));
+	arg->sock_core_rep_count = xmalloc(sizeof(uint32_t) * 
+					   cred->core_array_size);
+	memcpy(arg->sock_core_rep_count, cred->sock_core_rep_count,
+	       (sizeof(uint32_t) * cred->core_array_size));
+	arg->job_nhosts = cred->job_nhosts;
+	arg->job_hostlist = xstrdup(cred->job_hostlist);
+#endif
 	slurm_mutex_unlock(&cred->mutex);
 
 	return SLURM_SUCCESS;
@@ -840,8 +933,17 @@ slurm_cred_destroy(slurm_cred_t cred)
 	xassert(cred->magic == CRED_MAGIC);
 
 	slurm_mutex_lock(&cred->mutex);
+#ifndef HAVE_BG
+	if (cred->core_bitmap) {
+		bit_free(cred->core_bitmap);
+		cred->core_bitmap = NULL;
+	}
+	xfree(cred->cores_per_socket);
+	xfree(cred->job_hostlist);
+	xfree(cred->sock_core_rep_count);
+	xfree(cred->sockets_per_node);
+#endif
 	xfree(cred->nodes);
-	xfree(cred->alloc_lps);
 	xfree(cred->signature);
 	xassert(cred->magic = ~CRED_MAGIC);
 
@@ -904,7 +1006,8 @@ slurm_cred_rewind(slurm_cred_ctx_t ctx, slurm_cred_t cred)
 	xassert(ctx->magic == CRED_CTX_MAGIC);
 	xassert(ctx->type  == SLURM_CRED_VERIFIER);
 
-	rc = list_delete_all(ctx->state_list, (ListFindF) _find_cred_state, cred);
+	rc = list_delete_all(ctx->state_list, 
+			     (ListFindF) _find_cred_state, cred);
 
 	slurm_mutex_unlock(&ctx->mutex);
 
@@ -1024,9 +1127,9 @@ slurm_cred_pack(slurm_cred_t cred, Buf buffer)
 slurm_cred_t
 slurm_cred_unpack(Buf buffer)
 {
-	uint32_t     len;
-	uint32_t     tmpint;
+	uint32_t     cred_uid, len;
 	slurm_cred_t cred = NULL;
+	char        *bit_fmt = NULL;
 	char       **sigp;
 
 	xassert(buffer != NULL);
@@ -1034,29 +1137,50 @@ slurm_cred_unpack(Buf buffer)
 	cred = _slurm_cred_alloc();
 	slurm_mutex_lock(&cred->mutex);
 
+	safe_unpack32(          &cred->jobid,         buffer);
+	safe_unpack32(          &cred->stepid,        buffer);
+	safe_unpack32(          &cred_uid,            buffer);
+	cred->uid = cred_uid;
+	safe_unpack32(          &cred->job_mem,       buffer);
+	safe_unpackstr_xmalloc( &cred->nodes, &len,   buffer);
+	safe_unpack_time(       &cred->ctime,         buffer);
+#ifndef HAVE_BG
+{
+	uint32_t tot_core_cnt;
+	safe_unpack32(          &tot_core_cnt,        buffer);
+	safe_unpackstr_xmalloc( &bit_fmt,     &len,   buffer);
+	cred->core_bitmap = bit_alloc((bitoff_t) tot_core_cnt);
+	if (bit_unfmt(cred->core_bitmap, bit_fmt))
+		goto unpack_error;
+	xfree(bit_fmt);
+	safe_unpack16(          &cred->core_array_size, buffer);
+	if (cred->core_array_size) {
+                safe_unpack16_array(&cred->cores_per_socket, &len,  buffer);
+		if (len != cred->core_array_size)
+			goto unpack_error;
+                safe_unpack16_array(&cred->sockets_per_node, &len,  buffer);
+		if (len != cred->core_array_size)
+			goto unpack_error;
+                safe_unpack32_array(&cred->sock_core_rep_count, &len,  
+				    buffer);
+		if (len != cred->core_array_size)
+			goto unpack_error;
+	}
+	safe_unpack32(          &cred->job_nhosts,           buffer);
+	safe_unpackstr_xmalloc( &cred->job_hostlist, &len,   buffer);
+}
+#endif
+	/* "sigp" must be last */
 	sigp = (char **) &cred->signature;
-
-	safe_unpack32(          &cred->jobid,        buffer);
-	safe_unpack32(          &cred->stepid,       buffer);
-	safe_unpack32(          &tmpint,             buffer);
-	cred->uid = tmpint;
-	safe_unpack32(          &cred->job_mem,      buffer);
-	safe_unpack32(          &cred->task_mem,     buffer);
-	safe_unpackstr_xmalloc( &cred->nodes, &len,  buffer);
-	safe_unpack32(          &cred->alloc_lps_cnt,     buffer);
-        if (cred->alloc_lps_cnt > 0)
-                safe_unpack32_array(&cred->alloc_lps, &tmpint,  buffer);
-	safe_unpack_time(       &cred->ctime,        buffer);
-	safe_unpackmem_xmalloc( sigp,         &len,  buffer);
-
-	xassert(len > 0);
-
+	safe_unpackmem_xmalloc( sigp,                &len,   buffer);
 	cred->siglen = len;
+	xassert(len > 0);
 
 	slurm_mutex_unlock(&cred->mutex);
 	return cred;
 
     unpack_error:
+	xfree(bit_fmt);
 	slurm_mutex_unlock(&cred->mutex);
 	slurm_cred_destroy(cred);
 	return NULL;
@@ -1097,8 +1221,6 @@ slurm_cred_ctx_unpack(slurm_cred_ctx_t ctx, Buf buffer)
 void
 slurm_cred_print(slurm_cred_t cred)
 {
-        int i;
-
 	if (cred == NULL)
 		return;
 
@@ -1106,33 +1228,32 @@ slurm_cred_print(slurm_cred_t cred)
 
 	xassert(cred->magic == CRED_MAGIC);
 
-	info("Cred: Jobid    %u",  cred->jobid         );
-	info("Cred: Stepid   %u",  cred->jobid         );
-	info("Cred: UID      %lu", (u_long) cred->uid  );
-	info("Cred: job_mem  %u",  cred->job_mem       );
-	info("Cred: task_mem %u",  cred->task_mem      );
-	info("Cred: Nodes    %s",  cred->nodes         );
-	info("Cred: alloc_lps_cnt %u", cred->alloc_lps_cnt     ); 
-	info("Cred: alloc_lps: ");                            
-	for (i=0; i<cred->alloc_lps_cnt; i++)                 
-		info("alloc_lps[%d] = %u ", i, cred->alloc_lps[i]);
-	info("Cred: ctime    %s",  ctime(&cred->ctime) );
-	info("Cred: siglen   %u",  cred->siglen        );
-	slurm_mutex_unlock(&cred->mutex);
-
-}
-
-int slurm_cred_get_alloc_lps(slurm_cred_t cred, char **nodes,
-			     uint32_t *alloc_lps_cnt, uint32_t **alloc_lps)
+	info("Cred: Jobid         %u",  cred->jobid         );
+	info("Cred: Stepid        %u",  cred->jobid         );
+	info("Cred: UID           %u",  (uint32_t) cred->uid);
+	info("Cred: job_mem       %u",  cred->job_mem       );
+	info("Cred: Nodes         %s",  cred->nodes         );
+	info("Cred: ctime         %s",  ctime(&cred->ctime) );
+	info("Cred: siglen        %u",  cred->siglen        );
+#ifndef HAVE_BG
 {
-	if ((cred == NULL) || (nodes == NULL) ||
-	    (alloc_lps_cnt == NULL) || (alloc_lps == NULL))
-		return EINVAL;
+	int i;
+	char str[128];
+	info("Cred: core_bitmap   %s", 
+	     bit_fmt(str, sizeof(str), cred->core_bitmap));
+	info("Cred: sockets_per_node, cores_per_socket, rep_count");
+	for (i=0; i<cred->core_array_size; i++) {
+		info("      socks:%u cores:%u reps:%u", 
+		     cred->sockets_per_node[i],
+		     cred->cores_per_socket[i],
+		     cred->sock_core_rep_count[i]);
+	}
+	info("Cred: job_nhosts   %u",   cred->job_nhosts    );
+	info("Cred: job_hostlist %s",   cred->job_hostlist  );
+}
+#endif
+	slurm_mutex_unlock(&cred->mutex);
 
-	*nodes         = cred->nodes;
-	*alloc_lps_cnt = cred->alloc_lps_cnt;
-	*alloc_lps     = cred->alloc_lps;
-	return SLURM_SUCCESS;
 }
 
 static void 
@@ -1284,8 +1405,11 @@ _slurm_cred_sign(slurm_cred_ctx_t ctx, slurm_cred_t cred)
 			&cred->signature, &cred->siglen);
 	free_buf(buffer);
 
-	if (rc)
+	if (rc) {
+		error("Credential sign: %s", 
+		      (*(g_crypto_context->ops.crypto_str_error))(rc));
 		return SLURM_ERROR;
+	}
 	return SLURM_SUCCESS;
 }
 
@@ -1303,15 +1427,15 @@ _slurm_cred_verify_signature(slurm_cred_ctx_t ctx, slurm_cred_t cred)
 			get_buf_data(buffer), get_buf_offset(buffer),
 			cred->signature, cred->siglen);
 	if (rc && _exkey_is_valid(ctx)) {
-		rc = (*(g_crypto_context->ops.crypto_verify_sign))(ctx->key, 
+		rc = (*(g_crypto_context->ops.crypto_verify_sign))(ctx->exkey, 
 			get_buf_data(buffer), get_buf_offset(buffer),
 			cred->signature, cred->siglen);
 	}
 	free_buf(buffer);
 
 	if (rc) {
-		info("Credential signature check: %s", 
-			(*(g_crypto_context->ops.crypto_str_error))());
+		error("Credential signature check: %s", 
+		      (*(g_crypto_context->ops.crypto_str_error))(rc));
 		return SLURM_ERROR;
 	}
 	return SLURM_SUCCESS;
@@ -1321,16 +1445,33 @@ _slurm_cred_verify_signature(slurm_cred_ctx_t ctx, slurm_cred_t cred)
 static void
 _pack_cred(slurm_cred_t cred, Buf buffer)
 {
-	pack32(           cred->jobid,    buffer);
-	pack32(           cred->stepid,   buffer);
-	pack32((uint32_t) cred->uid,      buffer);
-	pack32(           cred->job_mem,  buffer);
-	pack32(           cred->task_mem, buffer);
-	packstr(          cred->nodes,    buffer);
-	pack32(           cred->alloc_lps_cnt, buffer);
-	if (cred->alloc_lps_cnt > 0)
-		pack32_array( cred->alloc_lps, cred->alloc_lps_cnt, buffer);
-	pack_time(        cred->ctime,  buffer);
+	uint32_t cred_uid = (uint32_t) cred->uid;
+
+	pack32(cred->jobid,    buffer);
+	pack32(cred->stepid,   buffer);
+	pack32(cred_uid,       buffer);
+	pack32(cred->job_mem,  buffer);
+	packstr(cred->nodes,   buffer);
+	pack_time(cred->ctime, buffer);
+#ifndef HAVE_BG
+{
+	uint32_t tot_core_cnt;
+	tot_core_cnt = bit_size(cred->core_bitmap);
+	pack32(tot_core_cnt, buffer);
+	pack_bit_fmt(cred->core_bitmap, buffer);
+	pack16(cred->core_array_size, buffer);
+	if (cred->core_array_size) {
+		pack16_array(cred->cores_per_socket, cred->core_array_size, 
+			     buffer);
+		pack16_array(cred->sockets_per_node, cred->core_array_size, 
+			     buffer);
+		pack32_array(cred->sock_core_rep_count, cred->core_array_size,
+			     buffer);
+	}
+	pack32(cred->job_nhosts,    buffer);
+	packstr(cred->job_hostlist, buffer);
+}
+#endif
 }
 
 
@@ -1345,7 +1486,9 @@ _credential_replayed(slurm_cred_ctx_t ctx, slurm_cred_t cred)
 	i = list_iterator_create(ctx->state_list);
 
 	while ((s = list_next(i))) {
-		if ((s->jobid == cred->jobid) && (s->stepid == cred->stepid))
+		if ((s->jobid  == cred->jobid)  &&
+		    (s->stepid == cred->stepid) &&
+		    (s->ctime  == cred->ctime))
 			break;
 	}
 
@@ -1459,7 +1602,8 @@ _find_job_state(slurm_cred_ctx_t ctx, uint32_t jobid)
 static int
 _find_cred_state(cred_state_t *c, slurm_cred_t cred)
 {
-	return ((c->jobid == cred->jobid) && (c->stepid == cred->stepid));
+	return ((c->jobid == cred->jobid) && (c->stepid == cred->stepid) &&
+		(c->ctime == cred->ctime));
 }
 
 static job_state_t *
@@ -1560,6 +1704,7 @@ _cred_state_create(slurm_cred_ctx_t ctx, slurm_cred_t cred)
 
 	s->jobid      = cred->jobid;
 	s->stepid     = cred->stepid;
+	s->ctime      = cred->ctime;
 	s->expiration = cred->ctime + ctx->expiry_window;
 
 	return s;
@@ -1577,6 +1722,7 @@ _cred_state_pack_one(cred_state_t *s, Buf buffer)
 {
 	pack32(s->jobid, buffer);
 	pack32(s->stepid, buffer);
+	pack_time(s->ctime, buffer);
 	pack_time(s->expiration, buffer);
 }
 
@@ -1588,6 +1734,7 @@ _cred_state_unpack_one(Buf buffer)
 
 	safe_unpack32(&s->jobid, buffer);
 	safe_unpack32(&s->stepid, buffer);
+	safe_unpack_time(&s->ctime, buffer);
 	safe_unpack_time(&s->expiration, buffer);
 	return s;
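The credential changes in this file replace the flat alloc_lps array with a run-length encoded layout: entry i of the three parallel arrays says that sock_core_rep_count[i] consecutive nodes each carry sockets_per_node[i] sockets of cores_per_socket[i] cores. A hedged sketch of decoding that encoding, mirroring the accumulation loop used in slurm_cred_create() above:

/* Sketch: total core count described by the RLE arrays in a
 * slurm_cred_arg_t.  The result should equal the size of
 * arg->core_bitmap; the set bits mark the cores owned by this STEP. */
static uint32_t _cred_total_cores(slurm_cred_arg_t *arg)
{
	uint32_t i, node_recs = 0, total = 0;

	for (i = 0; node_recs < arg->job_nhosts; i++) {
		total += (uint32_t) arg->sockets_per_node[i] *
			 arg->cores_per_socket[i] *
			 arg->sock_core_rep_count[i];
		node_recs += arg->sock_core_rep_count[i];
	}
	return total;
}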
 
diff --git a/src/common/slurm_cred.h b/src/common/slurm_cred.h
index 01ce80550a0d71d80db401ae398abd962928e0db..8259b8b5219015c51adbcc6095757618b464cc24 100644
--- a/src/common/slurm_cred.h
+++ b/src/common/slurm_cred.h
@@ -1,14 +1,14 @@
 /*****************************************************************************\
  *  src/common/slurm_cred.h  - SLURM job credential operations
- *  $Id: slurm_cred.h 14499 2008-07-11 22:54:48Z jette $
  *****************************************************************************
  *  Copyright (C) 2002-2006 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Mark Grondona <grondona1@llnl.gov>.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -51,6 +51,7 @@
 #  include <sys/types.h>
 #endif
 
+#include "src/common/bitstring.h"
 #include "src/common/macros.h"
 #include "src/common/pack.h"
 
@@ -124,17 +125,28 @@ int  slurm_cred_ctx_unpack(slurm_cred_ctx_t ctx, Buf buffer);
 
 
 /*
- * Container for SLURM credential create and verify arguments:
+ * Container for SLURM credential create and verify arguments
+ *
+ * The core_bitmap, cores_per_socket, sockets_per_node, and 
+ * sock_core_rep_count are based upon the nodes allocated to the
+ * JOB, but the bits set in core_bitmap are those cores allocated
+ * to this STEP
  */
 typedef struct {
 	uint32_t jobid;
 	uint32_t stepid;
-	uint32_t job_mem;	/* MB of memory reserved for job */
-	uint32_t task_mem;	/* MB of memory reserved per task */
+	uint32_t job_mem;	/* MB of memory reserved per node OR
+				 * real memory per CPU | MEM_PER_CPU,
+				 * default=0 (no limit) */
 	uid_t    uid;
 	char    *hostlist;
-	uint32_t alloc_lps_cnt;
-        uint32_t *alloc_lps;
+
+	bitstr_t *core_bitmap;
+	uint16_t *cores_per_socket;
+	uint16_t *sockets_per_node;
+	uint32_t *sock_core_rep_count;
+	uint32_t  job_nhosts;	/* count of nodes allocated to JOB */
+	char     *job_hostlist;	/* list of nodes allocated to JOB */
 } slurm_cred_arg_t;
 
 /* Terminate the plugin and release all memory. */
@@ -268,17 +280,11 @@ slurm_cred_t slurm_cred_unpack(Buf buffer);
  */
 int slurm_cred_get_signature(slurm_cred_t cred, char **datap, int *len);
 
-
 /*
  * Print a slurm job credential using the info() call
  */
 void slurm_cred_print(slurm_cred_t cred);
 
-/*
- * Get count of allocated LPS (processors) by node
- */
-int slurm_cred_get_alloc_lps(slurm_cred_t cred, char **nodes, 
-			     uint32_t *alloc_lps_cnt, uint32_t **alloc_lps);
 #ifdef DISABLE_LOCALTIME
 extern char * timestr (const time_t *tp, char *buf, size_t n);
 #endif
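A hedged sketch of filling the reworked container for slurm_cred_create(): a 2-node job where each node has 1 socket of 4 cores, so one repetition record (count 2) describes the whole allocation. The key path and host names are illustrative only. slurm_cred_create() copies all of the fields, so stack arrays and string literals are safe here, but such an arg must not be handed to slurm_cred_free_args(), which would try to xfree the literals.

slurm_cred_ctx_t ctx = slurm_cred_creator_ctx_create("/etc/slurm/slurm.key");
slurm_cred_arg_t arg;
slurm_cred_t cred;
uint16_t socks[1] = {1}, cores[1] = {4};
uint32_t reps[1]  = {2};

memset(&arg, 0, sizeof(arg));
arg.jobid    = 1234;
arg.stepid   = 0;
arg.uid      = getuid();
arg.job_mem  = 0;			/* 0 = no memory limit */
arg.hostlist = "tux[0-1]";		/* nodes used by this STEP */
arg.core_bitmap = bit_alloc(8);		/* 2 nodes x 4 cores in the JOB */
bit_nset(arg.core_bitmap, 0, 3);	/* the step owns node 0's cores */
arg.sockets_per_node    = socks;
arg.cores_per_socket    = cores;
arg.sock_core_rep_count = reps;
arg.job_nhosts   = 2;
arg.job_hostlist = "tux[0-1]";
cred = slurm_cred_create(ctx, &arg);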
diff --git a/src/common/slurm_errno.c b/src/common/slurm_errno.c
index e04bcfbd7aa80b76f4dcedcad5d8b2131ec6cd4b..02e6071a6a6698ec639173d71c766ea0dadb9ea7 100644
--- a/src/common/slurm_errno.c
+++ b/src/common/slurm_errno.c
@@ -1,13 +1,15 @@
 /*****************************************************************************\
  *  slurm_errno.c - error codes and functions for slurm
  ******************************************************************************
- *  Copyright (C) 2002-2006 The Regents of the University of California.
+ *  Copyright (C) 2002-2007 The Regents of the University of California.
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Jim Garlick <garlick@llnl.gov>, et. al.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -220,9 +222,28 @@ static slurm_errtab_t slurm_errtab[] = {
 	  "The node configuration changes that were made require restart "
 	  "of the slurmctld daemon to take effect"},
 	{ ESLURM_ACCOUNTING_POLICY,
-	  "Job violates accounting policy (job submit limit, the user's size and/or time limits)"},
+	  "Job violates accounting policy (job submit limit, the user's "
+	  "size and/or time limits)"},
 	{ ESLURM_INVALID_TIME_LIMIT,
 	  "Requested time limit exceeds partition limit"	},
+	{ ESLURM_RESERVATION_ACCESS,
+	  "Access denied to requested reservation"		},
+	{ ESLURM_RESERVATION_INVALID,
+	  "Requested reservation is invalid"			},
+	{ ESLURM_INVALID_TIME_VALUE,
+	  "Invalid time specified"				},
+	{ ESLURM_RESERVATION_BUSY, 
+	  "Requested reservation is in use"			},
+	{ ESLURM_RESERVATION_NOT_USABLE, 
+	  "Requested reservation not usable now"		},
+	{ ESLURM_RESERVATION_OVERLAP, 
+	  "Requested reservation overlaps with another reservation"	},
+	{ ESLURM_PORTS_BUSY,
+	  "Requires ports are in use"				},
+	{ ESLURM_PORTS_INVALID,
+	  "Requires more ports than can be reserved"		},
+	{ ESLURM_PROLOG_RUNNING,
+	  "SlurmctldProlog is still running"			},
 
 	/* slurmd error codes */
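For context on how the new entries above are resolved: lookup in this file is a linear scan of slurm_errtab, falling back to the C library for codes that are not SLURM-specific. The member names follow this file's slurm_errtab_t; the strerror() fallback is assumed from the existing slurm_strerror() implementation. A condensed sketch:

static char *_lookup_errtab(int errnum)
{
	int i;
	int nentries = sizeof(slurm_errtab) / sizeof(slurm_errtab_t);

	for (i = 0; i < nentries; i++) {
		if (slurm_errtab[i].xe_number == errnum)
			return slurm_errtab[i].xe_message;
	}
	return strerror(errnum);	/* not a SLURM-specific code */
}

So, for example, slurm_strerror(ESLURM_RESERVATION_BUSY) yields "Requested reservation is in use".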
 
diff --git a/src/common/slurm_jobacct_gather.c b/src/common/slurm_jobacct_gather.c
index 834c22838161ac7f76eb0c324cd042baf82fa324..96d7ebe57694d9ff8cc1543325b1ec0cd0f96884 100644
--- a/src/common/slurm_jobacct_gather.c
+++ b/src/common/slurm_jobacct_gather.c
@@ -5,12 +5,13 @@
  *  Copyright (C) 2003-2007/ The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Jay Windley <jwindley@lnxi.com>, Morris Jette <jette1@llnl.com>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  Copyright (C) 2005 Hewlett-Packard Development Company, L.P.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/common/slurm_jobacct_gather.h b/src/common/slurm_jobacct_gather.h
index 3059872e28984ae67f4eaa2ec1df42908ade257b..e7faaa499f5da0ffc8160bb095a66ba3d6a8e306 100644
--- a/src/common/slurm_jobacct_gather.h
+++ b/src/common/slurm_jobacct_gather.h
@@ -5,12 +5,13 @@
  *  Copyright (C) 2003 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette@llnl.com> et. al.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  Copyright (C) 2005 Hewlett-Packard Development Company, L.P.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/common/slurm_jobcomp.c b/src/common/slurm_jobcomp.c
index b9ed9d1580c0eb97eeefb00d8120a8b416bb1f89..9f6755d5544b62704fa22248c448fbe73f6f66bb 100644
--- a/src/common/slurm_jobcomp.c
+++ b/src/common/slurm_jobcomp.c
@@ -5,10 +5,11 @@
  *  Copyright (C) 2003 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Jay Windley <jwindley@lnxi.com>, Morris Jette <jette1@llnl.com>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -268,13 +269,16 @@ g_slurm_jobcomp_init( char *jobcomp_loc )
 extern int
 g_slurm_jobcomp_fini(void)
 {
-	int rc;
+	slurm_mutex_lock( &context_lock );
 
 	if ( !g_context)
-		return SLURM_SUCCESS;
+		goto done;
 
-	rc = _slurm_jobcomp_context_destroy ( g_context );
+	_slurm_jobcomp_context_destroy ( g_context );
 	g_context = NULL;
+
+  done:
+	slurm_mutex_unlock( &context_lock );
 	return SLURM_SUCCESS;
 }
 
diff --git a/src/common/slurm_jobcomp.h b/src/common/slurm_jobcomp.h
index fdae7d9efca149f58ebff3e19d22825fc66fea0d..b56322853288c9e7f69e281bfb135fd963db741d 100644
--- a/src/common/slurm_jobcomp.h
+++ b/src/common/slurm_jobcomp.h
@@ -5,10 +5,11 @@
  *  Copyright (C) 2003 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.com> et. al.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/common/slurm_priority.c b/src/common/slurm_priority.c
new file mode 100644
index 0000000000000000000000000000000000000000..093718973fd99373303e06bbeed83e9c07f8ec88
--- /dev/null
+++ b/src/common/slurm_priority.c
@@ -0,0 +1,277 @@
+/*****************************************************************************\
+ *  slurm_priority.c - Define priority plugin functions
+ *****************************************************************************
+ *  Copyright (C) 2008 Lawrence Livermore National Security.
+ *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+ *  Written by Danny Auble <da@llnl.gov>
+ *  CODE-OCEC-09-009. All rights reserved.
+ *  
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
+ *  
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission 
+ *  to link the code of portions of this program with the OpenSSL library under 
+ *  certain conditions as described in each individual source file, and 
+ *  distribute linked combinations including the two. You must obey the GNU 
+ *  General Public License in all respects for all of the code used other than 
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this 
+ *  exception to your version of the file(s), but you are not obligated to do 
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in 
+ *  the program, then also delete it here.
+ *  
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *  
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#include "src/common/slurm_priority.h"
+#include "src/common/plugin.h"
+#include "src/common/plugrack.h"
+#include "src/common/xstring.h"
+
+typedef struct slurm_priority_ops {
+	uint32_t (*set)            (uint32_t last_prio,
+				    struct job_record *job_ptr);
+	void     (*reconfig)       ();
+	int      (*set_max_usage)  (uint32_t procs, uint32_t half_life);
+	void     (*set_assoc_usage)(acct_association_rec_t *assoc);
+	List	 (*get_priority_factors)
+				   (priority_factors_request_msg_t *req_msg);
+
+} slurm_priority_ops_t;
+
+typedef struct slurm_priority_context {
+	char	       	*priority_type;
+	plugrack_t     	plugin_list;
+	plugin_handle_t	cur_plugin;
+	int		priority_errno;
+	slurm_priority_ops_t ops;
+} slurm_priority_context_t;
+
+static slurm_priority_context_t * g_priority_context = NULL;
+static pthread_mutex_t		g_priority_context_lock = 
+	PTHREAD_MUTEX_INITIALIZER;
+
+/*
+ * Local functions
+ */
+static slurm_priority_ops_t *_priority_get_ops(
+	slurm_priority_context_t *c);
+static slurm_priority_context_t *_priority_context_create(
+	const char *priority_type);
+static int _priority_context_destroy(
+	slurm_priority_context_t *c);
+
+/*
+ * Locate and load the appropriate plugin
+ */
+static slurm_priority_ops_t * _priority_get_ops(
+	slurm_priority_context_t *c)
+{
+	/*
+	 * Must be synchronized with slurm_priority_ops_t above.
+	 */
+	static const char *syms[] = {
+		"priority_p_set",
+		"priority_p_reconfig",
+		"priority_p_set_max_cluster_usage",
+		"priority_p_set_assoc_usage",
+		"priority_p_get_priority_factors_list",
+	};
+	int n_syms = sizeof( syms ) / sizeof( char * );
+
+	/* Find the correct plugin. */
+        c->cur_plugin = plugin_load_and_link(c->priority_type, n_syms, syms,
+					     (void **) &c->ops);
+        if ( c->cur_plugin != PLUGIN_INVALID_HANDLE ) 
+        	return &c->ops;
+
+	error("Couldn't find the specified plugin name for %s "
+	      "looking at all files",
+	      c->priority_type);
+
+	/* Get plugin list. */
+	if ( c->plugin_list == NULL ) {
+		char *plugin_dir;
+		c->plugin_list = plugrack_create();
+		if ( c->plugin_list == NULL ) {
+			error( "cannot create plugin manager" );
+			return NULL;
+		}
+		plugrack_set_major_type( c->plugin_list, "priority" );
+		plugrack_set_paranoia( c->plugin_list,
+				       PLUGRACK_PARANOIA_NONE,
+				       0 );
+		plugin_dir = slurm_get_plugin_dir();
+		plugrack_read_dir( c->plugin_list, plugin_dir );
+		xfree(plugin_dir);
+	}
+
+	c->cur_plugin = plugrack_use_by_type( c->plugin_list,
+					      c->priority_type );
+	if ( c->cur_plugin == PLUGIN_INVALID_HANDLE ) {
+		error( "cannot find accounting_storage plugin for %s", 
+		       c->priority_type );
+		return NULL;
+	}
+
+	/* Dereference the API. */
+	if ( plugin_get_syms( c->cur_plugin,
+			      n_syms,
+			      syms,
+			      (void **) &c->ops ) < n_syms ) {
+		error( "incomplete priority plugin detected" );
+		return NULL;
+	}
+
+	return &c->ops;
+}
+
+/*
+ * Create a priority context
+ */
+static slurm_priority_context_t *_priority_context_create(
+	const char *priority_type)
+{
+	slurm_priority_context_t *c;
+
+	if ( priority_type == NULL ) {
+		debug3( "_priority_context_create: no uler type" );
+		return NULL;
+	}
+
+	c = xmalloc( sizeof( slurm_priority_context_t ) );
+	c->priority_type	= xstrdup( priority_type );
+	c->plugin_list	= NULL;
+	c->cur_plugin	= PLUGIN_INVALID_HANDLE;
+	c->priority_errno	= SLURM_SUCCESS;
+
+	return c;
+}
+
+/*
+ * Destroy a priority context
+ */
+static int _priority_context_destroy(slurm_priority_context_t *c)
+{
+	/*
+	 * Must check return code here because plugins might still
+	 * be loaded and active.
+	 */
+	if ( c->plugin_list ) {
+		if ( plugrack_destroy( c->plugin_list ) != SLURM_SUCCESS ) {
+			return SLURM_ERROR;
+		}
+	} else {
+		plugin_unload(c->cur_plugin);
+	}
+
+	xfree( c->priority_type );
+	xfree( c );
+
+	return SLURM_SUCCESS;
+}
+
+/*
+ * Initialize context for priority plugin
+ */
+extern int slurm_priority_init(void)
+{
+	int retval = SLURM_SUCCESS;
+	char *priority_type = NULL;
+	
+	slurm_mutex_lock( &g_priority_context_lock );
+
+	if ( g_priority_context )
+		goto done;
+	
+	priority_type = slurm_get_priority_type();
+	
+	g_priority_context = _priority_context_create(priority_type);
+	if ( g_priority_context == NULL ) {
+		error( "cannot create priority context for %s",
+		       priority_type );
+		retval = SLURM_ERROR;
+		goto done;
+	}
+
+	if ( _priority_get_ops( g_priority_context ) == NULL ) {
+		error( "cannot resolve priority plugin operations" );
+		_priority_context_destroy( g_priority_context );
+		g_priority_context = NULL;
+		retval = SLURM_ERROR;
+	}
+
+done:
+	slurm_mutex_unlock( &g_priority_context_lock );
+	xfree(priority_type);
+	return retval;
+}
+
+extern int slurm_priority_fini(void)
+{
+	int rc;
+
+	if (!g_priority_context)
+		return SLURM_SUCCESS;
+
+	rc = _priority_context_destroy( g_priority_context );
+	g_priority_context = NULL;
+	return rc;
+}
+
+extern uint32_t priority_g_set(uint32_t last_prio, struct job_record *job_ptr)
+{
+	if (slurm_priority_init() < 0)
+		return 0;
+
+	return (*(g_priority_context->ops.set))(last_prio, job_ptr);
+}
+
+extern void priority_g_reconfig()
+{
+	if (slurm_priority_init() < 0)
+		return;
+
+	(*(g_priority_context->ops.reconfig))();
+
+	return;
+}
+
+extern int priority_g_set_max_cluster_usage(uint32_t procs, uint32_t half_life)
+{
+	if (slurm_priority_init() < 0)
+		return SLURM_ERROR;
+
+	return (*(g_priority_context->ops.set_max_usage))(procs, half_life);
+}
+
+extern void priority_g_set_assoc_usage(acct_association_rec_t *assoc)
+{
+	if (slurm_priority_init() < 0)
+		return;
+
+	(*(g_priority_context->ops.set_assoc_usage))(assoc);
+	return;
+}
+
+extern List priority_g_get_priority_factors_list(
+	priority_factors_request_msg_t *req_msg)
+{
+	if (slurm_priority_init() < 0)
+		return NULL;
+
+	return (*(g_priority_context->ops.get_priority_factors))(req_msg);
+}
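Since every priority_g_* wrapper above calls slurm_priority_init() first (serialized by g_priority_context_lock), callers never initialize the plugin explicitly. A hedged caller-side sketch, with last_prio and job_ptr standing in for whatever state slurmctld already tracks:

/* Illustrative only: assign a priority to a newly submitted job. */
uint32_t prio = priority_g_set(last_prio, job_ptr);
if (prio == 0)
	error("priority plugin failed to assign a priority");
else
	job_ptr->priority = prio;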
diff --git a/src/common/slurm_priority.h b/src/common/slurm_priority.h
new file mode 100644
index 0000000000000000000000000000000000000000..2cbe02d07de12895d9e7c0eac88b0ef426084028
--- /dev/null
+++ b/src/common/slurm_priority.h
@@ -0,0 +1,77 @@
+/*****************************************************************************\
+ *  slurm_priority.h - Define priority plugin functions
+ *****************************************************************************
+ *  Copyright (C) 2008 Lawrence Livermore National Security.
+ *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+ *  Written by Danny Auble <da@llnl.gov>
+ *  CODE-OCEC-09-009. All rights reserved.
+ *  
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
+ *  
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission 
+ *  to link the code of portions of this program with the OpenSSL library under 
+ *  certain conditions as described in each individual source file, and 
+ *  distribute linked combinations including the two. You must obey the GNU 
+ *  General Public License in all respects for all of the code used other than 
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this 
+ *  exception to your version of the file(s), but you are not obligated to do 
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in 
+ *  the program, then also delete it here.
+ *  
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *  
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#ifndef _SLURM_PRIORITY_H 
+#define _SLURM_PRIORITY_H
+
+#if HAVE_CONFIG_H
+#  include "config.h"
+#endif
+#if HAVE_STDINT_H
+#  include <stdint.h>           /* for uint16_t, uint32_t definitions */
+#endif
+#if HAVE_INTTYPES_H
+#  include <inttypes.h>         /* for uint16_t, uint32_t definitions */
+#endif
+
+#include "src/slurmctld/slurmctld.h"
+#include "src/common/slurm_accounting_storage.h"
+
+extern int slurm_priority_init(void);
+extern int slurm_priority_fini(void);
+extern uint32_t priority_g_set(uint32_t last_prio, struct job_record *job_ptr);
+extern void priority_g_reconfig();
+/*
+ * set up how much usage can happen on the cluster during a given half
+ * life.  This can only be done after we get a correct proc count for
+ * the system.
+ * IN: procs - number of processors on the system
+ * IN: half_life - half life time, in seconds
+ * RET: SLURM_SUCCESS on success, SLURM_ERROR otherwise
+ */
+extern int priority_g_set_max_cluster_usage(uint32_t procs, uint32_t half_life);
+
+/* sets up the normalized usage and the effective usage of an
+ * association.
+ * IN/OUT: assoc - association to have usage set.
+ */
+extern void priority_g_set_assoc_usage(acct_association_rec_t *assoc);
+extern List priority_g_get_priority_factors_list(
+	priority_factors_request_msg_t *req_msg);
+
+#endif /*_SLURM_PRIORITY_H */
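A skeletal plugin satisfying the five symbols resolved by the loader in slurm_priority.c. The plugin_name/plugin_type/plugin_version trio is the standard SLURM plugin boilerplate; "priority/basic" and the decrement-by-one policy merely sketch a simple FIFO scheme and are not prescribed by this header.

#include "src/common/slurm_priority.h"

const char plugin_name[]      = "Example priority plugin";
const char plugin_type[]      = "priority/basic";
const uint32_t plugin_version = 100;

extern uint32_t priority_p_set(uint32_t last_prio, struct job_record *job_ptr)
{
	/* newer jobs get successively lower priority (simple FIFO) */
	if (last_prio >= 2)
		return (last_prio - 1);
	return 1;
}

extern void priority_p_reconfig(void)
{
	/* nothing cached from slurm.conf, so nothing to do */
}

extern int priority_p_set_max_cluster_usage(uint32_t procs, uint32_t half_life)
{
	return SLURM_SUCCESS;	/* usage is not tracked by this example */
}

extern void priority_p_set_assoc_usage(acct_association_rec_t *assoc)
{
	/* no fair-share state to update */
}

extern List priority_p_get_priority_factors_list(
	priority_factors_request_msg_t *req_msg)
{
	return NULL;	/* no per-job factors to report */
}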
diff --git a/src/common/slurm_protocol_api.c b/src/common/slurm_protocol_api.c
index 3bf706dba7b3f3fc80e67a3fb20f59b5c0c0c1e7..5c73501a795508b7784b803d7a56731c22d3e570 100644
--- a/src/common/slurm_protocol_api.c
+++ b/src/common/slurm_protocol_api.c
@@ -2,13 +2,14 @@
  *  slurm_protocol_api.c - high-level slurm communication functions
  *****************************************************************************
  *  Copyright (C) 2002-2007 The Regents of the University of California.
- *  Copyright (C) 2008 Lawrence Livermore National Security.
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Kevin Tew <tew1@llnl.gov>, et. al.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -91,6 +92,10 @@ static char *_global_auth_key(void);
 static void  _remap_slurmctld_errno(void);
 static int   _unpack_msg_uid(Buf buffer);
 
+#if _DEBUG
+static void _print_data(char *data, int len);
+#endif
+
 /* define the slurmdbd_options flag */
 slurm_dbd_conf_t *slurmdbd_conf = NULL;
 
@@ -181,6 +186,23 @@ void slurm_api_clear_config(void)
 	slurm_conf_destroy();
 }
 
+/* slurm_get_complete_wait
+ * RET CompleteWait value from slurm.conf
+ */
+uint16_t slurm_get_complete_wait(void)
+{
+	uint16_t complete_wait = 0;
+	slurm_ctl_conf_t *conf;
+
+	if(slurmdbd_conf) {
+	} else {
+		conf = slurm_conf_lock();
+		complete_wait = conf->complete_wait;
+		slurm_conf_unlock();
+	}
+	return complete_wait;
+}
+
 /* update internal configuration data structure as needed.
  *	exit with lock set */
 /* static inline void _lock_update_config() */
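Every getter added to this file follows one idiom: a no-op when running inside slurmdbd (slurmdbd_conf is set and slurm.conf is not loaded), otherwise lock the config, copy the value out, and unlock. Scalars copy by assignment; strings must be xstrdup'd before the unlock, because the conf structure can be re-read at any time. A condensed fragment of the idiom (cluster_name is just a representative string field):

/* Sketch of the accessor idiom used throughout this file. */
uint16_t scalar_val = 0;
char *string_val = NULL;
slurm_ctl_conf_t *conf;

if (slurmdbd_conf) {
	/* slurmdbd context: slurm.conf values are unavailable */
} else {
	conf = slurm_conf_lock();
	scalar_val = conf->complete_wait;		/* copy by value */
	string_val = xstrdup(conf->cluster_name);	/* copy, then unlock */
	slurm_conf_unlock();
}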
@@ -206,6 +228,23 @@ uint16_t slurm_get_batch_start_timeout(void)
 	return batch_start_timeout;
 }
 
+/* slurm_get_resume_timeout
+ * RET ResumeTimeout value from slurm.conf
+ */
+uint16_t slurm_get_resume_timeout(void)
+{
+	uint16_t resume_timeout = 0;
+	slurm_ctl_conf_t *conf;
+
+	if(slurmdbd_conf) {
+	} else {
+		conf = slurm_conf_lock();
+		resume_timeout = conf->resume_timeout;
+		slurm_conf_unlock();
+	}
+	return resume_timeout;
+}
+
 /* slurm_get_def_mem_per_task
  * RET DefMemPerTask value from slurm.conf
  */
@@ -223,6 +262,40 @@ uint32_t slurm_get_def_mem_per_task(void)
 	return mem_per_task;
 }
 
+/* slurm_get_kill_on_bad_exit
+ * RET KillOnBadExit value from slurm.conf
+ */
+uint16_t slurm_get_kill_on_bad_exit(void)
+{
+	uint16_t kill_on_bad_exit = 0;
+	slurm_ctl_conf_t *conf;
+
+	if(slurmdbd_conf) {
+	} else {
+		conf = slurm_conf_lock();
+		kill_on_bad_exit = conf->kill_on_bad_exit;
+		slurm_conf_unlock();
+	}
+	return kill_on_bad_exit;
+}
+
+/* slurm_get_debug_flags
+ * RET DebugFlags value from slurm.conf
+ */
+uint32_t slurm_get_debug_flags(void)
+{
+	uint32_t debug_flags = 0;
+	slurm_ctl_conf_t *conf;
+
+	if(slurmdbd_conf) {
+	} else {
+		conf = slurm_conf_lock();
+		debug_flags = conf->debug_flags;
+		slurm_conf_unlock();
+	}
+	return debug_flags;
+}
+
 /* slurm_get_max_mem_per_task
  * RET MaxMemPerTask value from slurm.conf
  */
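DebugFlags is a bit mask, so consumers of the getter above test individual bits rather than compare the whole word. A hedged usage sketch; DEBUG_FLAG_STEPS stands in for whichever DEBUG_FLAG_* bit from slurm.h applies:

uint32_t debug_flags = slurm_get_debug_flags();

if (debug_flags & DEBUG_FLAG_STEPS)	/* representative flag bit */
	info("extra step-management debugging is enabled");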
@@ -292,6 +365,24 @@ char *slurm_get_mpi_default(void)
 	return mpi_default;
 }
 
+/* slurm_get_mpi_params
+ * get mpi parameters value from slurmctld_conf object
+ * RET char *   - mpi parameters value from slurm.conf, MUST be xfreed by caller
+ */
+char *slurm_get_mpi_params(void)
+{
+	char *mpi_params = NULL;
+	slurm_ctl_conf_t *conf;
+
+	if(slurmdbd_conf) {
+	} else {
+		conf = slurm_conf_lock();
+		mpi_params = xstrdup(conf->mpi_params);
+		slurm_conf_unlock();
+	}
+	return mpi_params;
+}
+
 /* slurm_get_msg_timeout
  * get default message timeout value from slurmctld_conf object
  */
@@ -332,6 +423,201 @@ char *slurm_get_plugin_dir(void)
 	return plugin_dir;
 }
 
+/* slurm_get_priority_decay_hl
+ * returns the priority decay half life in seconds from slurmctld_conf object
+ * RET uint32_t - decay_hl in secs.
+ */
+uint32_t slurm_get_priority_decay_hl(void)
+{
+	uint32_t priority_hl = NO_VAL;
+	slurm_ctl_conf_t *conf;
+
+	if(slurmdbd_conf) {		
+	} else {
+		conf = slurm_conf_lock();
+		priority_hl = conf->priority_decay_hl;
+		slurm_conf_unlock();
+	}
+
+	return priority_hl;
+}
+
+/* slurm_get_priority_favor_small
+ * returns whether or not we are favoring small jobs from slurmctld_conf object
+ * RET bool - true if favoring small jobs, false otherwise.
+ */
+bool slurm_get_priority_favor_small(void)
+{
+	bool factor = false;
+	slurm_ctl_conf_t *conf;
+
+	if(slurmdbd_conf) {		
+	} else {
+		conf = slurm_conf_lock();
+		factor = conf->priority_favor_small;
+		slurm_conf_unlock();
+	}
+
+	return factor;
+}
+
+
+/* slurm_get_priority_max_age
+ * returns the priority age max in seconds from slurmctld_conf object
+ * RET uint32_t - age_max in secs.
+ */
+uint32_t slurm_get_priority_max_age(void)
+{
+	uint32_t age = NO_VAL;
+	slurm_ctl_conf_t *conf;
+
+	if(slurmdbd_conf) {		
+	} else {
+		conf = slurm_conf_lock();
+		age = conf->priority_max_age;
+		slurm_conf_unlock();
+	}
+
+	return age;
+}
+
+/* slurm_get_priority_reset_period
+ * returns the priority usage reset period from slurmctld_conf object
+ * RET uint16_t - flag, see PRIORITY_RESET_* in slurm/slurm.h.
+ */
+uint16_t slurm_get_priority_reset_period(void)
+{
+	uint16_t reset_period = (uint16_t) 0;
+	slurm_ctl_conf_t *conf;
+
+	if(slurmdbd_conf) {		
+	} else {
+		conf = slurm_conf_lock();
+		reset_period = conf->priority_reset_period;
+		slurm_conf_unlock();
+	}
+
+	return reset_period;
+}
+
+/* slurm_get_priority_type
+ * returns the priority type from slurmctld_conf object
+ * RET char *    - priority type, MUST be xfreed by caller
+ */
+char *slurm_get_priority_type(void)
+{
+	char *priority_type = NULL;
+	slurm_ctl_conf_t *conf;
+
+	if(slurmdbd_conf) {		
+	} else {
+		conf = slurm_conf_lock();
+		priority_type = xstrdup(conf->priority_type);
+		slurm_conf_unlock();
+	}
+
+	return priority_type;
+}
+
+/* slurm_get_priority_weight_age
+ * returns the priority weight for age from slurmctld_conf object
+ * RET uint32_t - factor weight.
+ */
+uint32_t slurm_get_priority_weight_age(void)
+{
+	uint32_t factor = NO_VAL;
+	slurm_ctl_conf_t *conf;
+
+	if(slurmdbd_conf) {		
+	} else {
+		conf = slurm_conf_lock();
+		factor = conf->priority_weight_age;
+		slurm_conf_unlock();
+	}
+
+	return factor;
+}
+
+
+/* slurm_get_priority_weight_fairshare
+ * returns the priority weight for fairshare from slurmctld_conf object
+ * RET uint32_t - factor weight.
+ */
+uint32_t slurm_get_priority_weight_fairshare(void)
+{
+	uint32_t factor = NO_VAL;
+	slurm_ctl_conf_t *conf;
+
+	if(slurmdbd_conf) {		
+	} else {
+		conf = slurm_conf_lock();
+		factor = conf->priority_weight_fs;
+		slurm_conf_unlock();
+	}
+
+	return factor;
+}
+
+
+/* slurm_get_priority_weight_job_size
+ * returns the priority weight for job size from slurmctld_conf object
+ * RET uint32_t - factor weight.
+ */
+uint32_t slurm_get_priority_weight_job_size(void)
+{
+	uint32_t factor = NO_VAL;
+	slurm_ctl_conf_t *conf;
+
+	if(slurmdbd_conf) {		
+	} else {
+		conf = slurm_conf_lock();
+		factor = conf->priority_weight_js;
+		slurm_conf_unlock();
+	}
+
+	return factor;
+}
+
+/* slurm_get_priority_weight_partition
+ * returns the priority weight for partitions from slurmctld_conf object
+ * RET uint32_t - factor weight.
+ */
+uint32_t slurm_get_priority_weight_partition(void)
+{
+	uint32_t factor = NO_VAL;
+	slurm_ctl_conf_t *conf;
+
+	if(slurmdbd_conf) {		
+	} else {
+		conf = slurm_conf_lock();
+		factor = conf->priority_weight_part;
+		slurm_conf_unlock();
+	}
+
+	return factor;
+}
+
+
+/* slurm_get_priority_weight_qos
+ * returns the priority weight for QOS from slurmctld_conf object
+ * RET uint32_t - factor weight.
+ */
+uint32_t slurm_get_priority_weight_qos(void)
+{
+	uint32_t factor = NO_VAL;
+	slurm_ctl_conf_t *conf;
+
+	if(slurmdbd_conf) {		
+	} else {
+		conf = slurm_conf_lock();
+		factor = conf->priority_weight_qos;
+		slurm_conf_unlock();
+	}
+
+	return factor;
+}
+
+
 /* slurm_get_private_data
  * get private data from slurmctld_conf object
  */
@@ -441,6 +727,24 @@ extern char *slurm_get_crypto_type(void)
 	return crypto_type;
 }
 
+/* slurm_get_topology_plugin
+ * returns the value of topology_plugin in slurmctld_conf object
+ * RET char *    - topology type, MUST be xfreed by caller
+ */
+extern char * slurm_get_topology_plugin(void)
+{
+	char *topology_plugin = NULL;
+	slurm_ctl_conf_t *conf;
+
+	if(slurmdbd_conf) {
+	} else {
+		conf = slurm_conf_lock();
+		topology_plugin = xstrdup(conf->topology_plugin);
+		slurm_conf_unlock();
+	}
+	return topology_plugin;
+}
+
 /* slurm_get_propagate_prio_process
  * return the PropagatePrioProcess flag from slurmctld_conf object
  */
@@ -608,6 +912,45 @@ char *slurm_get_accounting_storage_user(void)
 	return storage_user;	
 }
 
+/* slurm_set_accounting_storage_user
+ * IN: char *user (name of file or database)
+ * RET 0 or error code
+ */
+int slurm_set_accounting_storage_user(char *user)
+{
+	slurm_ctl_conf_t *conf;
+
+	if(slurmdbd_conf) {
+		xfree(slurmdbd_conf->storage_user);
+		slurmdbd_conf->storage_user = xstrdup(user);
+	} else {
+		conf = slurm_conf_lock();
+		xfree(conf->accounting_storage_user);
+		conf->accounting_storage_user = xstrdup(user);
+		slurm_conf_unlock();
+	}
+	return 0;	
+}
+
+/* slurm_get_accounting_storage_backup_host
+ * returns the storage backup host from slurmctld_conf object
+ * RET char *    - storage backup host,  MUST be xfreed by caller
+ */
+char *slurm_get_accounting_storage_backup_host(void)
+{
+	char *storage_host;
+	slurm_ctl_conf_t *conf;
+
+	if(slurmdbd_conf) {
+		storage_host = xstrdup(slurmdbd_conf->storage_backup_host);
+	} else {
+		conf = slurm_conf_lock();
+		storage_host = xstrdup(conf->accounting_storage_backup_host);
+		slurm_conf_unlock();
+	}
+	return storage_host;	
+}
+
 /* slurm_get_accounting_storage_host
  * returns the storage host from slurmctld_conf object
  * RET char *    - storage host,  MUST be xfreed by caller
@@ -627,6 +970,26 @@ char *slurm_get_accounting_storage_host(void)
 	return storage_host;	
 }
 
+/* slurm_set_accounting_storage_host
+ * IN: char *host (name of file or database)
+ * RET 0 or error code
+ */
+int slurm_set_accounting_storage_host(char *host)
+{
+	slurm_ctl_conf_t *conf;
+
+	if(slurmdbd_conf) {
+		xfree(slurmdbd_conf->storage_host);
+		slurmdbd_conf->storage_host = xstrdup(host);
+	} else {
+		conf = slurm_conf_lock();
+		xfree(conf->accounting_storage_host);
+		conf->accounting_storage_host = xstrdup(host);
+		slurm_conf_unlock();
+	}
+	return 0;	
+}
+
 /* slurm_get_accounting_storage_loc
  * returns the storage location from slurmctld_conf object
  * RET char *    - storage location,  MUST be xfreed by caller
@@ -646,6 +1009,26 @@ char *slurm_get_accounting_storage_loc(void)
 	return storage_loc;	
 }
 
+/* slurm_set_accounting_storage_loc
+ * IN: char *loc (name of file or database)
+ * RET 0 or error code
+ */
+int slurm_set_accounting_storage_loc(char *loc)
+{
+	slurm_ctl_conf_t *conf;
+
+	if(slurmdbd_conf) {
+		xfree(slurmdbd_conf->storage_loc);
+		slurmdbd_conf->storage_loc = xstrdup(loc);
+	} else {
+		conf = slurm_conf_lock();
+		xfree(conf->accounting_storage_loc);
+		conf->accounting_storage_loc = xstrdup(loc);
+		slurm_conf_unlock();
+	}
+	return 0;	
+}
+
 /* slurm_get_accounting_storage_enforce
  * returns what level to enforce associations at
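The backup-host getter added above pairs naturally with the host setter for database failover: if the primary cannot be reached, retry the backup and promote it. A hedged sketch in which _try_connect() is a hypothetical stand-in for the storage plugin's actual connection attempt:

/* _try_connect() is hypothetical; only the getters/setters are real. */
char *host   = slurm_get_accounting_storage_host();
char *backup = slurm_get_accounting_storage_backup_host();

if ((_try_connect(host) != SLURM_SUCCESS) && backup) {
	info("storage host %s unreachable, trying backup %s", host, backup);
	if (_try_connect(backup) == SLURM_SUCCESS)
		slurm_set_accounting_storage_host(backup);
}
xfree(host);
xfree(backup);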
  */
@@ -687,26 +1070,6 @@ int slurm_get_is_association_based_accounting(void)
 
 }
 
-/* slurm_set_accounting_storage_loc
- * IN: char *loc (name of file or database)
- * RET 0 or error code
- */
-int slurm_set_accounting_storage_loc(char *loc)
-{
-	slurm_ctl_conf_t *conf;
-
-	if(slurmdbd_conf) {
-		xfree(slurmdbd_conf->storage_loc);
-		slurmdbd_conf->storage_loc = xstrdup(loc);
-	} else {
-		conf = slurm_conf_lock();
-		xfree(conf->accounting_storage_loc);
-		conf->accounting_storage_loc = xstrdup(loc);
-		slurm_conf_unlock();
-	}
-	return 0;	
-}
-
 /* slurm_get_accounting_storage_pass
  * returns the storage password from slurmctld_conf object
  * RET char *    - storage password,  MUST be xfreed by caller
@@ -1013,7 +1376,7 @@ uint16_t slurm_get_slurmd_port(void)
 }
 
 /* slurm_get_slurm_user_id
- * returns slurmd uid from slurmctld_conf object
+ * returns slurm uid from slurmctld_conf object
  * RET uint32_t	- slurm user id
  */
 uint32_t slurm_get_slurm_user_id(void)
@@ -1031,6 +1394,24 @@ uint32_t slurm_get_slurm_user_id(void)
 	return slurm_uid;
 }
 
+/* slurm_get_slurmd_user_id
+ * returns slurmd uid from slurmctld_conf object
+ * RET uint32_t	- slurmd user id
+ */
+uint32_t slurm_get_slurmd_user_id(void)
+{
+	uint32_t slurmd_uid = 0;
+	slurm_ctl_conf_t *conf;
+
+	if(slurmdbd_conf) {
+	} else {
+		conf = slurm_conf_lock();
+		slurmd_uid = conf->slurmd_user_id;
+		slurm_conf_unlock();
+	}
+	return slurmd_uid;
+}
+
 /* slurm_get_root_filter
  * RET uint16_t  - Value of SchedulerRootFilter */
 extern uint16_t slurm_get_root_filter(void)
@@ -1046,6 +1427,23 @@ extern uint16_t slurm_get_root_filter(void)
 	}
 	return root_filter;
 }
+
+/* slurm_get_sched_params
+ * RET char * - Value of SchedulerParameters, MUST be xfreed by caller */
+extern char *slurm_get_sched_params(void)
+{
+	char *params = NULL;
+	slurm_ctl_conf_t *conf;
+
+	if(slurmdbd_conf) {
+	} else {
+		conf = slurm_conf_lock();
+		params = xstrdup(conf->sched_params);
+		slurm_conf_unlock();
+	}
+	return params;
+}
+
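+/* A minimal usage sketch; the "defer" keyword is only an illustrative
+ * placeholder for an option inside SchedulerParameters:
+ *
+ *	char *params = slurm_get_sched_params();
+ *	bool defer = (params && strstr(params, "defer"));
+ *	xfree(params);
+ */
+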
 /* slurm_get_sched_port
  * RET uint16_t  - Value of SchedulerPort */
 extern uint16_t slurm_get_sched_port(void)
@@ -1098,6 +1496,23 @@ char *slurm_get_select_type(void)
 	return select_type;
 }
 
+/* slurm_get_srun_io_timeout
+ * get default srun I/O task timeout value from slurmctld_conf object
+ */
+uint16_t slurm_get_srun_io_timeout(void)
+{
+	uint16_t srun_io_timeout = 0;
+	slurm_ctl_conf_t *conf;
+
+	if(slurmdbd_conf) {
+	} else {
+		conf = slurm_conf_lock();
+		srun_io_timeout = conf->srun_io_timeout;
+		slurm_conf_unlock();
+	}
+	return srun_io_timeout;
+}
+
 /* slurm_get_switch_type
  * get switch type from slurmctld_conf object
  * RET char *   - switch type, MUST be xfreed by caller
@@ -1246,8 +1661,8 @@ static void _remap_slurmctld_errno(void)
  * general message management functions used by slurmctld, slurmd
 \**********************************************************************/
 
-/* 
- *  Initialize a slurm server at port "port"
+/* In the socket implementation it creates a socket, binds to it, and 
+ *	listens for connections.
  * 
  * IN  port     - port to bind the msg server to
  * RET slurm_fd - file descriptor of the connection created
@@ -1260,6 +1675,29 @@ slurm_fd slurm_init_msg_engine_port(uint16_t port)
 	return _slurm_init_msg_engine(&addr);
 }
 
+/* In the socket implementation it creates a socket, binds to it, and 
+ *	listens for connections.
+ *
+ * IN  addr_name - address to bind the msg server to (NULL means any)
+ * IN  port      - port to bind the msg server to
+ * RET slurm_fd  - file descriptor of the connection created
+ */
+slurm_fd slurm_init_msg_engine_addrname_port(char *addr_name, uint16_t port)
+{
+	slurm_addr addr;
+
+#ifdef BIND_SPECIFIC_ADDR
+	if (addr_name != NULL)
+		slurm_set_addr(&addr, port, addr_name);
+	else
+		slurm_set_addr_any(&addr, port);
+#else
+	slurm_set_addr_any(&addr, port);
+#endif
+
+	return _slurm_init_msg_engine(&addr);
+}
+
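+/* A minimal usage sketch; the address and port below are placeholders
+ * (BIND_SPECIFIC_ADDR must be defined for the address to be honored):
+ *
+ *	slurm_fd fd = slurm_init_msg_engine_addrname_port("10.0.0.1", 6817);
+ *	if (fd < 0)
+ *		error("cannot bring up message engine: %m");
+ */
+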
 /* 
  *  Same as above, but initialize using a slurm address "addr"
  *
@@ -1479,7 +1917,7 @@ int slurm_receive_msg(slurm_fd fd, slurm_msg_t *msg, int timeout)
 	}
 	
 #if	_DEBUG
-	_print_data (buftemp, rc);
+	_print_data (buf, buflen);
 #endif
 	buffer = create_buf(buf, buflen);
 
@@ -1490,9 +1928,13 @@ int slurm_receive_msg(slurm_fd fd, slurm_msg_t *msg, int timeout)
 	}
 	
 	if (check_header_version(&header) < 0) {
+		slurm_addr resp_addr;
+		char addr_str[32];
 		int uid = _unpack_msg_uid(buffer);
-		error("Invalid Protocol Version %u from uid=%d", 
-			header.version, uid);
+		slurm_get_peer_addr(fd, &resp_addr);
+		slurm_print_slurm_addr(&resp_addr, addr_str, sizeof(addr_str));
+		error("Invalid Protocol Version %u from uid=%d at %s", 
+			header.version, uid, addr_str);
 		free_buf(buffer);
 		rc = SLURM_PROTOCOL_VERSION_ERROR;
 		goto total_return;
@@ -1641,7 +2083,7 @@ List slurm_receive_msgs(slurm_fd fd, int steps, int timeout)
 	}
 	
 #if	_DEBUG
-	_print_data (buftemp, rc);
+	_print_data (buf, buflen);
 #endif
 	buffer = create_buf(buf, buflen);
 
@@ -1652,20 +2094,24 @@ List slurm_receive_msgs(slurm_fd fd, int steps, int timeout)
 	}
 	
 	if(check_header_version(&header) < 0) {
+		slurm_addr resp_addr;
+		char addr_str[32];
 		int uid = _unpack_msg_uid(buffer);
-		error("Invalid Protocol Version %u from uid=%d",
-			header.version, uid);
+		slurm_get_peer_addr(fd, &resp_addr);
+		slurm_print_slurm_addr(&resp_addr, addr_str, sizeof(addr_str));
+		error("Invalid Protocol Version %u from uid=%d at %s", 
+			header.version, uid, addr_str);
 		free_buf(buffer);
 		rc = SLURM_PROTOCOL_VERSION_ERROR;
 		goto total_return;
 	}
 	//info("ret_cnt = %d",header.ret_cnt);
 	if(header.ret_cnt > 0) {
-		ret_list = list_create(destroy_data_info);
-		while((ret_data_info = list_pop(header.ret_list)))
-			list_push(ret_list, ret_data_info);
+		if(header.ret_list)
+			ret_list = header.ret_list;
+		else
+			ret_list = list_create(destroy_data_info);
 		header.ret_cnt = 0;
-		list_destroy(header.ret_list);
 		header.ret_list = NULL;
 	}
 	
@@ -1824,7 +2270,7 @@ int slurm_receive_msg_and_forward(slurm_fd fd, slurm_addr *orig_addr,
 	}
 	
 #if	_DEBUG
-	_print_data (buftemp, rc);
+	_print_data (buf, buflen);
 #endif
 	buffer = create_buf(buf, buflen);
 
@@ -1835,9 +2281,13 @@ int slurm_receive_msg_and_forward(slurm_fd fd, slurm_addr *orig_addr,
 	}
 	
 	if (check_header_version(&header) < 0) {
+		slurm_addr resp_addr;
+		char addr_str[32];
 		int uid = _unpack_msg_uid(buffer);
-		error("Invalid Protocol Version %u from uid=%d", 
-			header.version, uid);
+		slurm_get_peer_addr(fd, &resp_addr);
+		slurm_print_slurm_addr(&resp_addr, addr_str, sizeof(addr_str));
+		error("Invalid Protocol Version %u from uid=%d at %s", 
+			header.version, uid, addr_str);
 		free_buf(buffer);
 		rc = SLURM_PROTOCOL_VERSION_ERROR;
 		goto total_return;
@@ -1872,6 +2322,12 @@ int slurm_receive_msg_and_forward(slurm_fd fd, slurm_addr *orig_addr,
 	if(header.forward.cnt > 0) {
 		debug("forwarding to %u", header.forward.cnt);
 		msg->forward_struct = xmalloc(sizeof(forward_struct_t));
+		slurm_mutex_init(&msg->forward_struct->forward_mutex);
+		pthread_cond_init(&msg->forward_struct->notify, NULL);
+
+		msg->forward_struct->forward_msg = 
+			xmalloc(sizeof(forward_msg_t) * header.forward.cnt);
+		
 		msg->forward_struct->buf_len = remaining_buf(buffer);
 		msg->forward_struct->buf = 
 			xmalloc(sizeof(char) * msg->forward_struct->buf_len);
@@ -2011,7 +2467,7 @@ int slurm_send_node_msg(slurm_fd fd, slurm_msg_t * msg)
 		msg->ret_list = NULL;
 	}
 	forward_wait(msg);
-	
+
 	init_header(&header, msg, msg->flags);
 	
 	/*
@@ -2698,19 +3154,21 @@ List slurm_send_recv_msgs(const char *nodelist, slurm_msg_t *msg,
 			  int timeout, bool quiet)
 {
 	List ret_list = NULL;
-	List tmp_ret_list = NULL;
-	slurm_fd fd = -1;
-	char *name = NULL;
-	char buf[8192];
+//	List tmp_ret_list = NULL;
+//	slurm_fd fd = -1;
+//	char buf[8192];
 	hostlist_t hl = NULL;
-	ret_data_info_t *ret_data_info = NULL;
-	ListIterator itr;
+//	ret_data_info_t *ret_data_info = NULL;
+//	ListIterator itr;
 
 	if(!nodelist || !strlen(nodelist)) {
 		error("slurm_send_recv_msgs: no nodelist given");
 		return NULL;
 	}
+	
 #ifdef HAVE_FRONT_END
+{
+	char *name = NULL;
 	/* only send to the front end node */
 	name = nodelist_nth_host(nodelist, 0);
 	if (!name) {
@@ -2721,89 +3179,104 @@ List slurm_send_recv_msgs(const char *nodelist, slurm_msg_t *msg,
 	}
 	hl = hostlist_create(name);
 	free(name);
+}
 #else
 /* 	info("total sending to %s",nodelist); */
 	hl = hostlist_create(nodelist);
 #endif
-	while((name = hostlist_shift(hl))) {
-		
-		if(slurm_conf_get_addr(name, &msg->address) == SLURM_ERROR) {
-			if (quiet) {
-				debug("slurm_send_recv_msgs: can't find "
-				      "address for host %s, check slurm.conf", 
-				      name);
-			} else {
-				error("slurm_send_recv_msgs: can't find "
-				      "address for host %s, check slurm.conf", 
-				      name);
-			}
-			mark_as_failed_forward(&tmp_ret_list, name, 
-					SLURM_COMMUNICATIONS_CONNECTION_ERROR);
-			free(name);
-			continue;
-		}
-		
-		if ((fd = slurm_open_msg_conn(&msg->address)) < 0) {
-			if (quiet)
-				debug("slurm_send_recv_msgs to %s: %m", name);
-			else
-				error("slurm_send_recv_msgs to %s: %m", name);
-			mark_as_failed_forward(&tmp_ret_list, name, 
-					SLURM_COMMUNICATIONS_CONNECTION_ERROR);
-			free(name);
-			continue;
-		}
 
-		hostlist_ranged_string(hl, sizeof(buf), buf);
-		forward_init(&msg->forward, NULL);
-		msg->forward.nodelist = xstrdup(buf);
-		msg->forward.timeout = timeout;
-		msg->forward.cnt = hostlist_count(hl);
-		if (msg->forward.nodelist[0]) {
-			debug3("sending to %s along with to %s", 
-			       name, msg->forward.nodelist);
-		} else
-			debug3("sending to %s", name);
-		
-		if(!(ret_list = _send_and_recv_msgs(fd, msg, timeout))) {
-			xfree(msg->forward.nodelist);
-			if (quiet) {
-				debug("slurm_send_recv_msgs"
-				      "(_send_and_recv_msgs) to %s: %m", 
-				      name);
-			} else {
-				error("slurm_send_recv_msgs"
-				      "(_send_and_recv_msgs) to %s: %m", 
-				      name);
-			}
-			mark_as_failed_forward(&tmp_ret_list, name, errno);
-			free(name);
-			continue;
-		} else {
-			itr = list_iterator_create(ret_list);
-			while((ret_data_info = list_next(itr))) 
-				if(!ret_data_info->node_name) {
-					ret_data_info->node_name =
-						xstrdup(name);
-				}
-			list_iterator_destroy(itr);
-		}
-		xfree(msg->forward.nodelist);
-		free(name);
-		break;		
+	if(!hl) {
+		error("slurm_send_recv_msgs: problem creating hostlist");
+		return NULL;
 	}
+
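+	/* Fan the message out through the forwarding tree; the returned
+	 * list is assumed to hold one ret_data_info_t per contacted node,
+	 * as with the forwarding logic used elsewhere in this file. */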
+	ret_list = start_msg_tree(hl, msg, timeout);
 	hostlist_destroy(hl);
 
-	if(tmp_ret_list) {
-		if(!ret_list)
-			ret_list = tmp_ret_list;
-		else {
-			while((ret_data_info = list_pop(tmp_ret_list))) 
-				list_push(ret_list, ret_data_info);
-			list_destroy(tmp_ret_list);
-		}
-	} 
 	return ret_list;
+
+	/* The code below would have started the tree from the first node
+	 * in the list, whereas the start_msg_tree function used above
+	 * starts the tree from the calling node.  It is kept here for
+	 * reference only and is never reached. */
+
+/* 	while((name = hostlist_shift(hl))) { */
+		
+/* 		if(slurm_conf_get_addr(name, &msg->address) == SLURM_ERROR) { */
+/* 			if (quiet) { */
+/* 				debug("slurm_send_recv_msgs: can't find " */
+/* 				      "address for host %s, check slurm.conf",  */
+/* 				      name); */
+/* 			} else { */
+/* 				error("slurm_send_recv_msgs: can't find " */
+/* 				      "address for host %s, check slurm.conf",  */
+/* 				      name); */
+/* 			} */
+/* 			mark_as_failed_forward(&tmp_ret_list, name,  */
+/* 					SLURM_COMMUNICATIONS_CONNECTION_ERROR); */
+/* 			free(name); */
+/* 			continue; */
+/* 		} */
+		
+/* 		if ((fd = slurm_open_msg_conn(&msg->address)) < 0) { */
+/* 			if (quiet) */
+/* 				debug("slurm_send_recv_msgs to %s: %m", name); */
+/* 			else */
+/* 				error("slurm_send_recv_msgs to %s: %m", name); */
+/* 			mark_as_failed_forward(&tmp_ret_list, name,  */
+/* 					SLURM_COMMUNICATIONS_CONNECTION_ERROR); */
+/* 			free(name); */
+/* 			continue; */
+/* 		} */
+
+/* 		hostlist_ranged_string(hl, sizeof(buf), buf); */
+/* 		forward_init(&msg->forward, NULL); */
+/* 		msg->forward.nodelist = xstrdup(buf); */
+/* 		msg->forward.timeout = timeout; */
+/* 		msg->forward.cnt = hostlist_count(hl); */
+/* 		if (msg->forward.nodelist[0]) { */
+/* 			debug3("sending to %s along with %s",  */
+/* 			       name, msg->forward.nodelist); */
+/* 		} else */
+/* 			debug3("sending to %s", name); */
+		
+/* 		if(!(ret_list = _send_and_recv_msgs(fd, msg, timeout))) { */
+/* 			xfree(msg->forward.nodelist); */
+/* 			if (quiet) { */
+/* 				debug("slurm_send_recv_msgs" */
+/* 				      "(_send_and_recv_msgs) to %s: %m",  */
+/* 				      name); */
+/* 			} else { */
+/* 				error("slurm_send_recv_msgs" */
+/* 				      "(_send_and_recv_msgs) to %s: %m",  */
+/* 				      name); */
+/* 			} */
+/* 			mark_as_failed_forward(&tmp_ret_list, name, errno); */
+/* 			free(name); */
+/* 			continue; */
+/* 		} else { */
+/* 			itr = list_iterator_create(ret_list); */
+/* 			while((ret_data_info = list_next(itr)))  */
+/* 				if(!ret_data_info->node_name) { */
+/* 					ret_data_info->node_name = */
+/* 						xstrdup(name); */
+/* 				} */
+/* 			list_iterator_destroy(itr); */
+/* 		} */
+/* 		xfree(msg->forward.nodelist); */
+/* 		free(name); */
+/* 		break;		 */
+/* 	} */
+/* 	hostlist_destroy(hl); */
+
+/* 	if(tmp_ret_list) { */
+/* 		if(!ret_list) */
+/* 			ret_list = tmp_ret_list; */
+/* 		else { */
+/* 			list_transfer(ret_list, tmp_ret_list); */
+/* 			list_destroy(tmp_ret_list); */
+/* 		} */
+/* 	}  */
+/* 	return ret_list; */
 }
 
 /*
@@ -2818,7 +3291,6 @@ List slurm_send_recv_msgs(const char *nodelist, slurm_msg_t *msg,
 List slurm_send_addr_recv_msgs(slurm_msg_t *msg, char *name, int timeout)
 {
 	List ret_list = NULL;
-	List tmp_ret_list = NULL;
 	slurm_fd fd = -1;
 	ret_data_info_t *ret_data_info = NULL;
 	ListIterator itr;
@@ -2826,15 +3298,15 @@ List slurm_send_addr_recv_msgs(slurm_msg_t *msg, char *name, int timeout)
 	if ((fd = slurm_open_msg_conn(&msg->address)) < 0) {
 		mark_as_failed_forward(&ret_list, name, 
 				       SLURM_COMMUNICATIONS_CONNECTION_ERROR);
+		errno = SLURM_COMMUNICATIONS_CONNECTION_ERROR;
 		return ret_list;
 	}
 
-	/*just to make sure */
-	forward_init(&msg->forward, NULL);
 	msg->ret_list = NULL;
 	msg->forward_struct = NULL;
 	if(!(ret_list = _send_and_recv_msgs(fd, msg, timeout))) {
-		mark_as_failed_forward(&tmp_ret_list, name, errno);
+		mark_as_failed_forward(&ret_list, name, errno);
+		errno = SLURM_COMMUNICATIONS_CONNECTION_ERROR;
 		return ret_list;
 	} else {
 		itr = list_iterator_create(ret_list);
diff --git a/src/common/slurm_protocol_api.h b/src/common/slurm_protocol_api.h
index 13365c827ae003d77873275928b9118a306a796b..34cbcd7cbacfbd7e3e93e79c0a267f82e1816630 100644
--- a/src/common/slurm_protocol_api.h
+++ b/src/common/slurm_protocol_api.h
@@ -3,13 +3,14 @@
  *	definitions
  *****************************************************************************
  *  Copyright (C) 2002-2006 The Regents of the University of California.
- *  Copyright (C) 2008 Lawrence Livermore National Security.
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Kevin Tew <tew1@llnl.gov>, et. al.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -102,11 +103,31 @@ inline slurm_protocol_config_t *slurm_get_api_config(void);
  */
 uint16_t slurm_get_batch_start_timeout(void);
 
+/* slurm_get_resume_timeout
+ * RET ResumeTimeout value from slurm.conf
+ */
+uint16_t slurm_get_resume_timeout(void);
+
+/* slurm_get_complete_wait
+ * RET CompleteWait value from slurm.conf
+ */
+uint16_t slurm_get_complete_wait(void);
+
+/* slurm_get_debug_flags
+ * RET DebugFlags value from slurm.conf
+ */
+uint32_t slurm_get_debug_flags(void);
+
 /* slurm_get_def_mem_per_task
  * RET DefMemPerTask value from slurm.conf
  */
 uint32_t slurm_get_def_mem_per_task(void);
 
+/* slurm_get_kill_on_bad_exit
+ * RET KillOnBadExit value from slurm.conf
+ */
+uint16_t slurm_get_kill_on_bad_exit(void);
+
 /* slurm_get_max_mem_per_task
  * RET MaxMemPerTask value from slurm.conf
  */
@@ -128,6 +149,12 @@ int inline slurm_get_env_timeout(void);
  */
 char *slurm_get_mpi_default(void);
 
+/* slurm_get_mpi_params
+ * get mpi parameters value from slurmctld_conf object
+ * RET char *   - mpi parameters value from slurm.conf,  MUST be xfreed by caller
+ */
+char *slurm_get_mpi_params(void);
+
 /* slurm_get_msg_timeout
  * get default message timeout value from slurmctld_conf object
  */
@@ -156,24 +183,72 @@ void inline slurm_api_clear_config(void);
  */
 char *slurm_get_health_check_program(void);
 
-/* slurm_get_slurmdbd_addr
- * get slurm_dbd_addr from slurmctld_conf object from slurmctld_conf object
- * RET char *   - slurmdbd_addr, MUST be xfreed by caller
- */
-char *slurm_get_slurmdbd_addr(void);
-
-/* slurm_get_slurmdbd_port
- * get slurm_dbd_port from slurmctld_conf object from slurmctld_conf object
- * RET uint16_t   - dbd_port
- */
-uint16_t slurm_get_slurmdbd_port(void);
-
 /* slurm_get_plugin_dir
  * get plugin directory from slurmctld_conf object from slurmctld_conf object 
  * RET char *   - plugin directory, MUST be xfreed by caller
  */
 char *slurm_get_plugin_dir(void);
 
+/* slurm_get_priority_decay_hl
+ * returns the priority decay half life in seconds from slurmctld_conf object
+ * RET uint32_t - decay_hl in secs.
+ */
+uint32_t slurm_get_priority_decay_hl(void);
+
+/* slurm_get_priority_favor_small
+ * returns whether or not we are favoring small jobs from slurmctld_conf object
+ * RET bool - true if favoring small jobs, false otherwise.
+ */
+bool slurm_get_priority_favor_small(void);
+
+/* slurm_get_priority_max_age
+ * returns the priority age max in seconds from slurmctld_conf object
+ * RET uint32_t - max_age in secs.
+ */
+uint32_t slurm_get_priority_max_age(void);
+
+/* slurm_get_priority_reset_period
+ * returns the priority usage reset period flag from slurmctld_conf object
+ * RET uint16_t - flag, see PRIORITY_RESET_* in slurm/slurm.h.
+ */
+uint16_t slurm_get_priority_reset_period(void);
+
+/* slurm_get_priority_type
+ * returns the priority type from slurmctld_conf object
+ * RET char *    - priority type, MUST be xfreed by caller
+ */
+char *slurm_get_priority_type(void);
+
+/* slurm_get_priority_weight_age
+ * returns the priority weight for age from slurmctld_conf object
+ * RET uint32_t - factor weight.
+ */
+uint32_t slurm_get_priority_weight_age(void);
+
+/* slurm_get_priority_weight_fairshare
+ * returns the priority weight for fairshare from slurmctld_conf object
+ * RET uint32_t - factor weight.
+ */
+uint32_t slurm_get_priority_weight_fairshare(void);
+
+/* slurm_get_priority_weight_job_size
+ * returns the priority weight for job size from slurmctld_conf object
+ * RET uint32_t - factor weight.
+ */
+uint32_t slurm_get_priority_weight_job_size(void);
+
+/* slurm_get_priority_weight_partition
+ * returns the priority weight for partitions from slurmctld_conf object
+ * RET uint32_t - factor weight.
+ */
+uint32_t slurm_get_priority_weight_partition(void);
+
+/* slurm_get_priority_weight_qos
+ * returns the priority weight for QOS from slurmctld_conf object
+ * RET uint32_t - factor weight.
+ */
+uint32_t slurm_get_priority_weight_qos(void);
+
 /* slurm_get_private_data
  * get private data from slurmctld_conf object
  * RET uint16_t   - private_data
@@ -227,11 +302,18 @@ extern uint16_t slurm_get_fast_schedule(void);
  */
 extern uint16_t slurm_get_track_wckey(void);
 
+/* slurm_get_topology_plugin
+ * returns the value of topology_plugin in slurmctld_conf object
+ * RET char *    - topology type, MUST be xfreed by caller
+ */
+extern char * slurm_get_topology_plugin(void);
+
 /* slurm_set_tree_width
  * sets the value of tree_width in slurmctld_conf object
  * RET 0 or error code
  */
 extern int slurm_set_tree_width(uint16_t tree_width);
+
 /* slurm_get_tree_width
  * returns the value of tree_width in slurmctld_conf object
  */
@@ -249,12 +331,30 @@ char *slurm_get_accounting_storage_type(void);
  */
 char *slurm_get_accounting_storage_user(void);
 
+/* slurm_set_accounting_storage_user
+ * IN: char *user (accounting storage user name)
+ * RET 0 or error code
+ */
+int slurm_set_accounting_storage_user(char *user);
+
+/* slurm_get_accounting_storage_backup_host
+ * returns the storage backup host from slurmctld_conf object
+ * RET char *    - storage backup host,  MUST be xfreed by caller
+ */
+char *slurm_get_accounting_storage_backup_host(void);
+
 /* slurm_get_accounting_storage_host
  * returns the storage host from slurmctld_conf object
  * RET char *    - storage host,  MUST be xfreed by caller
  */
 char *slurm_get_accounting_storage_host(void);
 
+/* slurm_set_accounting_storage_host
+ * IN: char *host (hostname of the accounting storage server)
+ * RET 0 or error code
+ */
+int slurm_set_accounting_storage_host(char *host);
+
 /* slurm_get_accounting_storage_enforce
  * returns what level to enforce associations at
  */
@@ -365,6 +465,10 @@ char *slurm_get_proctrack_type(void);
  * RET uint16_t  - Value of SchedulerRootFilter */
 extern uint16_t slurm_get_root_filter(void);
 
+/* slurm_get_sched_params
+ * RET char * - Value of SchedulerParameters, MUST be xfreed by caller */
+extern char *slurm_get_sched_params(void);
+
 /* slurm_get_sched_port
  * RET uint16_t  - Value of SchedulerPort */
 extern uint16_t slurm_get_sched_port(void);
@@ -376,11 +480,17 @@ extern uint16_t slurm_get_sched_port(void);
 uint16_t inline slurm_get_slurmd_port(void);
 
 /* slurm_get_slurm_user_id
- * returns slurmd uid from slurmctld_conf object 
+ * returns slurm uid from slurmctld_conf object 
  * RET uint32_t	- slurm user id
  */
 uint32_t slurm_get_slurm_user_id(void);
 
+/* slurm_get_slurmd_user_id
+ * returns slurmd uid from slurmctld_conf object 
+ * RET uint32_t	- slurmd user id
+ */
+uint32_t slurm_get_slurmd_user_id(void);
+
 /* slurm_get_sched_type
  * get sched type from slurmctld_conf object
  * RET char *   - sched type, MUST be xfreed by caller
@@ -393,6 +503,11 @@ char *slurm_get_sched_type(void);
  */
 char *slurm_get_select_type(void);
 
+/* slurm_get_srun_io_timeout
+ * get default srun I/O task timeout value from slurmctld_conf object
+ */
+uint16_t slurm_get_srun_io_timeout(void);
+
 /* slurm_get_switch_type
  * get switch type from slurmctld_conf object
  * RET char *   - switch type, MUST be xfreed by caller
@@ -438,11 +553,22 @@ uint16_t slurm_get_task_plugin_param(void);
 
 /* In the socket implementation it creates a socket, binds to it, and 
  *	listens for connections.
+ *
  * IN port		- port to bind the msg server to
  * RET slurm_fd		- file descriptor of the connection created
  */
 slurm_fd inline slurm_init_msg_engine_port(uint16_t port);
 
+/* In the socket implementation it creates a socket, binds to it, and 
+ *	listens for connections.
+ *
+ * IN  addr_name        - address to bind the msg server to (NULL means any)
+ * IN port		- port to bind the msg server to
+ * RET slurm_fd		- file descriptor of the connection created
+ */
+slurm_fd inline slurm_init_msg_engine_addrname_port(char *addr_name,
+						    uint16_t port);
+
 /* In the socket implementation it creates a socket, binds to it, and 
  *	listens for connections.
  * IN slurm_address 	- slurm_addr to bind the msg server to 
diff --git a/src/common/slurm_protocol_common.h b/src/common/slurm_protocol_common.h
index 11efc3867569cb5b07de9dc6dbbfb16086fb4283..593d04beadeee86db48403989c5cf04ff88c33d9 100644
--- a/src/common/slurm_protocol_common.h
+++ b/src/common/slurm_protocol_common.h
@@ -5,10 +5,11 @@
  *  Copyright (C) 2002 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Kevin Tew <tew1@llnl.gov>, et. al.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -62,7 +63,12 @@
 #define SLURM_PROTOCOL_MAX_MESSAGE_BUFFER_SIZE (512*1024)
 
 /* slurm protocol header defines, based upon config.h, 16 bits */ 
+/* A new SLURM_PROTOCOL_VERSION define needs to be added each time the
+ * protocol version changes, so that the slurmdbd can talk all versions
+ * for update messages.  init_header() and check_header_version() in
+ * slurm_protocol_util.h also need to be updated when a version is added */
 #define SLURM_PROTOCOL_VERSION ((SLURM_API_MAJOR << 8) | SLURM_API_AGE)
+#define SLURM_1_3_PROTOCOL_VERSION ((13 << 8) | 0)
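+/* The version packs the API major number into the high byte and the API
+ * age into the low byte, so a receiver can recover them with, e.g.:
+ *	uint16_t major = version >> 8;
+ *	uint16_t age   = version & 0xff;
+ */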
 
 /* used to set flags to empty */
 #define SLURM_PROTOCOL_NO_FLAGS 0 
diff --git a/src/common/slurm_protocol_defs.c b/src/common/slurm_protocol_defs.c
index e71d6b6374f2afb974c39fa6cff78874542d2781..8e15bf5df0f5e35c80c061374f62fe42576967ed 100644
--- a/src/common/slurm_protocol_defs.c
+++ b/src/common/slurm_protocol_defs.c
@@ -4,13 +4,14 @@
  *	the slurm daemons directly, not for user client use.
  *****************************************************************************
  *  Copyright (C) 2002-2007 The Regents of the University of California.
- *  Copyright (C) 2008 Lawrence Livermore National Security.
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Kevin Tew <tew1@llnl.gov> et. al.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -67,6 +68,9 @@ static void _slurm_free_node_info_members (node_info_t * node);
 static void _free_all_partitions (partition_info_msg_t *msg);
 static void _slurm_free_partition_info_members (partition_info_t * part);
 
+static void _free_all_reservations(reserve_info_msg_t *msg);
+static void _slurm_free_reserve_info_members (reserve_info_t * resv);
+
 static void _free_all_step_info (job_step_info_response_msg_t *msg);
 static void _slurm_free_job_step_info_members (job_step_info_t * msg);
 static void _make_lower(char *change);
@@ -111,6 +115,12 @@ extern void slurm_destroy_char(void *object)
 	xfree(tmp);
 }
 
+extern void slurm_destroy_uint32_ptr(void *object)
+{
+	uint32_t *tmp = (uint32_t *)object;
+	xfree(tmp);
+}
+
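+/* Like slurm_destroy_char() above, this is intended as a List delete
+ * function, e.g. (sketch):
+ *
+ *	List id_list = list_create(slurm_destroy_uint32_ptr);
+ */
+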
 /* returns number of objects added to list */
 extern int slurm_addto_char_list(List char_list, char *names)
 {
@@ -285,35 +295,40 @@ void slurm_free_job_desc_msg(job_desc_msg_t * msg)
 	int i;
 
 	if (msg) {
-		select_g_free_jobinfo(&msg->select_jobinfo);
+		xfree(msg->account);
 		xfree(msg->alloc_node);
+		for (i = 0; i < msg->argc; i++)
+			xfree(msg->argv[i]);
+		xfree(msg->argv);
+		xfree(msg->blrtsimage);
+		xfree(msg->ckpt_dir);
+		xfree(msg->comment);
+		xfree(msg->cpu_bind);
+		xfree(msg->dependency);
 		for (i = 0; i < msg->env_size; i++)
 			xfree(msg->environment[i]);
 		xfree(msg->environment);
+		xfree(msg->err);
+		xfree(msg->exc_nodes);
 		xfree(msg->features);
+		xfree(msg->in);
 		xfree(msg->licenses);
+		xfree(msg->linuximage);
 		xfree(msg->mail_user);
+		xfree(msg->mem_bind);
+		xfree(msg->mloaderimage);
 		xfree(msg->name);
+		xfree(msg->network);
+		xfree(msg->out);
 		xfree(msg->partition);
+		xfree(msg->ramdiskimage);
 		xfree(msg->req_nodes);
-		xfree(msg->exc_nodes);
+		xfree(msg->reservation);
+		xfree(msg->resp_host);
 		xfree(msg->script);
-		for (i = 0; i < msg->argc; i++)
-			xfree(msg->argv[i]);
-		xfree(msg->argv);
-		xfree(msg->err);
-		xfree(msg->in);
-		xfree(msg->out);
+		select_g_free_jobinfo(&msg->select_jobinfo);
+		xfree(msg->wckey);
 		xfree(msg->work_dir);
-		xfree(msg->account);
-		xfree(msg->network);
-		xfree(msg->comment);
-		xfree(msg->dependency);
-		xfree(msg->resp_host);
-		xfree(msg->blrtsimage);
-		xfree(msg->linuximage);
-		xfree(msg->mloaderimage);
-		xfree(msg->ramdiskimage);
 		xfree(msg);
 	}
 }
@@ -324,6 +339,7 @@ void slurm_free_job_launch_msg(batch_job_launch_msg_t * msg)
 
 	if (msg) {
 		xfree(msg->nodes);
+		xfree(msg->cpu_bind);
 		xfree(msg->cpus_per_node);
 		xfree(msg->cpu_count_reps);
 		xfree(msg->script);
@@ -331,6 +347,8 @@ void slurm_free_job_launch_msg(batch_job_launch_msg_t * msg)
 		xfree(msg->in);
 		xfree(msg->out);
 		xfree(msg->work_dir);
+		xfree(msg->ckpt_dir);
+		xfree(msg->restart_dir);
 
 		for (i = 0; i < msg->argc; i++)
 			xfree(msg->argv[i]);
@@ -360,26 +378,29 @@ void slurm_free_job_info(job_info_t * job)
 void slurm_free_job_info_members(job_info_t * job)
 {
 	if (job) {
-		xfree(job->nodes);
-		xfree(job->partition);
 		xfree(job->account);
-		xfree(job->name);
 		xfree(job->alloc_node);
-		xfree(job->node_inx);
-		xfree(job->cpus_per_node);
+		xfree(job->command);
+		xfree(job->comment);
 		xfree(job->cpu_count_reps);
-		select_g_free_jobinfo(&job->select_jobinfo);
-		xfree(job->features);
-		xfree(job->req_nodes);
-		xfree(job->req_node_inx);
+		xfree(job->cpus_per_node);
+		xfree(job->dependency);
 		xfree(job->exc_nodes);
 		xfree(job->exc_node_inx);
+		xfree(job->features);
+		xfree(job->licenses);
+		xfree(job->name);
 		xfree(job->network);
-		xfree(job->comment);
-		xfree(job->dependency);
+		xfree(job->node_inx);
+		xfree(job->nodes);
+		xfree(job->partition);
+		xfree(job->resv_name);
+		xfree(job->req_nodes);
+		xfree(job->req_node_inx);
+		select_g_free_jobinfo(&job->select_jobinfo);
+		xfree(job->state_desc);
+		xfree(job->wckey);
 		xfree(job->work_dir);
-		xfree(job->command);
-		xfree(job->licenses);
 	}
 }
 
@@ -402,8 +423,8 @@ void slurm_free_node_registration_status_msg(
 void slurm_free_update_node_msg(update_node_msg_t * msg)
 {
 	if (msg) {
-		xfree(msg->node_names);
 		xfree(msg->features);
+		xfree(msg->node_names);
 		xfree(msg->reason);
 		xfree(msg);
 	}
@@ -412,9 +433,10 @@ void slurm_free_update_node_msg(update_node_msg_t * msg)
 void slurm_free_update_part_msg(update_part_msg_t * msg)
 {
 	if (msg) {
+		xfree(msg->allow_alloc_nodes);
+		xfree(msg->allow_groups);
 		xfree(msg->name);
 		xfree(msg->nodes);
-		xfree(msg->allow_groups);
 		xfree(msg);
 	}
 }
@@ -427,6 +449,32 @@ void slurm_free_delete_part_msg(delete_part_msg_t * msg)
 	}
 }
 
+void slurm_free_resv_desc_msg(resv_desc_msg_t * msg)
+{
+	if (msg) {
+		xfree(msg->accounts);
+		xfree(msg->features);
+		xfree(msg->partition);
+		xfree(msg->name);
+		xfree(msg->node_list);
+		xfree(msg->users);
+		xfree(msg);
+	}
+}
+
+void slurm_free_resv_name_msg(reservation_name_msg_t * msg)
+{
+	if (msg) {
+		xfree(msg->name);
+		xfree(msg);
+	}
+}
+
+void slurm_free_resv_info_request_msg(resv_info_request_msg_t * msg)
+{
+	xfree(msg);
+}
+
 void slurm_free_job_step_create_request_msg(job_step_create_request_msg_t *
 					    msg)
 {
@@ -435,7 +483,7 @@ void slurm_free_job_step_create_request_msg(job_step_create_request_msg_t *
 		xfree(msg->name);
 		xfree(msg->network);
 		xfree(msg->node_list);
-		xfree(msg->ckpt_path);
+		xfree(msg->ckpt_dir);
 		xfree(msg);
 	}
 }
@@ -443,9 +491,7 @@ void slurm_free_job_step_create_request_msg(job_step_create_request_msg_t *
 void slurm_free_complete_job_allocation_msg(
 	complete_job_allocation_msg_t * msg)
 {
-	if (msg) {
-		xfree(msg);
-	}
+	xfree(msg);
 }
 
 void slurm_free_complete_batch_script_msg(complete_batch_script_msg_t * msg)
@@ -536,7 +582,8 @@ void slurm_free_launch_tasks_request_msg(launch_tasks_request_msg_t * msg)
 	xfree(msg->task_epilog);
 	xfree(msg->complete_nodelist);
 
-	xfree(msg->ckpt_path);
+	xfree(msg->ckpt_dir);
+	xfree(msg->restart_dir);
 
 	if (msg->switch_job)
 		switch_free_jobinfo(msg->switch_job);
@@ -585,7 +632,10 @@ void slurm_free_kill_tasks_msg(kill_tasks_msg_t * msg)
 
 void slurm_free_checkpoint_tasks_msg(checkpoint_tasks_msg_t * msg)
 {
-	xfree(msg);
+	if (msg) {
+		xfree(msg->image_dir);
+		xfree(msg);
+	}
 }
 
 void slurm_free_epilog_complete_msg(epilog_complete_msg_t * msg)
@@ -627,6 +677,14 @@ void inline slurm_free_srun_node_fail_msg(srun_node_fail_msg_t * msg)
 	}
 }
 
+void inline slurm_free_srun_step_missing_msg(srun_step_missing_msg_t * msg)
+{
+	if (msg) {
+		xfree(msg->nodelist);
+		xfree(msg);
+	}
+}
+
 void inline slurm_free_srun_timeout_msg(srun_timeout_msg_t * msg)
 {
 	xfree(msg);
@@ -642,7 +700,10 @@ void inline slurm_free_srun_user_msg(srun_user_msg_t * user_msg)
 
 void inline slurm_free_checkpoint_msg(checkpoint_msg_t *msg)
 {
-	xfree(msg);
+	if (msg) {
+		xfree(msg->image_dir);
+		xfree(msg);
+	}
 }
 
 void inline slurm_free_checkpoint_comp_msg(checkpoint_comp_msg_t *msg)
@@ -697,8 +758,14 @@ extern char *job_reason_string(enum job_state_reason inx)
 			return "BeginTime";
 		case WAIT_LICENSES:
 			return "Licenses";
-		case WAIT_ASSOC_LIMIT:
-			return "AssociationLimit";
+		case WAIT_ASSOC_JOB_LIMIT:
+			return "AssociationJobLimit";
+		case WAIT_ASSOC_RESOURCE_LIMIT:
+			return "AssociationResourceLimit";
+		case WAIT_ASSOC_TIME_LIMIT:
+			return "AssociationTimeLimit";
+		case WAIT_RESERVATION:
+			return "Reservation";
 		case FAIL_DOWN_PARTITION:
 			return "PartitionDown";
 		case FAIL_DOWN_NODE:
@@ -781,6 +848,34 @@ private_data_string(uint16_t private_data, char *str, int str_len)
 		strcat(str, "none");
 }
 
+extern void
+accounting_enforce_string(uint16_t enforce, char *str, int str_len)
+{
+	if (str_len > 0)
+		str[0] = '\0';
+	if (str_len < 27) {	/* 26 chars + terminating NUL */
+		error("enforce: output buffer too small");
+		return;
+	}
+
+	if (enforce & ACCOUNTING_ENFORCE_ASSOCS)
+		strcat(str, "associations"); //12 len
+	if (enforce & ACCOUNTING_ENFORCE_LIMITS) {
+		if (str[0])
+			strcat(str, ",");
+		strcat(str, "limits"); //7 len
+	}
+	if (enforce & ACCOUNTING_ENFORCE_WCKEYS) {
+		if (str[0])
+			strcat(str, ",");
+		strcat(str, "wckeys"); //7 len
+	}
+	// total len 26 chars + 1 for the terminating NUL = 27
+
+	if (str[0] == '\0')
+		strcat(str, "none");
+}
+
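+/* A minimal usage sketch; enforce_flags stands for any OR of the
+ * ACCOUNTING_ENFORCE_* bits and the caller owns the buffer, sized for
+ * the worst case of 26 characters plus the terminating NUL:
+ *
+ *	char buf[27];
+ *	accounting_enforce_string(enforce_flags, buf, sizeof(buf));
+ */
+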
 char *job_state_string(enum job_states inx)
 {
 	if (inx & JOB_COMPLETING)
@@ -835,18 +930,62 @@ char *job_state_string_compact(enum job_states inx)
 	}
 }
 
+extern char *reservation_flags_string(uint16_t flags)
+{
+	char *flag_str = xstrdup("");
+
+	if (flags & RESERVE_FLAG_MAINT)
+		xstrcat(flag_str, "MAINT");
+	if (flags & RESERVE_FLAG_NO_MAINT) {
+		if (flag_str[0])
+			xstrcat(flag_str, ",");
+		xstrcat(flag_str, "NO_MAINT");
+	}
+	if (flags & RESERVE_FLAG_DAILY) {
+		if (flag_str[0])
+			xstrcat(flag_str, ",");
+		xstrcat(flag_str, "DAILY");
+	}
+	if (flags & RESERVE_FLAG_NO_DAILY) {
+		if (flag_str[0])
+			xstrcat(flag_str, ",");
+		xstrcat(flag_str, "NO_DAILY");
+	}
+	if (flags & RESERVE_FLAG_WEEKLY) {
+		if (flag_str[0])
+			xstrcat(flag_str, ",");
+		xstrcat(flag_str, "WEEKLY");
+	}
+	if (flags & RESERVE_FLAG_NO_WEEKLY) {
+		if (flag_str[0])
+			xstrcat(flag_str, ",");
+		xstrcat(flag_str, "NO_WEEKLY");
+	}
+	if (flags & RESERVE_FLAG_SPEC_NODES) {
+		if (flag_str[0])
+			xstrcat(flag_str, ",");
+		xstrcat(flag_str, "SPEC_NODES");
+	}
+	return flag_str;
+}
+
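+/* A minimal usage sketch; unlike the fixed-buffer helpers above, this
+ * function allocates its result, so the caller must xfree it:
+ *
+ *	char *flags = reservation_flags_string(resv_flags);
+ *	info("Flags=%s", flags);
+ *	xfree(flags);
+ */
+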
 char *node_state_string(enum node_states inx)
 {
+	int  base         = (inx & NODE_STATE_BASE);
 	bool comp_flag    = (inx & NODE_STATE_COMPLETING);
 	bool drain_flag   = (inx & NODE_STATE_DRAIN);
 	bool fail_flag    = (inx & NODE_STATE_FAIL);
+	bool maint_flag   = (inx & NODE_STATE_MAINT);
 	bool no_resp_flag = (inx & NODE_STATE_NO_RESPOND);
 	bool power_flag   = (inx & NODE_STATE_POWER_SAVE);
 
-	inx = (uint16_t) (inx & NODE_STATE_BASE);
-
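+	/* Flag states take precedence over the base state, so a node in
+	 * (NODE_STATE_IDLE | NODE_STATE_MAINT) is reported as "MAINT",
+	 * or "MAINT*" if it is also not responding. */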
+	if (maint_flag) {
+		if (no_resp_flag)
+			return "MAINT*";
+		return "MAINT";
+	}
 	if (drain_flag) {
-		if (comp_flag || (inx == NODE_STATE_ALLOCATED)) {
+		if (comp_flag || (base == NODE_STATE_ALLOCATED)) {
 			if (no_resp_flag)
 				return "DRAINING*";
 			return "DRAINING";
@@ -857,7 +996,7 @@ char *node_state_string(enum node_states inx)
 		}
 	}
 	if (fail_flag) {
-		if (comp_flag || (inx == NODE_STATE_ALLOCATED)) {
+		if (comp_flag || (base == NODE_STATE_ALLOCATED)) {
 			if (no_resp_flag)
 				return "FAILING*";
 			return "FAILING";
@@ -867,12 +1006,16 @@ char *node_state_string(enum node_states inx)
 			return "FAIL";
 		}
 	}
-	if (inx == NODE_STATE_DOWN) {
+	if (inx == NODE_STATE_POWER_SAVE)
+		return "POWER_DOWN";
+	if (inx == NODE_STATE_POWER_UP)
+		return "POWER_UP";
+	if (base == NODE_STATE_DOWN) {
 		if (no_resp_flag)
 			return "DOWN*";
 		return "DOWN";
 	}
-	if (inx == NODE_STATE_ALLOCATED) {
+	if (base == NODE_STATE_ALLOCATED) {
 		if (no_resp_flag)
 			return "ALLOCATED*";
 		if (comp_flag)
@@ -884,14 +1027,21 @@ char *node_state_string(enum node_states inx)
 			return "COMPLETING*";
 		return "COMPLETING";
 	}
-	if (inx == NODE_STATE_IDLE) {
+	if (base == NODE_STATE_IDLE) {
 		if (no_resp_flag)
 			return "IDLE*";
 		if (power_flag)
 			return "IDLE~";
 		return "IDLE";
 	}
-	if (inx == NODE_STATE_UNKNOWN) {
+	if (base == NODE_STATE_FUTURE) {
+		if (no_resp_flag)
+			return "FUTURE*";
+		if (power_flag)
+			return "FUTURE~";
+		return "FUTURE";
+	}
+	if (base == NODE_STATE_UNKNOWN) {
 		if (no_resp_flag)
 			return "UNKNOWN*";
 		return "UNKNOWN";
@@ -904,11 +1054,17 @@ char *node_state_string_compact(enum node_states inx)
 	bool comp_flag    = (inx & NODE_STATE_COMPLETING);
 	bool drain_flag   = (inx & NODE_STATE_DRAIN);
 	bool fail_flag    = (inx & NODE_STATE_FAIL);
+	bool maint_flag   = (inx & NODE_STATE_MAINT);
 	bool no_resp_flag = (inx & NODE_STATE_NO_RESPOND);
 	bool power_flag   = (inx & NODE_STATE_POWER_SAVE);
 
 	inx = (uint16_t) (inx & NODE_STATE_BASE);
 
+	if (maint_flag) {
+		if (no_resp_flag)
+			return "MAINT*";
+		return "MAINT";
+	}
 	if (drain_flag) {
 		if (comp_flag || (inx == NODE_STATE_ALLOCATED)) {
 			if (no_resp_flag)
@@ -955,6 +1111,13 @@ char *node_state_string_compact(enum node_states inx)
 			return "IDLE~";
 		return "IDLE";
 	}
+	if (inx == NODE_STATE_FUTURE) {
+		if (no_resp_flag)
+			return "FUTR*";
+		if (power_flag)
+			return "FUTR~";
+		return "FUTR";
+	}
 	if (inx == NODE_STATE_UNKNOWN) {
 		if (no_resp_flag)
 			return "UNK*";
@@ -1010,6 +1173,7 @@ void slurm_free_job_step_create_response_msg(
 		job_step_create_response_msg_t * msg)
 {
 	if (msg) {
+		xfree(msg->resv_ports);
 		slurm_step_layout_destroy(msg->step_layout);
 		slurm_cred_destroy(msg->cred);
 		if (msg->switch_job)
@@ -1056,8 +1220,10 @@ void slurm_free_ctl_conf(slurm_ctl_conf_info_msg_t * config_ptr)
 		xfree(config_ptr->control_machine);
 		xfree(config_ptr->crypto_type);
 		xfree(config_ptr->epilog);
+		xfree(config_ptr->epilog_slurmctld);
 		xfree(config_ptr->health_check_program);
 		xfree(config_ptr->job_acct_gather_type);
+		xfree(config_ptr->job_ckpt_dir);
 		xfree(config_ptr->job_comp_host);
 		xfree(config_ptr->job_comp_loc);
 		xfree(config_ptr->job_comp_pass);
@@ -1068,11 +1234,13 @@ void slurm_free_ctl_conf(slurm_ctl_conf_info_msg_t * config_ptr)
 		xfree(config_ptr->licenses);
 		xfree(config_ptr->mail_prog);
 		xfree(config_ptr->mpi_default);
+		xfree(config_ptr->mpi_params);
 		xfree(config_ptr->node_prefix);
 		xfree(config_ptr->plugindir);
 		xfree(config_ptr->plugstack);
 		xfree(config_ptr->proctrack_type);
 		xfree(config_ptr->prolog);
+		xfree(config_ptr->prolog_slurmctld);
 		xfree(config_ptr->propagate_rlimits);
 		xfree(config_ptr->propagate_rlimits_except);
 		xfree(config_ptr->resume_program);
@@ -1080,6 +1248,8 @@ void slurm_free_ctl_conf(slurm_ctl_conf_info_msg_t * config_ptr)
 		xfree(config_ptr->sched_params);
 		xfree(config_ptr->schedtype);
 		xfree(config_ptr->select_type);
+		if(config_ptr->select_conf_key_pairs) 
+			list_destroy((List)config_ptr->select_conf_key_pairs);
 		xfree(config_ptr->slurm_conf);
 		xfree(config_ptr->slurm_user_name);
 		xfree(config_ptr->slurmctld_pidfile);
@@ -1098,7 +1268,9 @@ void slurm_free_ctl_conf(slurm_ctl_conf_info_msg_t * config_ptr)
 		xfree(config_ptr->task_plugin);
 		xfree(config_ptr->task_prolog);
 		xfree(config_ptr->tmp_fs);
+		xfree(config_ptr->topology_plugin);
 		xfree(config_ptr->unkillable_program);
+		xfree(config_ptr->z_char);
 		xfree(config_ptr);
 	}
 }
@@ -1182,7 +1354,7 @@ static void _slurm_free_job_step_info_members (job_step_info_t * msg)
 	if (msg != NULL) {
 		xfree(msg->partition);
 		xfree(msg->nodes);
-		xfree(msg->ckpt_path);
+		xfree(msg->ckpt_dir);
 	}
 }
 
@@ -1261,13 +1433,79 @@ static void  _free_all_partitions(partition_info_msg_t *msg)
 static void _slurm_free_partition_info_members(partition_info_t * part)
 {
 	if (part) {
-		xfree(part->name);
+		xfree(part->allow_alloc_nodes);
 		xfree(part->allow_groups);
+		xfree(part->name);
 		xfree(part->nodes);
 		xfree(part->node_inx);
 	}
 }
 
+/*
+ * slurm_free_reservation_info_msg - free the reservation information
+ *	response message
+ * IN msg - pointer to reservation information response message
+ * NOTE: buffer is loaded by slurm_load_reservation
+ */
+void slurm_free_reservation_info_msg(reserve_info_msg_t * msg)
+{
+	if (msg) {
+		if (msg->reservation_array) {
+			_free_all_reservations(msg);
+			xfree(msg->reservation_array);
+		}
+		xfree(msg);
+	}
+}
+
+static void  _free_all_reservations(reserve_info_msg_t *msg)
+{
+	int i;
+
+	if ((msg == NULL) ||
+	    (msg->reservation_array == NULL))
+		return;
+
+	for (i = 0; i < msg->record_count; i++)
+		_slurm_free_reserve_info_members(
+			&msg->reservation_array[i]);
+}
+
+static void _slurm_free_reserve_info_members(reserve_info_t * resv)
+{
+	if (resv) {
+		xfree(resv->accounts);
+		xfree(resv->features);
+		xfree(resv->name);
+		xfree(resv->node_inx);
+		xfree(resv->node_list);
+		xfree(resv->partition);
+		xfree(resv->users);
+	}
+}
+
+/*
+ * slurm_free_topo_info_msg - free the switch topology configuration 
+ *	information response message
+ * IN msg - pointer to switch topology configuration response message
+ * NOTE: buffer is loaded by slurm_load_topo.
+ */
+extern void slurm_free_topo_info_msg(topo_info_response_msg_t *msg)
+{
+	int i;
+
+	if (msg) {
+		for (i = 0; i < msg->record_count; i++) {
+			xfree(msg->topo_array[i].name);
+			xfree(msg->topo_array[i].nodes);
+			xfree(msg->topo_array[i].switches);
+		}
+		xfree(msg->topo_array);	/* free the array itself too */
+		xfree(msg);
+	}
+}
+
 extern void slurm_free_file_bcast_msg(file_bcast_msg_t *msg)
 {
 	if (msg) {
@@ -1315,6 +1553,68 @@ void slurm_free_set_debug_level_msg(set_debug_level_msg_t *msg)
 	xfree(msg);
 }
 
+void inline slurm_destroy_association_shares_object(void *object) 
+{
+	association_shares_object_t *obj_ptr = 
+		(association_shares_object_t *)object;
+	
+	if(obj_ptr) {
+		xfree(obj_ptr->cluster);
+		xfree(obj_ptr->name);
+		xfree(obj_ptr->parent);
+		xfree(obj_ptr);
+	}
+}
+
+void inline slurm_free_shares_request_msg(shares_request_msg_t *msg)
+{
+	if(msg) {
+		if(msg->acct_list)
+			list_destroy(msg->acct_list);
+		if(msg->user_list)
+			list_destroy(msg->user_list);
+		xfree(msg);
+	}
+}
+
+void inline slurm_free_shares_response_msg(shares_response_msg_t *msg)
+{
+	if(msg) {
+		if(msg->assoc_shares_list)
+			list_destroy(msg->assoc_shares_list);
+		xfree(msg);
+	}
+}
+
+void inline slurm_destroy_priority_factors_object(void *object)
+{
+	priority_factors_object_t *obj_ptr =
+		(priority_factors_object_t *)object;
+	xfree(obj_ptr);
+}
+
+void inline slurm_free_priority_factors_request_msg(
+	priority_factors_request_msg_t *msg)
+{
+	if(msg) {
+		if(msg->job_id_list)
+			list_destroy(msg->job_id_list);
+		if(msg->uid_list)
+			list_destroy(msg->uid_list);
+		xfree(msg);
+	}
+}
+
+void inline slurm_free_priority_factors_response_msg(
+	priority_factors_response_msg_t *msg)
+{
+	if(msg) {
+		if(msg->priority_factors_list)
+			list_destroy(msg->priority_factors_list);
+		xfree(msg);
+	}
+}
+
 
 void inline slurm_free_accounting_update_msg(accounting_update_msg_t *msg)
 {
@@ -1377,12 +1677,24 @@ extern int slurm_free_msg_data(slurm_msg_type_t type, void *data)
 	case REQUEST_UPDATE_NODE:
 		slurm_free_update_node_msg(data);
 		break;
+	case REQUEST_CREATE_PARTITION:
 	case REQUEST_UPDATE_PARTITION:
 		slurm_free_update_part_msg(data);
 		break;
 	case REQUEST_DELETE_PARTITION:		
 		slurm_free_delete_part_msg(data);
 		break;
+	case REQUEST_CREATE_RESERVATION:
+	case REQUEST_UPDATE_RESERVATION:
+		slurm_free_resv_desc_msg(data);
+		break;
+	case REQUEST_DELETE_RESERVATION:
+	case RESPONSE_CREATE_RESERVATION:		
+		slurm_free_resv_name_msg(data);
+		break;
+	case REQUEST_RESERVATION_INFO:
+		slurm_free_resv_info_request_msg(data);
+		break;
 	case REQUEST_NODE_REGISTRATION_STATUS:
 		slurm_free_node_registration_status_msg(data);
 		break;
@@ -1403,6 +1715,18 @@ extern int slurm_free_msg_data(slurm_msg_type_t type, void *data)
 	case REQUEST_JOB_INFO_SINGLE:
 		slurm_free_job_id_msg(data);
 		break;
+	case REQUEST_SHARE_INFO:
+		slurm_free_shares_request_msg(data);
+		break;
+	case RESPONSE_SHARE_INFO:
+		slurm_free_shares_response_msg(data);
+		break;
+	case REQUEST_PRIORITY_FACTORS:
+		slurm_free_priority_factors_request_msg(data);
+		break;
+	case RESPONSE_PRIORITY_FACTORS:
+		slurm_free_priority_factors_response_msg(data);
+		break;
 	case REQUEST_NODE_SELECT_INFO:
 		slurm_free_node_select_msg(data);
 		break;
@@ -1463,16 +1787,21 @@ extern int slurm_free_msg_data(slurm_msg_type_t type, void *data)
 	case REQUEST_PING:		
 	case REQUEST_RECONFIGURE:
 	case REQUEST_CONTROL:
+	case REQUEST_TAKEOVER:
 	case REQUEST_SHUTDOWN_IMMEDIATE:
 	case RESPONSE_FORWARD_FAILED:
 	case REQUEST_DAEMON_STATUS:
 	case REQUEST_HEALTH_CHECK:
 	case ACCOUNTING_FIRST_REG:
+	case REQUEST_TOPO_INFO:
 		/* No body to free */
 		break;
 	case ACCOUNTING_UPDATE_MSG:
 		slurm_free_accounting_update_msg(data);
 		break;
+	case RESPONSE_TOPO_INFO:
+		slurm_free_topo_info_msg(data);
+		break;
 	default:
 		error("invalid type trying to be freed %u", type);
 		break; 
diff --git a/src/common/slurm_protocol_defs.h b/src/common/slurm_protocol_defs.h
index cfb2f9c5a1e52cd9f59255c5225614533d2d5f02..c014f1a21026ef8990b0fac891a3d8d33c5aab23 100644
--- a/src/common/slurm_protocol_defs.h
+++ b/src/common/slurm_protocol_defs.h
@@ -2,13 +2,14 @@
  *  slurm_protocol_defs.h - definitions used for RPCs
  *****************************************************************************
  *  Copyright (C) 2002-2007 The Regents of the University of California.
- *  Copyright (C) 2008 Lawrence Livermore National Security.
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Kevin Tew <tew1@llnl.gov>.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -95,6 +96,7 @@ typedef enum {
 	REQUEST_CONTROL,
 	REQUEST_SET_DEBUG_LEVEL,
 	REQUEST_HEALTH_CHECK,
+	REQUEST_TAKEOVER,
 	
 	REQUEST_BUILD_INFO = 2001,
 	RESPONSE_BUILD_INFO,
@@ -117,11 +119,24 @@ typedef enum {
 	REQUEST_TRIGGER_CLEAR,
 	RESPONSE_TRIGGER_GET,
 	REQUEST_JOB_INFO_SINGLE,
+	REQUEST_SHARE_INFO,
+	RESPONSE_SHARE_INFO,
+	REQUEST_RESERVATION_INFO,
+	RESPONSE_RESERVATION_INFO,
+	REQUEST_PRIORITY_FACTORS,
+	RESPONSE_PRIORITY_FACTORS,
+	REQUEST_TOPO_INFO,
+	RESPONSE_TOPO_INFO,
 
 	REQUEST_UPDATE_JOB = 3001,
 	REQUEST_UPDATE_NODE,
-	REQUEST_UPDATE_PARTITION,
+	REQUEST_CREATE_PARTITION,
 	REQUEST_DELETE_PARTITION,
+	REQUEST_UPDATE_PARTITION,
+	REQUEST_CREATE_RESERVATION,
+	RESPONSE_CREATE_RESERVATION,
+	REQUEST_DELETE_RESERVATION,
+	REQUEST_UPDATE_RESERVATION,
 
 	REQUEST_RESOURCE_ALLOCATION = 4001,
 	RESPONSE_RESOURCE_ALLOCATION,
@@ -195,6 +210,7 @@ typedef enum {
 	SRUN_JOB_COMPLETE,
 	SRUN_USER_MSG,
 	SRUN_EXEC,
+	SRUN_STEP_MISSING,
 
 	PMI_KVS_PUT_REQ = 7201,
 	PMI_KVS_PUT_RESP,
@@ -294,6 +310,56 @@ typedef struct ret_data_info {
  * Slurm Protocol Data Structures
 \*****************************************************************************/
 
+typedef struct association_shares_object {
+	uint32_t assoc_id;	/* association ID */
+
+	char *cluster;          /* cluster name */
+	char *name;             /* name */
+	char *parent;           /* parent name */
+
+	double shares_norm;     /* normalized shares */
+	uint32_t shares_raw;	/* number of shares allocated */
+
+	double usage_efctv;	/* effective, normalized usage */
+	double usage_norm;	/* normalized usage */
+	uint64_t usage_raw;	/* measure of resource usage */
+
+	uint16_t user;          /* 1 if user association, 0 if account
+				 * association */
+} association_shares_object_t;
+
+typedef struct shares_request_msg {
+	List acct_list;
+	List user_list;
+} shares_request_msg_t;
+
+typedef struct shares_response_msg {
+	List assoc_shares_list; /* list of association_shares_object_t *'s */
+	uint64_t tot_shares;
+} shares_response_msg_t;
+
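+/* The assoc_shares_list elements are association_shares_object_t and are
+ * assumed to be freed with slurm_destroy_association_shares_object()
+ * when the list is built with that destructor, e.g. (sketch):
+ *
+ *	List l = list_create(slurm_destroy_association_shares_object);
+ */
+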
+typedef struct priority_factors_object {
+	uint32_t job_id;
+	uint32_t user_id;
+
+	double	 priority_age;
+	double	 priority_fs;
+	double	 priority_js;
+	double	 priority_part;
+	double	 priority_qos;
+
+	uint16_t nice;
+} priority_factors_object_t;
+
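+/* Each priority_* field is presumed to carry the already-weighted
+ * contribution of one factor, so a job's priority is approximately the
+ * sum of the five factors, adjusted by the nice value. */
+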
+typedef struct priority_factors_request_msg {
+	List	 job_id_list;
+	List	 uid_list;
+} priority_factors_request_msg_t;
+
+typedef struct priority_factors_response_msg {
+	List	 priority_factors_list;	/* priority_factors_object_t list */
+} priority_factors_response_msg_t;
+
 typedef struct job_step_kill_msg {
 	uint32_t job_id;
 	uint32_t job_step_id;
@@ -342,6 +408,10 @@ typedef struct part_info_request_msg {
 	uint16_t show_flags;
 } part_info_request_msg_t;
 
+typedef struct resv_info_request_msg {
+	time_t last_update;
+} resv_info_request_msg_t;
+
 typedef struct complete_job_allocation {
 	uint32_t job_id;
 	uint32_t job_rc;
@@ -380,8 +450,8 @@ typedef struct kill_tasks_msg {
 typedef struct checkpoint_tasks_msg {
 	uint32_t job_id;
 	uint32_t job_step_id;
-	uint32_t signal;
 	time_t timestamp;
+	char *image_dir;
 } checkpoint_tasks_msg_t;
 
 typedef struct epilog_complete_msg {
@@ -404,37 +474,40 @@ typedef struct set_debug_level_msg {
 } set_debug_level_msg_t;
 
 typedef struct job_step_specs {
-	uint32_t job_id;	/* job ID */
-	uint32_t user_id;	/* user the job runs as */
-	uint32_t node_count;	/* count of required nodes */
-	uint32_t cpu_count;	/* count of required processors */
-	uint32_t num_tasks;	/* number of tasks required */
-	uint16_t relative;	/* first node to use of job's allocation */
-	uint16_t task_dist;	/* see enum task_dist_state */
-	uint16_t plane_size;	/* plane size when task_dist =
-				   SLURM_DIST_PLANE */
-	uint16_t port;		/* port to contact initiating srun */
 	uint16_t ckpt_interval;	/* checkpoint creation interval (minutes) */
+	char *ckpt_dir; 	/* path to store checkpoint image files */
+	uint32_t cpu_count;	/* count of required processors */
 	uint16_t exclusive;	/* 1 if CPUs not shared with other steps */
+	char *host;		/* host to contact initiating srun */
 	uint16_t immediate;	/* 1 if allocate to run or fail immediately,
 				 * 0 if to be queued awaiting resources */
-	uint16_t mem_per_task;	/* MB memory required per task, 0=no limit */
-	char *host;		/* host to contact initiating srun */
-	char *node_list;	/* list of required nodes */
-	char *network;		/* network use spec */
+	uint32_t job_id;	/* job ID */
+	uint32_t mem_per_task;	/* MB memory required per task, 0=no limit */
 	char *name;		/* name of the job step, default "" */
-	char *ckpt_path;	/* path to store checkpoint image files */
+	char *network;		/* network use spec */
+	uint32_t node_count;	/* count of required nodes */
+	uint8_t no_kill;	/* 1 if no kill on node failure */
+	char *node_list;	/* list of required nodes */
+	uint32_t num_tasks;	/* number of tasks required */
 	uint8_t overcommit;     /* flag, 1 to allow overcommit of processors,
 				   0 to disallow overcommit. default is 0 */
+	uint16_t plane_size;	/* plane size when task_dist =
+				   SLURM_DIST_PLANE */
+	uint16_t port;		/* port to contact initiating srun */
+	uint16_t relative;	/* first node to use of job's allocation */
+	uint16_t resv_port_cnt;	/* reserve ports for MPI if set */
+	uint16_t task_dist;	/* see enum task_dist_state */
+	uint32_t user_id;	/* user the job runs as */
 } job_step_create_request_msg_t;
 
 typedef struct job_step_create_response_msg {
-	uint32_t job_step_id;	/* assigned job step id */
+	uint32_t job_step_id;		/* assigned job step id */
+	char *resv_ports;		/* reserved ports */
 	slurm_step_layout_t *step_layout; /* information about how the 
-                                             step is laid out */
-	slurm_cred_t cred;      /* slurm job credential */
+                                           * step is laid out */
+	slurm_cred_t cred;    	  /* slurm job credential */
 	switch_jobinfo_t switch_job;	/* switch context, opaque 
-                                           data structure */
+                                         * data structure */
 } job_step_create_response_msg_t;
 
 typedef struct launch_tasks_request_msg {
@@ -444,8 +517,9 @@ typedef struct launch_tasks_request_msg {
 	uint32_t  nprocs;	/* number of processes in this job step   */
 	uint32_t  uid;
 	uint32_t  gid;
-	uint32_t  job_mem;	/* MB of memory reserved by job, 0 if no limit */
-	uint32_t  task_mem;	/* MB of memory reserved per task, 0 if no limit */
+	uint32_t  job_mem;	/* MB of memory reserved by the job per node,
+				 * OR memory per CPU with the MEM_PER_CPU
+				 * flag set; default=0 (no limit) */
 	uint16_t  *tasks_to_launch;
 	uint32_t  envc;
 	uint32_t  argc;
@@ -455,9 +529,6 @@ typedef struct launch_tasks_request_msg {
 	uint16_t  max_cores;
 	uint16_t  max_threads;
 	uint16_t  cpus_per_task;
-	uint16_t  ntasks_per_node;
-	uint16_t  ntasks_per_socket;
-	uint16_t  ntasks_per_core;
 	char    **env;
 	char    **argv;
 	char     *cwd;
@@ -470,8 +541,6 @@ typedef struct launch_tasks_request_msg {
 
         /* Distribution at the lowest level of logical processor (lllp) */
 	uint16_t task_dist;  /* --distribution=, -m dist	*/
-	uint16_t plane_size; /* lllp distribution -> plane_size for
-			      * when -m plane=<# of lllp per plane> */      
 	uint16_t  task_flags;
 	uint32_t **global_task_ids;
 	slurm_addr orig_addr;	  /* where message really came from for io */ 
@@ -488,6 +557,7 @@ typedef struct launch_tasks_request_msg {
 	char     *efname; /* stderr filename pattern */
 	char     *ifname; /* stdin filename pattern */
 	uint8_t   buffered_stdio; /* 1 for line-buffered, 0 for unbuffered */
+	uint8_t   labelio;  /* prefix output lines with the task number */
 	uint16_t  num_io_port;
 	uint16_t  *io_port;  /* array of available client IO listen ports */
 	/**********  END  "normal" IO only options **********/
@@ -501,7 +571,8 @@ typedef struct launch_tasks_request_msg {
 	switch_jobinfo_t switch_job;	/* switch credential for the job */
 	job_options_t options;  /* Arbitrary job options */
 	char *complete_nodelist;
-	char *ckpt_path;	/* checkpoint path */
+	char *ckpt_dir;		/* checkpoint path */
+	char *restart_dir;	/* restart from checkpoint if set */
 } launch_tasks_request_msg_t;
 
 typedef struct task_user_managed_io_msg {
@@ -571,15 +642,20 @@ typedef struct batch_job_launch_msg {
 	uint32_t uid;
 	uint32_t gid;
 	uint32_t nprocs;	/* number of tasks in this job         */
-	uint16_t num_cpu_groups;/* elements in below cpu arrays */
-	uint32_t *cpus_per_node;/* cpus per node */
+	uint32_t num_cpu_groups;/* elements in below cpu arrays */
+	uint16_t cpu_bind_type;	/* Internal for slurmd/task_affinity   */
+	char     *cpu_bind;	/* Internal for slurmd/task_affinity   */
+	uint16_t *cpus_per_node;/* cpus per node */
 	uint32_t *cpu_count_reps;/* how many nodes have same cpu count */
+	uint16_t cpus_per_task;	/* number of CPUs requested per task */
 	char *nodes;		/* list of nodes allocated to job_step */
 	char *script;		/* the actual job script, default NONE */
 	char *err;		/* pathname of stderr */
 	char *in;		/* pathname of stdin */
 	char *out;		/* pathname of stdout */
 	char *work_dir;		/* full pathname of working directory */
+	char *ckpt_dir;		/* location to store checkpoint image */
+	char *restart_dir;	/* restart execution from image in this dir */
 	uint32_t argc;
 	char **argv;
 	uint32_t envc;		/* element count in environment */
@@ -589,8 +665,9 @@ typedef struct batch_job_launch_msg {
 	slurm_cred_t cred;
 	uint8_t open_mode;	/* stdout/err append or truncate */
 	uint8_t overcommit;	/* if resources being over subscribed */
-	uint16_t acctg_freq;	/* accounting polling interval */
-	uint32_t job_mem;	/* memory limit for job */
+	uint16_t acctg_freq;	/* accounting polling interval	*/
+	uint32_t job_mem;	/* memory limit for job		*/
+	uint16_t restart_cnt;	/* batch job restart count	*/
 } batch_job_launch_msg_t;
 
 typedef struct job_id_request_msg {
@@ -614,6 +691,8 @@ typedef struct checkpoint_msg {
 	uint16_t data;		/* operation specific data */
 	uint32_t job_id;	/* slurm job_id */
 	uint32_t step_id;	/* slurm step_id */
+	char *image_dir;	/* location to store the context images.
+				 * NULL for default */
 } checkpoint_msg_t;
 
 typedef struct checkpoint_comp_msg {
@@ -716,7 +795,7 @@ typedef struct slurm_node_registration_status_msg {
 	uint32_t tmp_disk;
 	uint32_t job_count;	/* number of associate job_id's */
 	uint32_t *job_id;	/* IDs of running job (if any) */
-	uint16_t *step_id;	/* IDs of running job steps (if any) */
+	uint32_t *step_id;	/* IDs of running job steps (if any) */
 	uint32_t status;	/* node status code, same as return codes */
 	uint16_t startup;	/* slurmd just restarted */
 	switch_node_info_t switch_nodeinfo;	/* set only if startup != 0 */
@@ -754,6 +833,7 @@ extern void slurm_msg_t_init (slurm_msg_t *msg);
 extern void slurm_msg_t_copy(slurm_msg_t *dest, slurm_msg_t *src);
 
 extern void slurm_destroy_char(void *object);
+extern void slurm_destroy_uint32_ptr(void *object);
 extern int slurm_addto_char_list(List char_list, char *names);
 extern int slurm_sort_char_list_asc(char *name_a, char *name_b);
 extern int slurm_sort_char_list_desc(char *name_a, char *name_b);
@@ -768,7 +848,16 @@ void inline slurm_free_job_step_info_request_msg(
 		job_step_info_request_msg_t *msg);
 void inline slurm_free_node_info_request_msg(node_info_request_msg_t *msg);
 void inline slurm_free_part_info_request_msg(part_info_request_msg_t *msg);
+void inline slurm_free_resv_info_request_msg(resv_info_request_msg_t *msg);
 void inline slurm_free_set_debug_level_msg(set_debug_level_msg_t *msg);
+void inline slurm_destroy_association_shares_object(void *object);
+void inline slurm_free_shares_request_msg(shares_request_msg_t *msg);
+void inline slurm_free_shares_response_msg(shares_response_msg_t *msg);
+void inline slurm_destroy_priority_factors_object(void *object);
+void inline slurm_free_priority_factors_request_msg(
+	priority_factors_request_msg_t *msg);
+void inline slurm_free_priority_factors_response_msg(
+	priority_factors_response_msg_t *msg);
 
 #define	slurm_free_timelimit_msg(msg) \
 	slurm_free_kill_job_msg(msg)
@@ -795,6 +884,9 @@ void inline slurm_free_job_launch_msg(batch_job_launch_msg_t * msg);
 void inline slurm_free_update_node_msg(update_node_msg_t * msg);
 void inline slurm_free_update_part_msg(update_part_msg_t * msg);
 void inline slurm_free_delete_part_msg(delete_part_msg_t * msg);
+void inline slurm_free_resv_desc_msg(resv_desc_msg_t * msg);
+void inline slurm_free_resv_name_msg(reservation_name_msg_t * msg);
 void inline
 slurm_free_job_step_create_request_msg(job_step_create_request_msg_t * msg);
 void inline
@@ -824,6 +916,7 @@ void inline slurm_free_srun_job_complete_msg(srun_job_complete_msg_t * msg);
 void inline slurm_free_srun_exec_msg(srun_exec_msg_t *msg);
 void inline slurm_free_srun_ping_msg(srun_ping_msg_t * msg);
 void inline slurm_free_srun_node_fail_msg(srun_node_fail_msg_t * msg);
+void inline slurm_free_srun_step_missing_msg(srun_step_missing_msg_t * msg);
 void inline slurm_free_srun_timeout_msg(srun_timeout_msg_t * msg);
 void inline slurm_free_srun_user_msg(srun_user_msg_t * msg);
 void inline slurm_free_checkpoint_msg(checkpoint_msg_t *msg);
@@ -864,6 +957,11 @@ extern char *job_state_string_compact(enum job_states inx);
 extern char *node_state_string(enum node_states inx);
 extern char *node_state_string_compact(enum node_states inx);
 extern void  private_data_string(uint16_t private_data, char *str, int str_len);
+extern void  accounting_enforce_string(uint16_t enforce,
+				       char *str, int str_len);
+
+/* returned string must be xfree'd by the caller */
+extern char *reservation_flags_string(uint16_t flags);
 
 #define safe_read(fd, buf, size) do {					\
 		int remaining = size;					\
diff --git a/src/common/slurm_protocol_interface.h b/src/common/slurm_protocol_interface.h
index 036f89db87a4dd6b43de782a10b8b48b2fcf1f52..22280876f54f7f44a354f0e4641f0763ba997081 100644
--- a/src/common/slurm_protocol_interface.h
+++ b/src/common/slurm_protocol_interface.h
@@ -4,10 +4,11 @@
  *  Copyright (C) 2002-2006 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Chris Dunlap <cdunlap@llnl.gov>, et. al.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/common/slurm_protocol_mongo_common.h b/src/common/slurm_protocol_mongo_common.h
index 60e5ef42cfc5b00a99ae028d08392b86fc17c255..b94a891a8e0c11bf0b7c67fedca5b59d8d483f4f 100644
--- a/src/common/slurm_protocol_mongo_common.h
+++ b/src/common/slurm_protocol_mongo_common.h
@@ -6,10 +6,11 @@
  *  Copyright (C) 2002 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Kevin Tew <tew1@llnl.gov> 
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/common/slurm_protocol_pack.c b/src/common/slurm_protocol_pack.c
index bf08d0c8f2c9cf5678792c10b9493ca901b9c49d..cb240584c1e58ac0f6d640dda29fe5518587804c 100644
--- a/src/common/slurm_protocol_pack.c
+++ b/src/common/slurm_protocol_pack.c
@@ -2,13 +2,14 @@
  *  slurm_protocol_pack.c - functions to pack and unpack structures for RPCs
  *****************************************************************************
  *  Copyright (C) 2002-2007 The Regents of the University of California.
- *  Copyright (C) 2008 Lawrence Livermore National Security.
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Kevin Tew <tew1@llnl.gov>, et. al.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -69,6 +70,25 @@
 #define _pack_node_select_info_msg(msg,buf)	_pack_buffer_msg(msg,buf)
 #define _pack_node_info_msg(msg,buf)		_pack_buffer_msg(msg,buf)
 #define _pack_partition_info_msg(msg,buf)	_pack_buffer_msg(msg,buf)
+#define _pack_reserve_info_msg(msg,buf)		_pack_buffer_msg(msg,buf)
+
+static void _pack_assoc_shares_object(void *in, Buf buffer);
+static int _unpack_assoc_shares_object(void **object, Buf buffer);
+static void _pack_shares_request_msg(shares_request_msg_t * msg, Buf buffer);
+static int _unpack_shares_request_msg(shares_request_msg_t ** msg, Buf buffer);
+static void _pack_shares_response_msg(shares_response_msg_t * msg, Buf buffer);
+static int _unpack_shares_response_msg(shares_response_msg_t ** msg,
+				       Buf buffer);
+static void _pack_priority_factors_object(void *in, Buf buffer);
+static int _unpack_priority_factors_object(void **object, Buf buffer);
+static void _pack_priority_factors_request_msg(
+	priority_factors_request_msg_t * msg, Buf buffer);
+static int _unpack_priority_factors_request_msg(
+	priority_factors_request_msg_t ** msg, Buf buffer);
+static void _pack_priority_factors_response_msg(
+	priority_factors_response_msg_t * msg, Buf buffer);
+static int _unpack_priority_factors_response_msg(
+	priority_factors_response_msg_t ** msg, Buf buffer);
 
 static void _pack_update_node_msg(update_node_msg_t * msg, Buf buffer);
 static int _unpack_update_node_msg(update_node_msg_t ** msg, Buf buffer);
@@ -143,15 +163,24 @@ static int _unpack_job_step_create_response_msg(
 
 static void _pack_part_info_request_msg(part_info_request_msg_t * msg, 
 					Buf buffer);
-
 static int _unpack_part_info_request_msg(part_info_request_msg_t ** 
 					 msg, Buf buffer);
 
+static void _pack_resv_info_request_msg(resv_info_request_msg_t * msg, 
+					Buf buffer);
+static int _unpack_resv_info_request_msg(resv_info_request_msg_t ** 
+					 msg, Buf buffer);
+
 static int _unpack_partition_info_msg(partition_info_msg_t ** msg,
 				      Buf buffer);
 static int _unpack_partition_info_members(partition_info_t * part,
 					  Buf buffer);
 
+static int _unpack_reserve_info_msg(reserve_info_msg_t ** msg,
+				    Buf buffer);
+static int _unpack_reserve_info_members(reserve_info_t * resv,
+					Buf buffer);
+
 static void _pack_launch_tasks_request_msg(launch_tasks_request_msg_t *
 					   msg, Buf buffer);
 static int _unpack_launch_tasks_request_msg(launch_tasks_request_msg_t **
@@ -286,6 +315,11 @@ static void _pack_srun_node_fail_msg(srun_node_fail_msg_t * msg, Buf buffer);
 static int  _unpack_srun_node_fail_msg(srun_node_fail_msg_t ** msg_ptr, 
 				       Buf buffer);
 
+static void _pack_srun_step_missing_msg(srun_step_missing_msg_t * msg, 
+					Buf buffer);
+static int  _unpack_srun_step_missing_msg(srun_step_missing_msg_t ** msg_ptr, 
+					  Buf buffer);
+
 static void _pack_srun_timeout_msg(srun_timeout_msg_t * msg, Buf buffer);
 static int  _unpack_srun_timeout_msg(srun_timeout_msg_t ** msg_ptr, 
 					Buf buffer);
@@ -350,6 +384,14 @@ static void _pack_accounting_update_msg(accounting_update_msg_t *msg,
 static int _unpack_accounting_update_msg(accounting_update_msg_t **msg,
 					 Buf buffer);
 
+static void _pack_update_resv_msg(resv_desc_msg_t * msg, Buf buffer);
+static int  _unpack_update_resv_msg(resv_desc_msg_t ** msg, Buf buffer);
+static void _pack_resv_name_msg(reservation_name_msg_t * msg, Buf buffer);
+static int  _unpack_resv_name_msg(reservation_name_msg_t ** msg, Buf buffer);
+
+static void _pack_topo_info_msg(topo_info_response_msg_t *msg, Buf buffer);
+static int  _unpack_topo_info_msg(topo_info_response_msg_t **msg,
+				 Buf buffer);
 /* pack_header
  * packs a slurm protocol header that precedes every slurm message
  * IN header - the header structure to pack
@@ -443,6 +485,10 @@ pack_msg(slurm_msg_t const *msg, Buf buffer)
 		_pack_part_info_request_msg((part_info_request_msg_t *)
 					    msg->data, buffer);
 		break;
+	case REQUEST_RESERVATION_INFO:
+		_pack_resv_info_request_msg((resv_info_request_msg_t *)
+					    msg->data, buffer);
+		break;
 	case REQUEST_BUILD_INFO:
 	case REQUEST_ACCTING_INFO:
 		_pack_last_update_msg((last_update_msg_t *)
@@ -484,9 +530,11 @@ pack_msg(slurm_msg_t const *msg, Buf buffer)
 	case REQUEST_SHUTDOWN_IMMEDIATE:
 	case REQUEST_PING:
 	case REQUEST_CONTROL:
+	case REQUEST_TAKEOVER:
 	case REQUEST_DAEMON_STATUS:
 	case REQUEST_HEALTH_CHECK:
 	case ACCOUNTING_FIRST_REG:
+	case REQUEST_TOPO_INFO:
 		/* Message contains no body/information */
 		break;
 	case REQUEST_SHUTDOWN:
@@ -515,6 +563,7 @@ pack_msg(slurm_msg_t const *msg, Buf buffer)
 		_pack_update_node_msg((update_node_msg_t *) msg->data,
 				      buffer);
 		break;
+	case REQUEST_CREATE_PARTITION:
 	case REQUEST_UPDATE_PARTITION:
 		_pack_update_partition_msg((update_part_msg_t *) msg->
 					   data, buffer);
@@ -523,6 +572,19 @@ pack_msg(slurm_msg_t const *msg, Buf buffer)
 		_pack_delete_partition_msg((delete_part_msg_t *) msg->
 					   data, buffer);
 		break;
+	case REQUEST_CREATE_RESERVATION:
+	case REQUEST_UPDATE_RESERVATION:
+		_pack_update_resv_msg((resv_desc_msg_t *) msg->
+				      data, buffer);
+		break;
+	case RESPONSE_RESERVATION_INFO:
+		_pack_reserve_info_msg((slurm_msg_t *) msg, buffer);
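+		/* reservation info travels as a pre-packed buffer (see the
+		 * _pack_reserve_info_msg macro above), so the whole
+		 * slurm_msg_t is passed here rather than msg->data */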
+		break;
+	case REQUEST_DELETE_RESERVATION:
+	case RESPONSE_CREATE_RESERVATION:
+		_pack_resv_name_msg((reservation_name_msg_t *) msg->
+				     data, buffer);
+		break;
 	case REQUEST_REATTACH_TASKS:
 		_pack_reattach_tasks_request_msg(
 			(reattach_tasks_request_msg_t *) msg->data, buffer);
@@ -665,6 +727,10 @@ pack_msg(slurm_msg_t const *msg, Buf buffer)
 		_pack_srun_node_fail_msg((srun_node_fail_msg_t *)msg->data, 
 					 buffer);
 		break;
+	case SRUN_STEP_MISSING:
+		_pack_srun_step_missing_msg((srun_step_missing_msg_t *)
+					    msg->data, buffer);
+		break;
 	case SRUN_TIMEOUT:
 		_pack_srun_timeout_msg((srun_timeout_msg_t *)msg->data, buffer);
 		break;
@@ -697,6 +763,24 @@ pack_msg(slurm_msg_t const *msg, Buf buffer)
 		_pack_job_ready_msg((job_id_msg_t *)msg->data, buffer);
 		break;
 
+	case REQUEST_SHARE_INFO:
+		_pack_shares_request_msg((shares_request_msg_t *)msg->data,
+					 buffer);
+		break;
+	case RESPONSE_SHARE_INFO:
+		_pack_shares_response_msg((shares_response_msg_t *)msg->data,
+					  buffer);
+		break;
+	case REQUEST_PRIORITY_FACTORS:
+		_pack_priority_factors_request_msg(
+			(priority_factors_request_msg_t*)msg->data,
+			buffer);
+		break;
+	case RESPONSE_PRIORITY_FACTORS:
+		_pack_priority_factors_response_msg(
+			(priority_factors_response_msg_t*)msg->data,
+			buffer);
+		break;
 	case REQUEST_NODE_SELECT_INFO:
 		_pack_node_select_info_req_msg(
 			(node_info_select_request_msg_t *) msg->data, buffer);
@@ -739,6 +823,10 @@ pack_msg(slurm_msg_t const *msg, Buf buffer)
 			(accounting_update_msg_t *)msg->data,
 			buffer);
 		break;
+	case RESPONSE_TOPO_INFO:
+		_pack_topo_info_msg(
+			(topo_info_response_msg_t *)msg->data, buffer);
+		break;
 	default:
 		debug("No pack method for msg type %u", msg->msg_type);
 		return EINVAL;
@@ -770,6 +858,10 @@ unpack_msg(slurm_msg_t * msg, Buf buffer)
 		rc = _unpack_part_info_request_msg((part_info_request_msg_t **)
 						   & (msg->data), buffer);
 		break;
+	case REQUEST_RESERVATION_INFO:
+		rc = _unpack_resv_info_request_msg((resv_info_request_msg_t **)
+						   & (msg->data), buffer);
+		break;
 	case REQUEST_BUILD_INFO:
 	case REQUEST_ACCTING_INFO:
 		rc = _unpack_last_update_msg((last_update_msg_t **) &
@@ -815,9 +907,11 @@ unpack_msg(slurm_msg_t * msg, Buf buffer)
 	case REQUEST_SHUTDOWN_IMMEDIATE:
 	case REQUEST_PING:
 	case REQUEST_CONTROL:
+	case REQUEST_TAKEOVER:
 	case REQUEST_DAEMON_STATUS:
 	case REQUEST_HEALTH_CHECK:
 	case ACCOUNTING_FIRST_REG:
+	case REQUEST_TOPO_INFO:
 		/* Message contains no body/information */
 		break;
 	case REQUEST_SHUTDOWN:
@@ -847,6 +941,7 @@ unpack_msg(slurm_msg_t * msg, Buf buffer)
 		rc = _unpack_update_node_msg((update_node_msg_t **) &
 					     (msg->data), buffer);
 		break;
+	case REQUEST_CREATE_PARTITION:
 	case REQUEST_UPDATE_PARTITION:
 		rc = _unpack_update_partition_msg((update_part_msg_t **) &
 						  (msg->data), buffer);
@@ -855,6 +950,20 @@ unpack_msg(slurm_msg_t * msg, Buf buffer)
 		rc = _unpack_delete_partition_msg((delete_part_msg_t **) &
 						  (msg->data), buffer);
 		break;
+	case REQUEST_CREATE_RESERVATION:
+	case REQUEST_UPDATE_RESERVATION:
+		rc = _unpack_update_resv_msg((resv_desc_msg_t **)
+					     &(msg->data), buffer);
+		break;
+	case REQUEST_DELETE_RESERVATION:
+	case RESPONSE_CREATE_RESERVATION:
+		rc = _unpack_resv_name_msg((reservation_name_msg_t **)
+					     &(msg->data), buffer);
+		break;
+	case RESPONSE_RESERVATION_INFO:
+		rc = _unpack_reserve_info_msg((reserve_info_msg_t **)
+					     &(msg->data), buffer);
+		break;
 	case REQUEST_LAUNCH_TASKS:
 		rc = _unpack_launch_tasks_request_msg(
 			(launch_tasks_request_msg_t **)
@@ -1013,6 +1122,10 @@ unpack_msg(slurm_msg_t * msg, Buf buffer)
 		rc = _unpack_srun_node_fail_msg((srun_node_fail_msg_t **)
 						& msg->data, buffer);
 		break;
+	case SRUN_STEP_MISSING:
+		rc = _unpack_srun_step_missing_msg((srun_step_missing_msg_t **)
+						    & msg->data, buffer);
+		break;
 	case SRUN_TIMEOUT:
 		rc = _unpack_srun_timeout_msg((srun_timeout_msg_t **)
 					      & msg->data, buffer);
@@ -1049,6 +1162,26 @@ unpack_msg(slurm_msg_t * msg, Buf buffer)
 		rc = _unpack_job_ready_msg((job_id_msg_t **)
 					   & msg->data, buffer);
 		break;
+	case REQUEST_SHARE_INFO:
+		rc = _unpack_shares_request_msg(
+			(shares_request_msg_t **)&msg->data,
+			buffer);
+		break;
+	case RESPONSE_SHARE_INFO:
+		rc = _unpack_shares_response_msg(
+			(shares_response_msg_t **)&msg->data,
+			buffer);
+		break;
+	case REQUEST_PRIORITY_FACTORS:
+		rc = _unpack_priority_factors_request_msg(
+			(priority_factors_request_msg_t**)&msg->data,
+			buffer);
+		break;
+	case RESPONSE_PRIORITY_FACTORS:
+		rc = _unpack_priority_factors_response_msg(
+			(priority_factors_response_msg_t**)&msg->data,
+			buffer);
+		break;
 	case REQUEST_NODE_SELECT_INFO:
 		rc = _unpack_node_select_info_req_msg(
 			(node_info_select_request_msg_t **) &msg->data,
@@ -1098,6 +1231,10 @@ unpack_msg(slurm_msg_t * msg, Buf buffer)
 			(accounting_update_msg_t **)&msg->data,
 			buffer);
 		break;
+	case RESPONSE_TOPO_INFO:
+		rc = _unpack_topo_info_msg(
+			(topo_info_response_msg_t **)&msg->data, buffer);
+		break;
 	default:
 		debug("No unpack method for msg type %u", msg->msg_type);
 		return EINVAL;
@@ -1109,6 +1246,395 @@ unpack_msg(slurm_msg_t * msg, Buf buffer)
 	return rc;
 }
 
+static void _pack_assoc_shares_object(void *in, Buf buffer)
+{
+	association_shares_object_t *object = (association_shares_object_t *)in;
+
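+	/* a NULL object is packed as zero/placeholder values for every
+	 * field, so the unpack side always reads the same field sequence */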
+	if(!object) {
+		pack32(0, buffer);
+
+		packnull(buffer);
+		packnull(buffer);
+		packnull(buffer);
+
+		packdouble(0, buffer);
+		pack32(0, buffer);
+
+		packdouble(0, buffer);
+		packdouble(0, buffer);
+		pack64(0, buffer);
+
+		pack16(0, buffer);
+		
+		return;
+	}
+
+	pack32(object->assoc_id, buffer);
+
+	packstr(object->cluster, buffer);
+	packstr(object->name, buffer);
+	packstr(object->parent, buffer);
+
+	packdouble(object->shares_norm, buffer);
+	pack32(object->shares_raw, buffer);
+
+	packdouble(object->usage_efctv, buffer);
+	packdouble(object->usage_norm, buffer);
+	pack64(object->usage_raw, buffer);
+
+	pack16(object->user, buffer);
+}
+
+static int _unpack_assoc_shares_object(void **object, Buf buffer)
+{
+	uint32_t uint32_tmp;
+	association_shares_object_t *object_ptr =
+		xmalloc(sizeof(association_shares_object_t));
+
+	*object = (void *) object_ptr;
+	safe_unpack32(&object_ptr->assoc_id, buffer);
+	
+	safe_unpackstr_xmalloc(&object_ptr->cluster, &uint32_tmp, buffer);
+	safe_unpackstr_xmalloc(&object_ptr->name, &uint32_tmp, buffer);
+	safe_unpackstr_xmalloc(&object_ptr->parent, &uint32_tmp, buffer);
+
+	safe_unpackdouble(&object_ptr->shares_norm, buffer);
+	safe_unpack32(&object_ptr->shares_raw, buffer);
+
+	safe_unpackdouble(&object_ptr->usage_efctv, buffer);
+	safe_unpackdouble(&object_ptr->usage_norm, buffer);
+	safe_unpack64(&object_ptr->usage_raw, buffer);
+
+	safe_unpack16(&object_ptr->user, buffer);
+
+	return SLURM_SUCCESS;
+
+unpack_error:
+	slurm_destroy_association_shares_object(object_ptr);
+	*object = NULL;
+	return SLURM_ERROR;
+}
+
+static void _pack_shares_request_msg(shares_request_msg_t * msg, Buf buffer)
+{
+	uint32_t count = NO_VAL;
+	char *tmp_info = NULL;
+	ListIterator itr = NULL;
+
+	xassert(msg != NULL);
+
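+	/* a count of NO_VAL marks an absent list; 0 marks an empty list */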
+	if(msg->acct_list) 
+		count = list_count(msg->acct_list);
+	pack32(count, buffer);
+	if(count && count != NO_VAL) {
+		itr = list_iterator_create(msg->acct_list);
+		while((tmp_info = list_next(itr))) {
+			packstr(tmp_info, buffer);
+		}
+		list_iterator_destroy(itr);
+	}
+	count = NO_VAL;
+
+	if(msg->user_list) 
+		count = list_count(msg->user_list);
+	pack32(count, buffer);
+	if(count && count != NO_VAL) {
+		itr = list_iterator_create(msg->user_list);
+		while((tmp_info = list_next(itr))) {
+			packstr(tmp_info, buffer);
+		}
+		list_iterator_destroy(itr);
+	}
+	count = NO_VAL;
+}
+
+static int _unpack_shares_request_msg(shares_request_msg_t ** msg, Buf buffer)
+{
+	uint32_t uint32_tmp;
+	uint32_t count = NO_VAL;
+	int i;
+	char *tmp_info = NULL;
+	shares_request_msg_t *object_ptr = NULL;
+
+	xassert(msg != NULL);
+
+	object_ptr = xmalloc(sizeof(shares_request_msg_t));
+	*msg = object_ptr;
+
+	safe_unpack32(&count, buffer);
+	if(count != NO_VAL) {
+		object_ptr->acct_list = list_create(slurm_destroy_char);
+		for(i=0; i<count; i++) {
+			safe_unpackstr_xmalloc(&tmp_info,
+					       &uint32_tmp, buffer);
+			list_append(object_ptr->acct_list, tmp_info);
+		}
+	}
+
+	safe_unpack32(&count, buffer);
+	if(count != NO_VAL) {
+		object_ptr->user_list = list_create(slurm_destroy_char);
+		for(i=0; i<count; i++) {
+			safe_unpackstr_xmalloc(&tmp_info,
+					       &uint32_tmp, buffer);
+			list_append(object_ptr->user_list, tmp_info);
+		}
+	}
+	return SLURM_SUCCESS;
+
+unpack_error:
+	slurm_free_shares_request_msg(object_ptr);
+	*msg = NULL;
+	return SLURM_ERROR;
+}
+
+static void _pack_shares_response_msg(shares_response_msg_t * msg, Buf buffer)
+{
+	ListIterator itr = NULL;
+	association_shares_object_t *share = NULL;
+	uint32_t count = NO_VAL;
+
+	xassert(msg != NULL);
+	if(msg->assoc_shares_list) 
+		count = list_count(msg->assoc_shares_list);
+	pack32(count, buffer);
+	if(count && count != NO_VAL) {
+		itr = list_iterator_create(msg->assoc_shares_list);
+		while((share = list_next(itr))) 
+			_pack_assoc_shares_object(share, buffer);
+		list_iterator_destroy(itr);
+	}
+	count = NO_VAL;
+	pack64(msg->tot_shares, buffer);
+}
+
+static int _unpack_shares_response_msg(shares_response_msg_t ** msg,
+				       Buf buffer)
+{
+	uint32_t count = NO_VAL;
+	int i = 0;
+	void *tmp_info = NULL;
+	shares_response_msg_t *object_ptr = NULL;
+	xassert(msg != NULL);
+
+	object_ptr = xmalloc(sizeof(shares_response_msg_t));
+	*msg = object_ptr;
+
+	safe_unpack32(&count, buffer);
+	if(count != NO_VAL) {
+		object_ptr->assoc_shares_list = 
+			list_create(slurm_destroy_association_shares_object);
+		for(i=0; i<count; i++) {
+			if(_unpack_assoc_shares_object(&tmp_info, buffer) 
+			   != SLURM_SUCCESS)
+				goto unpack_error;
+			list_append(object_ptr->assoc_shares_list, tmp_info);
+		}
+	}
+
+	safe_unpack64(&object_ptr->tot_shares, buffer);
+	return SLURM_SUCCESS;
+
+unpack_error:
+	slurm_free_shares_response_msg(object_ptr);
+	*msg = NULL;
+	return SLURM_ERROR;
+}
+
+static void _pack_priority_factors_object(void *in, Buf buffer)
+{
+	priority_factors_object_t *object = (priority_factors_object_t *)in;
+
+	if(!object) {
+		pack32(0, buffer);
+		pack32(0, buffer);
+
+		packdouble(0, buffer);
+		packdouble(0, buffer);
+		packdouble(0, buffer);
+		packdouble(0, buffer);
+		packdouble(0, buffer);
+
+		pack16(0, buffer);
+
+		return;
+	}
+
+	pack32(object->job_id, buffer);
+	pack32(object->user_id, buffer);
+
+	packdouble(object->priority_age, buffer);
+	packdouble(object->priority_fs, buffer);
+	packdouble(object->priority_js, buffer);
+	packdouble(object->priority_part, buffer);
+	packdouble(object->priority_qos, buffer);
+
+	pack16(object->nice, buffer);
+}
+
+static int _unpack_priority_factors_object(void **object, Buf buffer)
+{
+	priority_factors_object_t *object_ptr =
+		xmalloc(sizeof(priority_factors_object_t));
+
+	*object = (void *) object_ptr;
+	safe_unpack32(&object_ptr->job_id, buffer);
+	safe_unpack32(&object_ptr->user_id, buffer);
+
+	safe_unpackdouble(&object_ptr->priority_age, buffer);
+	safe_unpackdouble(&object_ptr->priority_fs, buffer);
+	safe_unpackdouble(&object_ptr->priority_js, buffer);
+	safe_unpackdouble(&object_ptr->priority_part, buffer);
+	safe_unpackdouble(&object_ptr->priority_qos, buffer);
+
+	safe_unpack16(&object_ptr->nice, buffer);
+
+	return SLURM_SUCCESS;
+
+unpack_error:
+	xfree(object_ptr);
+	*object = NULL;
+	return SLURM_ERROR;
+}
+
+static void 
+_pack_priority_factors_request_msg(priority_factors_request_msg_t * msg,
+				   Buf buffer)
+{
+	uint32_t count = NO_VAL;
+	uint32_t* tmp = NULL;
+	ListIterator itr = NULL;
+
+	xassert(msg != NULL);
+
+	if(msg->job_id_list)
+		count = list_count(msg->job_id_list);
+	pack32(count, buffer);
+	if(count && count != NO_VAL) {
+		itr = list_iterator_create(msg->job_id_list);
+		while((tmp = list_next(itr))) {
+			pack32(*tmp, buffer);
+		}
+		list_iterator_destroy(itr);
+	}
+
+	count = NO_VAL;
+	if(msg->uid_list)
+		count = list_count(msg->uid_list);
+	pack32(count, buffer);
+	if(count && count != NO_VAL) {
+		itr = list_iterator_create(msg->uid_list);
+		while((tmp = list_next(itr))) {
+			pack32(*tmp, buffer);
+		}
+		list_iterator_destroy(itr);
+	}
+}
+
+static int
+_unpack_priority_factors_request_msg(priority_factors_request_msg_t ** msg,
+				     Buf buffer)
+{
+	uint32_t* uint32_tmp;
+	uint32_t count = NO_VAL;
+	int i;
+	priority_factors_request_msg_t *object_ptr = NULL;
+
+	xassert(msg != NULL);
+
+	object_ptr = xmalloc(sizeof(priority_factors_request_msg_t));
+	*msg = object_ptr;
+
+	safe_unpack32(&count, buffer);
+	if(count != NO_VAL) {
+		object_ptr->job_id_list = list_create(slurm_destroy_uint32_ptr);
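+		/* each element is an xmalloc'd uint32_t, released by
+		 * slurm_destroy_uint32_ptr when the list is destroyed */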
+		for(i=0; i<count; i++) {
+			uint32_tmp = xmalloc(sizeof(uint32_t));
+			safe_unpack32(uint32_tmp, buffer);
+			list_append(object_ptr->job_id_list, uint32_tmp);
+		}
+	}
+
+	safe_unpack32(&count, buffer);
+	if(count != NO_VAL) {
+		object_ptr->uid_list = list_create(slurm_destroy_uint32_ptr);
+		for(i=0; i<count; i++) {
+			uint32_tmp = xmalloc(sizeof(uint32_t));
+			safe_unpack32(uint32_tmp, buffer);
+			list_append(object_ptr->uid_list, uint32_tmp);
+		}
+	}
+	return SLURM_SUCCESS;
+
+unpack_error:
+	slurm_free_priority_factors_request_msg(object_ptr);
+	*msg = NULL;
+	return SLURM_ERROR;
+}
+
+static void
+_pack_priority_factors_response_msg(priority_factors_response_msg_t * msg,
+						Buf buffer)
+{
+	ListIterator itr = NULL;
+	priority_factors_object_t *factors = NULL;
+	uint32_t count = NO_VAL;
+
+	xassert(msg != NULL);
+	if(msg->priority_factors_list)
+		count = list_count(msg->priority_factors_list);
+	pack32(count, buffer);
+	if(count && count != NO_VAL) {
+		itr = list_iterator_create(msg->priority_factors_list);
+		while((factors = list_next(itr)))
+			_pack_priority_factors_object(factors, buffer);
+		list_iterator_destroy(itr);
+	}
+}
+
+static void _priority_factors_resp_list_del(void *x)
+{
+	xfree(x);
+}
+
+static int
+_unpack_priority_factors_response_msg(priority_factors_response_msg_t ** msg,
+						  Buf buffer)
+{
+	uint32_t count = NO_VAL;
+	int i = 0;
+	void *tmp_info = NULL;
+	priority_factors_response_msg_t *object_ptr = NULL;
+	xassert(msg != NULL);
+
+	object_ptr = xmalloc(sizeof(priority_factors_response_msg_t));
+	*msg = object_ptr;
+
+	safe_unpack32(&count, buffer);
+	if(count != NO_VAL) {
+		object_ptr->priority_factors_list =
+			list_create(_priority_factors_resp_list_del);
+		for(i=0; i<count; i++) {
+			if(_unpack_priority_factors_object(&tmp_info, buffer)
+			   != SLURM_SUCCESS)
+				goto unpack_error;
+			list_append(object_ptr->priority_factors_list,
+				    tmp_info);
+		}
+	}
+	return SLURM_SUCCESS;
+
+unpack_error:
+	slurm_free_priority_factors_response_msg(object_ptr);
+	*msg = NULL;
+	return SLURM_ERROR;
+}
+
 static void
 _pack_update_node_msg(update_node_msg_t * msg, Buf buffer)
 {
@@ -1118,6 +1644,7 @@ _pack_update_node_msg(update_node_msg_t * msg, Buf buffer)
 	pack16(msg->node_state, buffer);
 	packstr(msg->features, buffer);
 	packstr(msg->reason, buffer);
+	pack32(msg->weight, buffer);
 }
 
 static int
@@ -1135,13 +1662,11 @@ _unpack_update_node_msg(update_node_msg_t ** msg, Buf buffer)
 	safe_unpack16(&tmp_ptr->node_state, buffer);
 	safe_unpackstr_xmalloc(&tmp_ptr->features, &uint32_tmp, buffer);
 	safe_unpackstr_xmalloc(&tmp_ptr->reason, &uint32_tmp, buffer);
+	safe_unpack32(&tmp_ptr->weight, buffer);
 	return SLURM_SUCCESS;
 
 unpack_error:
-	xfree(tmp_ptr->node_names);
-	xfree(tmp_ptr->features);
-	xfree(tmp_ptr->reason);
-	xfree(tmp_ptr);
+	slurm_free_update_node_msg(tmp_ptr);
 	*msg = NULL;
 	return SLURM_ERROR;
 }
@@ -1169,7 +1694,7 @@ _pack_node_registration_status_msg(slurm_node_registration_status_msg_t *
 		pack32((uint32_t)msg->job_id[i], buffer);
 	}
 	for (i = 0; i < msg->job_count; i++) {
-		pack16((uint16_t)msg->step_id[i], buffer);
+		pack32(msg->step_id[i], buffer);
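+		/* step IDs are now packed as 32-bit values to match the
+		 * widened step_id array in the registration message */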
 	}
 	pack16((uint16_t)msg->startup, buffer);
 	if (msg->startup)
@@ -1209,9 +1734,9 @@ _unpack_node_registration_status_msg(slurm_node_registration_status_msg_t
 		safe_unpack32(&node_reg_ptr->job_id[i], buffer);
 	}
 	node_reg_ptr->step_id =
-		xmalloc(sizeof(uint16_t) * node_reg_ptr->job_count);
+		xmalloc(sizeof(uint32_t) * node_reg_ptr->job_count);
 	for (i = 0; i < node_reg_ptr->job_count; i++) {
-		safe_unpack16(&node_reg_ptr->step_id[i], buffer);
+		safe_unpack32(&node_reg_ptr->step_id[i], buffer);
 	}
 
 	safe_unpack16(&node_reg_ptr->startup, buffer);
@@ -1223,13 +1748,7 @@ _unpack_node_registration_status_msg(slurm_node_registration_status_msg_t
 	return SLURM_SUCCESS;
 
 unpack_error:
-	xfree(node_reg_ptr->arch);
-	xfree(node_reg_ptr->job_id);
-	xfree(node_reg_ptr->node_name);
-	xfree(node_reg_ptr->os);
-	xfree(node_reg_ptr->step_id);
-	switch_g_free_node_info(&node_reg_ptr->switch_nodeinfo);
-	xfree(node_reg_ptr);
+	slurm_free_node_registration_status_msg(node_reg_ptr);
 	*msg = NULL;
 	return SLURM_ERROR;
 }
@@ -1244,9 +1763,9 @@ _pack_resource_allocation_response_msg(resource_allocation_response_msg_t *
 	pack32(msg->job_id, buffer);
 	packstr(msg->node_list, buffer);
 
-	pack16(msg->num_cpu_groups, buffer);
+	pack32(msg->num_cpu_groups, buffer);
 	if (msg->num_cpu_groups) {
-		pack32_array(msg->cpus_per_node, msg->num_cpu_groups, buffer);
+		pack16_array(msg->cpus_per_node, msg->num_cpu_groups, buffer);
 		pack32_array(msg->cpu_count_reps, msg->num_cpu_groups, buffer);
 	}
 
@@ -1272,15 +1791,13 @@ _unpack_resource_allocation_response_msg(resource_allocation_response_msg_t
 	safe_unpack32(&tmp_ptr->job_id, buffer);
 	safe_unpackstr_xmalloc(&tmp_ptr->node_list, &uint32_tmp, buffer);
 
-	safe_unpack16(&tmp_ptr->num_cpu_groups, buffer);
+	safe_unpack32(&tmp_ptr->num_cpu_groups, buffer);
 	if (tmp_ptr->num_cpu_groups > 0) {
-		safe_unpack32_array((uint32_t **) &
-				    (tmp_ptr->cpus_per_node), &uint32_tmp,
+		safe_unpack16_array(&tmp_ptr->cpus_per_node, &uint32_tmp,
 				    buffer);
 		if (tmp_ptr->num_cpu_groups != uint32_tmp)
 			goto unpack_error;
-		safe_unpack32_array((uint32_t **) &
-				    (tmp_ptr->cpu_count_reps), &uint32_tmp,
+		safe_unpack32_array(&tmp_ptr->cpu_count_reps, &uint32_tmp,
 				    buffer);
 		if (tmp_ptr->num_cpu_groups != uint32_tmp)
 			goto unpack_error;
@@ -1298,11 +1815,7 @@ _unpack_resource_allocation_response_msg(resource_allocation_response_msg_t
 	return SLURM_SUCCESS;
 
 unpack_error:
-	select_g_free_jobinfo(&tmp_ptr->select_jobinfo);
-	xfree(tmp_ptr->node_list);
-	xfree(tmp_ptr->cpus_per_node);
-	xfree(tmp_ptr->cpu_count_reps);
-	xfree(tmp_ptr);
+	slurm_free_resource_allocation_response_msg(tmp_ptr);
 	*msg = NULL;
 	return SLURM_ERROR;
 }
@@ -1317,9 +1830,9 @@ _pack_job_alloc_info_response_msg(job_alloc_info_response_msg_t * msg,
 	pack32(msg->job_id, buffer);
 	packstr(msg->node_list, buffer);
 
-	pack16(msg->num_cpu_groups, buffer);
+	pack32(msg->num_cpu_groups, buffer);
 	if (msg->num_cpu_groups) {
-		pack32_array(msg->cpus_per_node, msg->num_cpu_groups, buffer);
+		pack16_array(msg->cpus_per_node, msg->num_cpu_groups, buffer);
 		pack32_array(msg->cpu_count_reps, msg->num_cpu_groups, buffer);
 	}
 
@@ -1347,21 +1860,16 @@ _unpack_job_alloc_info_response_msg(job_alloc_info_response_msg_t ** msg,
 	safe_unpack32(&tmp_ptr->job_id, buffer);
 	safe_unpackstr_xmalloc(&tmp_ptr->node_list, &uint32_tmp, buffer);
 
-	safe_unpack16(&tmp_ptr->num_cpu_groups, buffer);
+	safe_unpack32(&tmp_ptr->num_cpu_groups, buffer);
 	if (tmp_ptr->num_cpu_groups > 0) {
-		safe_unpack32_array((uint32_t **) &
-				    (tmp_ptr->cpus_per_node), &uint32_tmp,
+		safe_unpack16_array(&tmp_ptr->cpus_per_node, &uint32_tmp,
 				    buffer);
 		if (tmp_ptr->num_cpu_groups != uint32_tmp)
 			goto unpack_error;
-		safe_unpack32_array((uint32_t **) &
-				    (tmp_ptr->cpu_count_reps), &uint32_tmp,
+		safe_unpack32_array(&tmp_ptr->cpu_count_reps, &uint32_tmp,
 				    buffer);
 		if (tmp_ptr->num_cpu_groups != uint32_tmp)
 			goto unpack_error;
-	} else {
-		tmp_ptr->cpus_per_node = NULL;
-		tmp_ptr->cpu_count_reps = NULL;
 	}
 
 	safe_unpack32(&tmp_ptr->node_cnt, buffer);
@@ -1381,11 +1889,7 @@ _unpack_job_alloc_info_response_msg(job_alloc_info_response_msg_t ** msg,
 	return SLURM_SUCCESS;
 
 unpack_error:
-	select_g_free_jobinfo(&tmp_ptr->select_jobinfo);
-	xfree(tmp_ptr->node_list);
-	xfree(tmp_ptr->cpus_per_node);
-	xfree(tmp_ptr->cpu_count_reps);
-	xfree(tmp_ptr);
+	slurm_free_job_alloc_info_response_msg(tmp_ptr);
 	*msg = NULL;
 	return SLURM_ERROR;
 }
@@ -1417,7 +1921,7 @@ _unpack_submit_response_msg(submit_response_msg_t ** msg, Buf buffer)
 	return SLURM_SUCCESS;
 
 unpack_error:
-	xfree(tmp_ptr);
+	slurm_free_submit_response_response_msg(tmp_ptr);
 	*msg = NULL;
 	return SLURM_ERROR;
 }
@@ -1447,8 +1951,7 @@ _unpack_node_info_msg(node_info_msg_t ** msg, Buf buffer)
 	return SLURM_SUCCESS;
 
 unpack_error:
-	xfree(node);
-	xfree(*msg);
+	slurm_free_node_info_msg(*msg);
 	*msg = NULL;
 	return SLURM_ERROR;
 }
@@ -1504,6 +2007,7 @@ _pack_update_partition_msg(update_part_msg_t * msg, Buf buffer)
 	packstr(msg->allow_groups, buffer);
 	pack16(msg-> default_part, buffer);
 	pack32(msg-> max_time,     buffer);
+	pack32(msg-> default_time, buffer);
 	pack32(msg-> max_nodes,    buffer);
 	pack32(msg-> min_nodes,    buffer);
 	packstr(msg->name,         buffer);
@@ -1514,6 +2018,8 @@ _pack_update_partition_msg(update_part_msg_t * msg, Buf buffer)
 	pack16(msg-> priority,     buffer);
 	pack16(msg-> root_only,    buffer);
 	pack16(msg-> state_up,     buffer);
+
+	packstr(msg->allow_alloc_nodes, buffer);
 }
 
 static int
@@ -1531,6 +2037,7 @@ _unpack_update_partition_msg(update_part_msg_t ** msg, Buf buffer)
 	safe_unpackstr_xmalloc(&tmp_ptr->allow_groups, &uint32_tmp, buffer);
 	safe_unpack16(&tmp_ptr->default_part, buffer);
 	safe_unpack32(&tmp_ptr->max_time, buffer);
+	safe_unpack32(&tmp_ptr->default_time, buffer);
 	safe_unpack32(&tmp_ptr->max_nodes, buffer);
 	safe_unpack32(&tmp_ptr->min_nodes, buffer);
 	safe_unpackstr_xmalloc(&tmp_ptr->name, &uint32_tmp, buffer);
@@ -1541,13 +2048,65 @@ _unpack_update_partition_msg(update_part_msg_t ** msg, Buf buffer)
 	safe_unpack16(&tmp_ptr->priority,  buffer);
 	safe_unpack16(&tmp_ptr->root_only, buffer);
 	safe_unpack16(&tmp_ptr->state_up,  buffer);
+
+	safe_unpackstr_xmalloc(&tmp_ptr->allow_alloc_nodes, &uint32_tmp,
+			       buffer);
+
+	return SLURM_SUCCESS;
+
+unpack_error:
+	slurm_free_update_part_msg(tmp_ptr);
+	*msg = NULL;
+	return SLURM_ERROR;
+}
+
+static void
+_pack_update_resv_msg(resv_desc_msg_t * msg, Buf buffer)
+{
+	xassert(msg != NULL);
+
+	packstr(msg->name,         buffer);
+	pack_time(msg->start_time, buffer);
+	pack_time(msg->end_time,   buffer);
+	pack32(msg->duration,      buffer);
+	pack16(msg->flags,         buffer);
+	pack32(msg->node_cnt,      buffer);
+	packstr(msg->node_list,    buffer);
+	packstr(msg->features,     buffer);
+	packstr(msg->partition,    buffer);
+
+	packstr(msg->users,        buffer);
+	packstr(msg->accounts,     buffer);
+}
+
+static int
+_unpack_update_resv_msg(resv_desc_msg_t ** msg, Buf buffer)
+{
+	uint32_t uint32_tmp;
+	resv_desc_msg_t *tmp_ptr;
+
+	xassert(msg != NULL);
+
+	/* alloc memory for structure */
+	tmp_ptr = xmalloc(sizeof(resv_desc_msg_t));
+	*msg = tmp_ptr;
+
+	safe_unpackstr_xmalloc(&tmp_ptr->name, &uint32_tmp, buffer);
+	safe_unpack_time(&tmp_ptr->start_time, buffer);
+	safe_unpack_time(&tmp_ptr->end_time,   buffer);
+	safe_unpack32(&tmp_ptr->duration,      buffer);
+	safe_unpack16(&tmp_ptr->flags,         buffer);
+	safe_unpack32(&tmp_ptr->node_cnt,      buffer);
+	safe_unpackstr_xmalloc(&tmp_ptr->node_list, &uint32_tmp, buffer);
+	safe_unpackstr_xmalloc(&tmp_ptr->features,  &uint32_tmp, buffer);
+	safe_unpackstr_xmalloc(&tmp_ptr->partition, &uint32_tmp, buffer);
+
+	safe_unpackstr_xmalloc(&tmp_ptr->users,     &uint32_tmp, buffer);	
+	safe_unpackstr_xmalloc(&tmp_ptr->accounts,  &uint32_tmp, buffer);
 	return SLURM_SUCCESS;
 
 unpack_error:
-	xfree(tmp_ptr->name);
-	xfree(tmp_ptr->nodes);
-	xfree(tmp_ptr->allow_groups);
-	xfree(tmp_ptr);
+	slurm_free_resv_desc_msg(tmp_ptr);
 	*msg = NULL;
 	return SLURM_ERROR;
 }
@@ -1576,8 +2135,36 @@ _unpack_delete_partition_msg(delete_part_msg_t ** msg, Buf buffer)
 	return SLURM_SUCCESS;
 
 unpack_error:
-	xfree(tmp_ptr->name);
-	xfree(tmp_ptr);
+	slurm_free_delete_part_msg(tmp_ptr);
+	*msg = NULL;
+	return SLURM_ERROR;
+}
+
+static void
+_pack_resv_name_msg(reservation_name_msg_t * msg, Buf buffer)
+{
+	xassert(msg != NULL);
+
+	packstr(msg->name,         buffer);
+}
+
+static int
+_unpack_resv_name_msg(reservation_name_msg_t ** msg, Buf buffer)
+{
+	uint32_t uint32_tmp;
+	reservation_name_msg_t *tmp_ptr;
+
+	xassert(msg != NULL);
+
+	/* alloc memory for structure */
+	tmp_ptr = xmalloc(sizeof(reservation_name_msg_t));
+	*msg = tmp_ptr;
+
+	safe_unpackstr_xmalloc(&tmp_ptr->name, &uint32_tmp, buffer);
+	return SLURM_SUCCESS;
+
+unpack_error:
+	slurm_free_resv_name_msg(tmp_ptr);
 	*msg = NULL;
 	return SLURM_ERROR;
 }
@@ -1593,6 +2180,7 @@ _pack_job_step_create_request_msg(job_step_create_request_msg_t
 	pack32(msg->node_count, buffer);
 	pack32(msg->cpu_count, buffer);
 	pack32(msg->num_tasks, buffer);
+	pack32(msg->mem_per_task, buffer);
 
 	pack16(msg->relative, buffer);
 	pack16(msg->task_dist, buffer);
@@ -1601,14 +2189,15 @@ _pack_job_step_create_request_msg(job_step_create_request_msg_t
 	pack16(msg->ckpt_interval, buffer);
 	pack16(msg->exclusive, buffer);
 	pack16(msg->immediate, buffer);
-	pack16(msg->mem_per_task, buffer);
+	pack16(msg->resv_port_cnt, buffer);
 
 	packstr(msg->host, buffer);
 	packstr(msg->name, buffer);
 	packstr(msg->network, buffer);
 	packstr(msg->node_list, buffer);
-	packstr(msg->ckpt_path, buffer);
+	packstr(msg->ckpt_dir, buffer);
 
+	pack8(msg->no_kill, buffer);
 	pack8(msg->overcommit, buffer);
 }
 
@@ -1629,6 +2218,7 @@ _unpack_job_step_create_request_msg(job_step_create_request_msg_t ** msg,
 	safe_unpack32(&(tmp_ptr->node_count), buffer);
 	safe_unpack32(&(tmp_ptr->cpu_count), buffer);
 	safe_unpack32(&(tmp_ptr->num_tasks), buffer);
+	safe_unpack32(&(tmp_ptr->mem_per_task), buffer);
 
 	safe_unpack16(&(tmp_ptr->relative), buffer);
 	safe_unpack16(&(tmp_ptr->task_dist), buffer);
@@ -1637,24 +2227,21 @@ _unpack_job_step_create_request_msg(job_step_create_request_msg_t ** msg,
 	safe_unpack16(&(tmp_ptr->ckpt_interval), buffer);
 	safe_unpack16(&(tmp_ptr->exclusive), buffer);
 	safe_unpack16(&(tmp_ptr->immediate), buffer);
-	safe_unpack16(&(tmp_ptr->mem_per_task), buffer);
+	safe_unpack16(&(tmp_ptr->resv_port_cnt), buffer);
 
 	safe_unpackstr_xmalloc(&(tmp_ptr->host), &uint32_tmp, buffer);
 	safe_unpackstr_xmalloc(&(tmp_ptr->name), &uint32_tmp, buffer);
 	safe_unpackstr_xmalloc(&(tmp_ptr->network), &uint32_tmp, buffer);
 	safe_unpackstr_xmalloc(&(tmp_ptr->node_list), &uint32_tmp, buffer);
-	safe_unpackstr_xmalloc(&(tmp_ptr->ckpt_path), &uint32_tmp, buffer);
+	safe_unpackstr_xmalloc(&(tmp_ptr->ckpt_dir), &uint32_tmp, buffer);
 
+	safe_unpack8(&(tmp_ptr->no_kill), buffer);
 	safe_unpack8(&(tmp_ptr->overcommit), buffer);
 
 	return SLURM_SUCCESS;
 
 unpack_error:
-	xfree(tmp_ptr->host);
-	xfree(tmp_ptr->name);
-	xfree(tmp_ptr->network);
-	xfree(tmp_ptr->node_list);
-	xfree(tmp_ptr);
+	slurm_free_job_step_create_request_msg(tmp_ptr);
 	*msg = NULL;
 	return SLURM_ERROR;
 }
@@ -1695,8 +2282,7 @@ _unpack_kill_job_msg(kill_job_msg_t ** msg, Buf buffer)
 	return SLURM_SUCCESS;
 
 unpack_error:
-	xfree(tmp_ptr->nodes);
-	xfree(tmp_ptr);
+	slurm_free_kill_job_msg(tmp_ptr);
 	*msg = NULL;
 	return SLURM_ERROR;
 }
@@ -1728,7 +2314,7 @@ _unpack_signal_job_msg(signal_job_msg_t ** msg, Buf buffer)
 	return SLURM_SUCCESS;
 
 unpack_error:
-	xfree(tmp_ptr);
+	slurm_free_signal_job_msg(tmp_ptr);
 	*msg = NULL;
 	return SLURM_ERROR;
 }
@@ -1765,9 +2351,7 @@ _unpack_epilog_comp_msg(epilog_complete_msg_t ** msg, Buf buffer)
 	return SLURM_SUCCESS;
 
 unpack_error:
-	xfree(tmp_ptr->node_name);
-	switch_g_free_node_info(&tmp_ptr->switch_nodeinfo);
-	xfree(tmp_ptr);
+	slurm_free_epilog_complete_msg(tmp_ptr);
 	*msg = NULL;
 	return SLURM_ERROR;
 }
@@ -1796,7 +2380,7 @@ _unpack_update_job_time_msg(job_time_msg_t ** msg, Buf buffer)
 	return SLURM_SUCCESS;
 
 unpack_error:
-	xfree(tmp_ptr);
+	slurm_free_update_job_time_msg(tmp_ptr);
 	*msg = NULL;
 	return SLURM_ERROR;
 }
@@ -1807,7 +2391,8 @@ _pack_job_step_create_response_msg(job_step_create_response_msg_t * msg,
 {
 	xassert(msg != NULL);
 
-	pack32((uint32_t)msg->job_step_id, buffer);
+	packstr(msg->resv_ports, buffer);
+	pack32(msg->job_step_id, buffer);
 	pack_slurm_step_layout(msg->step_layout, buffer);
 	slurm_cred_pack(msg->cred, buffer);
 	switch_pack_jobinfo(msg->switch_job, buffer);
@@ -1819,12 +2404,14 @@ _unpack_job_step_create_response_msg(job_step_create_response_msg_t ** msg,
 				     Buf buffer)
 {
 	job_step_create_response_msg_t *tmp_ptr = NULL;
+	uint32_t uint32_tmp;
 	
 	/* alloc memory for structure */
 	xassert(msg != NULL);
 	tmp_ptr = xmalloc(sizeof(job_step_create_response_msg_t));
 	*msg = tmp_ptr;
 
+	safe_unpackstr_xmalloc(&tmp_ptr->resv_ports, &uint32_tmp, buffer);
 	safe_unpack32(&tmp_ptr->job_step_id, buffer);
 	if (unpack_slurm_step_layout(&tmp_ptr->step_layout, buffer))
 		goto unpack_error;
@@ -1841,7 +2428,7 @@ _unpack_job_step_create_response_msg(job_step_create_response_msg_t ** msg,
 	return SLURM_SUCCESS;
 
 unpack_error:
-	xfree(tmp_ptr);
+	slurm_free_job_step_create_response_msg(tmp_ptr);
 	*msg = NULL;
 	return SLURM_ERROR;
 }
@@ -1862,7 +2449,7 @@ _unpack_partition_info_msg(partition_info_msg_t ** msg, Buf buffer)
 	partition = (*msg)->partition_array =
 		xmalloc(sizeof(partition_info_t) * (*msg)->record_count);
 
-	/* load individual job info */
+	/* load individual partition info */
 	for (i = 0; i < (*msg)->record_count; i++) {
 		if (_unpack_partition_info_members(&partition[i], buffer))
 			goto unpack_error;
@@ -1870,8 +2457,7 @@ _unpack_partition_info_msg(partition_info_msg_t ** msg, Buf buffer)
 	return SLURM_SUCCESS;
 
 unpack_error:
-	xfree(partition);
-	xfree(*msg);
+	slurm_free_partition_info_msg(*msg);
 	*msg = NULL;
 	return SLURM_ERROR;
 }
@@ -1887,6 +2473,7 @@ _unpack_partition_info_members(partition_info_t * part, Buf buffer)
 	if (part->name == NULL)
 		part->name = xmalloc(1);	/* part->name = "" implicit */
 	safe_unpack32(&part->max_time,     buffer);
+	safe_unpack32(&part->default_time, buffer);
 	safe_unpack32(&part->max_nodes,    buffer);
 	safe_unpack32(&part->min_nodes,    buffer);
 	safe_unpack32(&part->total_nodes,  buffer);
@@ -1902,6 +2489,7 @@ _unpack_partition_info_members(partition_info_t * part, Buf buffer)
 
 	safe_unpack16(&part->state_up, buffer);
 	safe_unpackstr_xmalloc(&part->allow_groups, &uint32_tmp, buffer);
+	safe_unpackstr_xmalloc(&part->allow_alloc_nodes, &uint32_tmp, buffer);
 	safe_unpackstr_xmalloc(&part->nodes, &uint32_tmp, buffer);
 	safe_unpackstr_xmalloc(&node_inx_str, &uint32_tmp, buffer);
 	if (node_inx_str == NULL)
@@ -1916,53 +2504,79 @@ _unpack_partition_info_members(partition_info_t * part, Buf buffer)
 unpack_error:
 	xfree(part->name);
 	xfree(part->allow_groups);
+	xfree(part->allow_alloc_nodes);
 	xfree(part->nodes);
 	xfree(node_inx_str);
 	return SLURM_ERROR;
 }
 
-/* pack_job_step_info_members
- * pack selected fields of the description of a job into a buffer
- * IN job_id, step_id, user_id, start_time, partition, nodes - job info
- * IN/OUT buffer - destination of the pack, contains pointers that are 
- *			automatically updated
- */
-/* void */
-/* pack_job_step_info_members(uint32_t job_id, uint16_t step_id, */
-/* 			   uint32_t user_id, uint32_t num_tasks, */
-/* 			   time_t start_time, char *partition,  */
-/* 			   char *nodes, char *name, char *network, */
-/* 			   Buf buffer) */
-/* { */
-/* 	pack32((uint32_t)job_id, buffer); */
-/* 	pack16((uint16_t)step_id, buffer); */
-/* 	pack32((uint32_t)user_id, buffer); */
-/* 	pack32((uint32_t)num_tasks, buffer); */
-
-/* 	pack_time(start_time, buffer); */
-/* 	packstr(partition, buffer); */
-/* 	packstr(nodes, buffer); */
-/* 	packstr(name, buffer); */
-/* 	packstr(network, buffer); */
-/* } */
-
-/* pack_job_step_info
- * packs a slurm job steps info
- * IN step - pointer to the job step info
- * IN/OUT buffer - destination of the pack, contains pointers that are 
- *			automatically updated
- */
-/* void */
-/* pack_job_step_info(job_step_info_t * step, Buf buffer) */
-/* { */
-/* 	pack_job_step_info_members(step->job_id, */
-/* 				   step->step_id, */
-/* 				   step->user_id, */
-/* 				   step->num_tasks, */
-/* 				   step->start_time, */
-/* 				   step->partition, step->nodes,  */
-/* 				   step->name, step->network, buffer); */
-/* } */
+static int
+_unpack_reserve_info_msg(reserve_info_msg_t ** msg, Buf buffer)
+{
+	int i;
+	reserve_info_t *reserve = NULL;
+
+	xassert(msg != NULL);
+	*msg = xmalloc(sizeof(reserve_info_msg_t));
+
+	/* load buffer's header (record count and update time) */
+	safe_unpack32(&((*msg)->record_count), buffer);
+	safe_unpack_time(&((*msg)->last_update), buffer);
+
+	reserve = (*msg)->reservation_array =
+		xmalloc(sizeof(reserve_info_t) * (*msg)->record_count);
+
+	/* load individual reservation records */
+	for (i = 0; i < (*msg)->record_count; i++) {
+		if (_unpack_reserve_info_members(&reserve[i], buffer))
+			goto unpack_error;
+	}
+	return SLURM_SUCCESS;
+
+unpack_error:
+	slurm_free_reservation_info_msg(*msg);
+	*msg = NULL;
+	return SLURM_ERROR;
+}
+
+
+static int
+_unpack_reserve_info_members(reserve_info_t * resv, Buf buffer)
+{
+	char *node_inx_str = NULL;
+	uint32_t uint32_tmp;
+
+	safe_unpackstr_xmalloc(&resv->accounts,	&uint32_tmp, buffer);
+	safe_unpack_time(&resv->end_time,	buffer);
+	safe_unpackstr_xmalloc(&resv->features,	&uint32_tmp, buffer);
+	safe_unpackstr_xmalloc(&resv->name,	&uint32_tmp, buffer);
+	safe_unpack32(&resv->node_cnt,		buffer);
+	safe_unpackstr_xmalloc(&resv->node_list,&uint32_tmp, buffer);
+	safe_unpackstr_xmalloc(&resv->partition,&uint32_tmp, buffer);
+	safe_unpack_time(&resv->start_time,	buffer);
+	safe_unpack16(&resv->flags,		buffer);
+	safe_unpackstr_xmalloc(&resv->users,	&uint32_tmp, buffer);
+	safe_unpackstr_xmalloc(&node_inx_str,   &uint32_tmp, buffer);
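+	/* bitfmt2int converts the packed node bitmap string (e.g. "0-3,7")
+	 * into an array of integer index pairs */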
+	if (node_inx_str == NULL)
+		resv->node_inx = bitfmt2int("");
+	else {
+		resv->node_inx = bitfmt2int(node_inx_str);
+		xfree(node_inx_str);
+		node_inx_str = NULL;
+	}
+	return SLURM_SUCCESS;
+
+unpack_error:
+	xfree(resv->accounts);
+	xfree(resv->features);
+	xfree(resv->name);
+	xfree(node_inx_str);
+	xfree(resv->node_inx);
+	xfree(resv->node_list);
+	xfree(resv->partition);
+	xfree(resv->users);
+	return SLURM_ERROR;
+}
 
 /* _unpack_job_step_info_members
  * unpacks a set of slurm job step info for one job step
@@ -1977,7 +2591,7 @@ _unpack_job_step_info_members(job_step_info_t * step, Buf buffer)
 	char *node_inx_str;
 
 	safe_unpack32(&step->job_id, buffer);
-	safe_unpack16(&step->step_id, buffer);
+	safe_unpack32(&step->step_id, buffer);
 	safe_unpack16(&step->ckpt_interval, buffer);
 	safe_unpack32(&step->user_id, buffer);
 	safe_unpack32(&step->num_tasks, buffer);
@@ -1985,11 +2599,12 @@ _unpack_job_step_info_members(job_step_info_t * step, Buf buffer)
 	safe_unpack_time(&step->start_time, buffer);
 	safe_unpack_time(&step->run_time, buffer);
 	safe_unpackstr_xmalloc(&step->partition, &uint32_tmp, buffer);
+	safe_unpackstr_xmalloc(&step->resv_ports, &uint32_tmp, buffer);
 	safe_unpackstr_xmalloc(&step->nodes, &uint32_tmp, buffer);
 	safe_unpackstr_xmalloc(&step->name, &uint32_tmp, buffer);
 	safe_unpackstr_xmalloc(&step->network, &uint32_tmp, buffer);
 	safe_unpackstr_xmalloc(&node_inx_str, &uint32_tmp, buffer);
-	safe_unpackstr_xmalloc(&step->ckpt_path, &uint32_tmp, buffer);
+	safe_unpackstr_xmalloc(&step->ckpt_dir, &uint32_tmp, buffer);
 	if (node_inx_str == NULL)
 		step->node_inx = bitfmt2int("");
 	else {
@@ -2031,8 +2646,7 @@ _unpack_job_step_info_response_msg(job_step_info_response_msg_t
 	return SLURM_SUCCESS;
 
 unpack_error:
-	xfree(step);
-	xfree(*msg);
+	slurm_free_job_step_info_response_msg(*msg);
 	*msg = NULL;
 	return SLURM_ERROR;
 }
@@ -2067,8 +2681,7 @@ _unpack_job_info_msg(job_info_msg_t ** msg, Buf buffer)
 	return SLURM_SUCCESS;
 
 unpack_error:
-	xfree(job);
-	xfree(*msg);
+	slurm_free_job_info_msg(*msg);
 	*msg = NULL;
 	return SLURM_ERROR;
 }
@@ -2086,6 +2699,7 @@ _unpack_job_info_members(job_info_t * job, Buf buffer)
 	char *node_inx_str;
 	multi_core_data_t *mc_ptr;
 
+	safe_unpack32(&job->assoc_id, buffer);
 	safe_unpack32(&job->job_id, buffer);
 	safe_unpack32(&job->user_id, buffer);
 	safe_unpack32(&job->group_id, buffer);
@@ -2093,6 +2707,7 @@ _unpack_job_info_members(job_info_t * job, Buf buffer)
 	safe_unpack16(&job->job_state,    buffer);
 	safe_unpack16(&job->batch_flag,   buffer);
 	safe_unpack16(&job->state_reason, buffer);
+	safe_unpack16(&job->restart_cnt, buffer);
 
 	safe_unpack32(&job->alloc_sid,    buffer);
 	safe_unpack32(&job->time_limit,   buffer);
@@ -2102,6 +2717,7 @@ _unpack_job_info_members(job_info_t * job, Buf buffer)
 	safe_unpack_time(&job->end_time, buffer);
 	safe_unpack_time(&job->suspend_time, buffer);
 	safe_unpack_time(&job->pre_sus_time, buffer);
+
 	safe_unpack32(&job->priority, buffer);
 
 	safe_unpackstr_xmalloc(&job->nodes, &uint32_tmp, buffer);
@@ -2109,14 +2725,19 @@ _unpack_job_info_members(job_info_t * job, Buf buffer)
 	safe_unpackstr_xmalloc(&job->account, &uint32_tmp, buffer);
 	safe_unpackstr_xmalloc(&job->network, &uint32_tmp, buffer);
 	safe_unpackstr_xmalloc(&job->comment, &uint32_tmp, buffer);
-	safe_unpackstr_xmalloc(&job->licenses,   &uint32_tmp, buffer);
+	safe_unpackstr_xmalloc(&job->licenses, &uint32_tmp, buffer);
+	safe_unpackstr_xmalloc(&job->state_desc, &uint32_tmp, buffer);
+	safe_unpackstr_xmalloc(&job->resv_name,  &uint32_tmp, buffer);
 
 	safe_unpack32(&job->exit_code, buffer);
-	safe_unpack16(&job->num_cpu_groups, buffer);
-	safe_unpack32_array(&job->cpus_per_node, &uint32_tmp, buffer);
-	safe_unpack32_array(&job->cpu_count_reps, &uint32_tmp, buffer);
+	safe_unpack32(&job->num_cpu_groups, buffer);
+	if (job->num_cpu_groups) {
+		safe_unpack16_array(&job->cpus_per_node, &uint32_tmp, buffer);
+		safe_unpack32_array(&job->cpu_count_reps, &uint32_tmp, buffer);
+	}
 
 	safe_unpackstr_xmalloc(&job->name, &uint32_tmp, buffer);
+	safe_unpackstr_xmalloc(&job->wckey, &uint32_tmp, buffer);
 	safe_unpackstr_xmalloc(&job->alloc_node, &uint32_tmp, buffer);
 	safe_unpackstr_xmalloc(&node_inx_str, &uint32_tmp, buffer);
 	if (node_inx_str == NULL)
@@ -2137,13 +2758,13 @@ _unpack_job_info_members(job_info_t * job, Buf buffer)
 	safe_unpackstr_xmalloc(&job->dependency, &uint32_tmp, buffer);
 	safe_unpackstr_xmalloc(&job->command,    &uint32_tmp, buffer);
 
-	safe_unpack32(&job->num_nodes, buffer);
-	safe_unpack32(&job->max_nodes, buffer);
-	safe_unpack16(&job->requeue,   buffer);
+	safe_unpack32(&job->num_nodes,   buffer);
+	safe_unpack32(&job->max_nodes,   buffer);
+	safe_unpack16(&job->requeue,     buffer);
 
 	/*** unpack pending job details ***/
-	safe_unpack16(&job->shared, buffer);
-	safe_unpack16(&job->contiguous, buffer);
+	safe_unpack16(&job->shared,        buffer);
+	safe_unpack16(&job->contiguous,    buffer);
 	safe_unpack16(&job->cpus_per_task, buffer);
 	safe_unpack16(&job->job_min_procs, buffer);
 
@@ -2183,40 +2804,23 @@ _unpack_job_info_members(job_info_t * job, Buf buffer)
 		job->ntasks_per_core   = mc_ptr->ntasks_per_core;
 		xfree(mc_ptr);
 	}
-
+	
 	return SLURM_SUCCESS;
 
 unpack_error:
-	xfree(job->nodes);
-	xfree(job->partition);
-	xfree(job->account);
-	xfree(job->network);
-	xfree(job->comment);
-	xfree(job->dependency);
-	xfree(job->cpus_per_node);
-	xfree(job->cpu_count_reps);
-	xfree(job->name);
-	xfree(job->alloc_node);
-	xfree(job->node_inx);
-	select_g_free_jobinfo(&job->select_jobinfo);
-	xfree(job->features);
-	xfree(job->work_dir);
-	xfree(job->command);
-	xfree(job->licenses);
-	xfree(job->req_nodes);
-	xfree(job->req_node_inx);
-	xfree(job->exc_nodes);
-	xfree(job->exc_node_inx);
-
+	slurm_free_job_info_members(job);
 	return SLURM_ERROR;
 }
 
 static void
 _pack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t * build_ptr, Buf buffer)
 {
+	uint32_t count = NO_VAL;
+
 	pack_time(build_ptr->last_update, buffer);
 
 	pack16(build_ptr->accounting_storage_enforce, buffer);
+	packstr(build_ptr->accounting_storage_backup_host, buffer);
 	packstr(build_ptr->accounting_storage_host, buffer);
 	packstr(build_ptr->accounting_storage_loc, buffer);
 	packstr(build_ptr->accounting_storage_pass, buffer);
@@ -2228,20 +2832,24 @@ _pack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t * build_ptr, Buf buffer)
 
 	packstr(build_ptr->backup_addr, buffer);
 	packstr(build_ptr->backup_controller, buffer);
+	pack16(build_ptr->batch_start_timeout, buffer);
 	pack_time(build_ptr->boot_time, buffer);
 
 	pack16(build_ptr->cache_groups, buffer);
 	packstr(build_ptr->checkpoint_type, buffer);
 	packstr(build_ptr->cluster_name, buffer);
+	pack16(build_ptr->complete_wait, buffer);
 	packstr(build_ptr->control_addr, buffer);
 	packstr(build_ptr->control_machine, buffer);
 	packstr(build_ptr->crypto_type, buffer);
 
 	pack32(build_ptr->def_mem_per_task, buffer);
+	pack32(build_ptr->debug_flags, buffer);
 	pack16(build_ptr->disable_root_jobs, buffer);
 
 	packstr(build_ptr->epilog, buffer);
 	pack32(build_ptr->epilog_msg_time, buffer);
+	packstr(build_ptr->epilog_slurmctld, buffer);
 
 	pack16(build_ptr->fast_schedule, buffer);
 	pack32(build_ptr->first_job_id, buffer);
@@ -2256,6 +2864,8 @@ _pack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t * build_ptr, Buf buffer)
 	pack16(build_ptr->job_acct_gather_freq, buffer);
 	packstr(build_ptr->job_acct_gather_type, buffer);
 
+	packstr(build_ptr->job_ckpt_dir, buffer);
+	
 	packstr(build_ptr->job_comp_host, buffer);
 	packstr(build_ptr->job_comp_loc, buffer);
 	packstr(build_ptr->job_comp_pass, buffer);
@@ -2268,6 +2878,7 @@ _pack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t * build_ptr, Buf buffer)
 	pack16(build_ptr->job_file_append, buffer);
 	pack16(build_ptr->job_requeue, buffer);
 
+	pack16(build_ptr->kill_on_bad_exit, buffer);
 	pack16(build_ptr->kill_wait, buffer);
 
 	packstr(build_ptr->licenses, buffer);
@@ -2277,35 +2888,72 @@ _pack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t * build_ptr, Buf buffer)
 	pack32(build_ptr->max_mem_per_task, buffer);
 	pack16(build_ptr->min_job_age, buffer);
 	packstr(build_ptr->mpi_default, buffer);
+	packstr(build_ptr->mpi_params, buffer);
 	pack16(build_ptr->msg_timeout, buffer);
 
 	pack32(build_ptr->next_job_id, buffer);
 	packstr(build_ptr->node_prefix, buffer);
 
+	pack16(build_ptr->over_time_limit, buffer);
+
 	packstr(build_ptr->plugindir, buffer);
 	packstr(build_ptr->plugstack, buffer);
+
+	pack32(build_ptr->priority_decay_hl, buffer);
+	pack16(build_ptr->priority_favor_small, buffer);
+	pack32(build_ptr->priority_max_age, buffer);
+	pack16(build_ptr->priority_reset_period, buffer);
+	packstr(build_ptr->priority_type, buffer);
+	pack32(build_ptr->priority_weight_age, buffer);
+	pack32(build_ptr->priority_weight_fs, buffer);
+	pack32(build_ptr->priority_weight_js, buffer);
+	pack32(build_ptr->priority_weight_part, buffer);
+	pack32(build_ptr->priority_weight_qos, buffer);
+
 	pack16(build_ptr->private_data, buffer);
 	packstr(build_ptr->proctrack_type, buffer);
 	packstr(build_ptr->prolog, buffer);
+	packstr(build_ptr->prolog_slurmctld, buffer);
 	pack16(build_ptr->propagate_prio_process, buffer);
         packstr(build_ptr->propagate_rlimits, buffer);
         packstr(build_ptr->propagate_rlimits_except, buffer);
 
 	packstr(build_ptr->resume_program, buffer);
 	pack16(build_ptr->resume_rate, buffer);
+	pack16(build_ptr->resume_timeout, buffer);
+	pack16(build_ptr->resv_over_run, buffer);
 	pack16(build_ptr->ret2service, buffer);
 
+	packstr(build_ptr->salloc_default_command, buffer);
 	packstr(build_ptr->sched_params, buffer);
 	pack16(build_ptr->schedport, buffer);
 	pack16(build_ptr->schedrootfltr, buffer);
 	pack16(build_ptr->sched_time_slice, buffer);
 	packstr(build_ptr->schedtype, buffer);
 	packstr(build_ptr->select_type, buffer);
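+	/* the select plugin's config key/value pairs reuse the slurmdbd
+	 * pack routine, hence the SLURMDBD_VERSION argument below */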
+	if(build_ptr->select_conf_key_pairs) 
+		count = list_count((List)build_ptr->select_conf_key_pairs);
+	
+	pack32(count, buffer);
+	if(count && count != NO_VAL) {
+		ListIterator itr = list_iterator_create(
+			(List)build_ptr->select_conf_key_pairs);
+		config_key_pair_t *key_pair = NULL;
+		while((key_pair = list_next(itr))) {
+			pack_config_key_pair(key_pair,
+					     SLURMDBD_VERSION, buffer);
+		}
+		list_iterator_destroy(itr);
+	}
+	count = NO_VAL;
+	
 	pack16(build_ptr->select_type_param, buffer);
 
 	packstr(build_ptr->slurm_conf, buffer);
 	pack32(build_ptr->slurm_user_id, buffer);
 	packstr(build_ptr->slurm_user_name, buffer);
+	pack32(build_ptr->slurmd_user_id, buffer);
+	packstr(build_ptr->slurmd_user_name, buffer);
 
 	pack16(build_ptr->slurmctld_debug, buffer);
 	packstr(build_ptr->slurmctld_logfile, buffer);
@@ -2323,6 +2971,7 @@ _pack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t * build_ptr, Buf buffer)
 	pack16(build_ptr->slurmd_timeout, buffer);
 
 	packstr(build_ptr->srun_epilog, buffer);
+	pack16(build_ptr->srun_io_timeout, buffer);
 	packstr(build_ptr->srun_prolog, buffer);
 	packstr(build_ptr->state_save_location, buffer);
 	packstr(build_ptr->suspend_exc_nodes, buffer);
@@ -2330,6 +2979,7 @@ _pack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t * build_ptr, Buf buffer)
 	packstr(build_ptr->suspend_program, buffer);
 	pack16(build_ptr->suspend_rate, buffer);
 	pack16(build_ptr->suspend_time, buffer);
+	pack16(build_ptr->suspend_timeout, buffer);
 	packstr(build_ptr->switch_type, buffer);
 
 	packstr(build_ptr->task_epilog, buffer);
@@ -2337,6 +2987,8 @@ _pack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t * build_ptr, Buf buffer)
 	packstr(build_ptr->task_plugin, buffer);
 	pack16(build_ptr->task_plugin_param, buffer);
 	packstr(build_ptr->tmp_fs, buffer);
+	packstr(build_ptr->topology_plugin, buffer);
+	pack16(build_ptr->track_wckey, buffer);
 	pack16(build_ptr->tree_width, buffer);
 
 	pack16(build_ptr->use_pam, buffer);
@@ -2344,12 +2996,17 @@ _pack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t * build_ptr, Buf buffer)
 	pack16(build_ptr->unkillable_timeout, buffer);
 
 	pack16(build_ptr->wait_time, buffer);
+
+	pack16(build_ptr->z_16, buffer);
+	pack32(build_ptr->z_32, buffer);
+	packstr(build_ptr->z_char, buffer);
 }
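
The guarded list packing above hinges on a simple wire convention: a 32-bit count is always written, with NO_VAL standing for "no list present", and the unpacker reads the same count before deciding whether to loop. A minimal standalone sketch of that sentinel-count convention follows; the word buffer, pack_count/unpack_count helpers and the NO_VAL value are simplified stand-ins for the real Buf API, not SLURM code.

    #include <stdint.h>
    #include <stdio.h>

    #define NO_VAL 0xfffffffe	/* assumed to match slurm.h's sentinel */

    /* toy stand-ins for pack32()/safe_unpack32() over a word buffer */
    static void pack_count(uint32_t count, uint32_t *buf, int *off)
    {
    	buf[(*off)++] = count;
    }

    static uint32_t unpack_count(const uint32_t *buf, int *off)
    {
    	return buf[(*off)++];
    }

    int main(void)
    {
    	uint32_t buf[4];
    	int w = 0, r = 0;

    	pack_count(NO_VAL, buf, &w);	/* the List pointer was NULL */
    	if (unpack_count(buf, &r) == NO_VAL)
    		puts("no key/value pairs follow; skip the unpack loop");
    	return 0;
    }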
 
 static int
 _unpack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t **
 			   build_buffer_ptr, Buf buffer)
 {
+	uint32_t count = NO_VAL;
 	uint32_t uint32_tmp;
 	slurm_ctl_conf_info_msg_t *build_ptr;
 
@@ -2362,6 +3019,8 @@ _unpack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t **
 	safe_unpack_time(&build_ptr->last_update, buffer);
 
 	safe_unpack16(&build_ptr->accounting_storage_enforce, buffer);
+	safe_unpackstr_xmalloc(&build_ptr->accounting_storage_backup_host, 
+			       &uint32_tmp, buffer);
 	safe_unpackstr_xmalloc(&build_ptr->accounting_storage_host, 
 			       &uint32_tmp, buffer);
 	safe_unpackstr_xmalloc(&build_ptr->accounting_storage_loc,
@@ -2379,12 +3038,14 @@ _unpack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t **
 	safe_unpackstr_xmalloc(&build_ptr->backup_addr, &uint32_tmp, buffer);
 	safe_unpackstr_xmalloc(&build_ptr->backup_controller, &uint32_tmp,
 			       buffer);
+	safe_unpack16(&build_ptr->batch_start_timeout, buffer);
 	safe_unpack_time(&build_ptr->boot_time, buffer);
 
 	safe_unpack16(&build_ptr->cache_groups, buffer);
 	safe_unpackstr_xmalloc(&build_ptr->checkpoint_type, &uint32_tmp,
 			       buffer);
 	safe_unpackstr_xmalloc(&build_ptr->cluster_name, &uint32_tmp, buffer);
+	safe_unpack16(&build_ptr->complete_wait, buffer);
 	safe_unpackstr_xmalloc(&build_ptr->control_addr, &uint32_tmp, buffer);
 	safe_unpackstr_xmalloc(&build_ptr->control_machine, &uint32_tmp,
 			       buffer);
@@ -2392,10 +3053,13 @@ _unpack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t **
 			       buffer);
 
 	safe_unpack32(&build_ptr->def_mem_per_task, buffer);
+	safe_unpack32(&build_ptr->debug_flags, buffer);
 	safe_unpack16(&build_ptr->disable_root_jobs, buffer);
 
 	safe_unpackstr_xmalloc(&build_ptr->epilog, &uint32_tmp, buffer);
 	safe_unpack32(&build_ptr->epilog_msg_time, buffer);
+	safe_unpackstr_xmalloc(&build_ptr->epilog_slurmctld, &uint32_tmp, 
+			       buffer);
 
 	safe_unpack16(&build_ptr->fast_schedule, buffer);
 	safe_unpack32(&build_ptr->first_job_id, buffer);
@@ -2412,6 +3076,8 @@ _unpack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t **
 	safe_unpackstr_xmalloc(&build_ptr->job_acct_gather_type,
 			       &uint32_tmp, buffer);
 
+	safe_unpackstr_xmalloc(&build_ptr->job_ckpt_dir, &uint32_tmp, buffer);
+
 	safe_unpackstr_xmalloc(&build_ptr->job_comp_host, &uint32_tmp, buffer);
 	safe_unpackstr_xmalloc(&build_ptr->job_comp_loc,  &uint32_tmp, buffer);
 	safe_unpackstr_xmalloc(&build_ptr->job_comp_pass, &uint32_tmp, buffer);
@@ -2427,6 +3093,7 @@ _unpack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t **
 	safe_unpack16(&build_ptr->job_file_append, buffer);
 	safe_unpack16(&build_ptr->job_requeue, buffer);
 
+	safe_unpack16(&build_ptr->kill_on_bad_exit, buffer);
 	safe_unpack16(&build_ptr->kill_wait, buffer);
 
 	safe_unpackstr_xmalloc(&build_ptr->licenses, &uint32_tmp, buffer);
@@ -2436,17 +3103,35 @@ _unpack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t **
 	safe_unpack32(&build_ptr->max_mem_per_task, buffer);
 	safe_unpack16(&build_ptr->min_job_age, buffer);
 	safe_unpackstr_xmalloc(&build_ptr->mpi_default, &uint32_tmp, buffer);
+	safe_unpackstr_xmalloc(&build_ptr->mpi_params, &uint32_tmp, buffer);
 	safe_unpack16(&build_ptr->msg_timeout, buffer);
 
 	safe_unpack32(&build_ptr->next_job_id, buffer);
 	safe_unpackstr_xmalloc(&build_ptr->node_prefix, &uint32_tmp, buffer);
 
+	safe_unpack16(&build_ptr->over_time_limit, buffer);
+
 	safe_unpackstr_xmalloc(&build_ptr->plugindir, &uint32_tmp, buffer);
 	safe_unpackstr_xmalloc(&build_ptr->plugstack, &uint32_tmp, buffer);
+
+	safe_unpack32(&build_ptr->priority_decay_hl, buffer);
+	safe_unpack16(&build_ptr->priority_favor_small, buffer);
+	safe_unpack32(&build_ptr->priority_max_age, buffer);
+	safe_unpack16(&build_ptr->priority_reset_period, buffer);
+	safe_unpackstr_xmalloc(&build_ptr->priority_type, &uint32_tmp, 
+			       buffer);
+	safe_unpack32(&build_ptr->priority_weight_age, buffer);
+	safe_unpack32(&build_ptr->priority_weight_fs, buffer);
+	safe_unpack32(&build_ptr->priority_weight_js, buffer);
+	safe_unpack32(&build_ptr->priority_weight_part, buffer);
+	safe_unpack32(&build_ptr->priority_weight_qos, buffer);
+
 	safe_unpack16(&build_ptr->private_data, buffer);
 	safe_unpackstr_xmalloc(&build_ptr->proctrack_type, &uint32_tmp, 
 			       buffer);
 	safe_unpackstr_xmalloc(&build_ptr->prolog, &uint32_tmp, buffer);
+	safe_unpackstr_xmalloc(&build_ptr->prolog_slurmctld, &uint32_tmp, 
+			       buffer);
 	safe_unpack16(&build_ptr->propagate_prio_process, buffer);
         safe_unpackstr_xmalloc(&build_ptr->propagate_rlimits,
                                &uint32_tmp, buffer);
@@ -2456,14 +3141,33 @@ _unpack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t **
 	safe_unpackstr_xmalloc(&build_ptr->resume_program,
 			       &uint32_tmp, buffer);
 	safe_unpack16(&build_ptr->resume_rate, buffer);
+	safe_unpack16(&build_ptr->resume_timeout, buffer);
+	safe_unpack16(&build_ptr->resv_over_run, buffer);
 	safe_unpack16(&build_ptr->ret2service, buffer);
 
+	safe_unpackstr_xmalloc(&build_ptr->salloc_default_command, 
+			       &uint32_tmp, buffer);
 	safe_unpackstr_xmalloc(&build_ptr->sched_params, &uint32_tmp, buffer);
 	safe_unpack16(&build_ptr->schedport, buffer);
 	safe_unpack16(&build_ptr->schedrootfltr, buffer);
 	safe_unpack16(&build_ptr->sched_time_slice, buffer);
 	safe_unpackstr_xmalloc(&build_ptr->schedtype, &uint32_tmp, buffer);
 	safe_unpackstr_xmalloc(&build_ptr->select_type, &uint32_tmp, buffer);
+	safe_unpack32(&count, buffer);
+	if(count != NO_VAL) {
+		List tmp_list = list_create(destroy_config_key_pair);
+		config_key_pair_t *object = NULL;
+		int i;
+		for(i=0; i<count; i++) {
+			if(unpack_config_key_pair(
+				   (void *)&object, SLURMDBD_VERSION, buffer)
+			   == SLURM_ERROR)
+				goto unpack_error;
+			list_append(tmp_list, object);
+		}
+		build_ptr->select_conf_key_pairs = (void *)tmp_list;
+	}
+
 	safe_unpack16(&build_ptr->select_type_param, buffer);
 
 	safe_unpackstr_xmalloc(&build_ptr->slurm_conf,
@@ -2471,6 +3175,9 @@ _unpack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t **
 	safe_unpack32(&build_ptr->slurm_user_id, buffer);
 	safe_unpackstr_xmalloc(&build_ptr->slurm_user_name,
 			       &uint32_tmp, buffer);
+	safe_unpack32(&build_ptr->slurmd_user_id, buffer);
+	safe_unpackstr_xmalloc(&build_ptr->slurmd_user_name,
+			       &uint32_tmp, buffer);
 
 	safe_unpack16(&build_ptr->slurmctld_debug, buffer);
 	safe_unpackstr_xmalloc(&build_ptr->slurmctld_logfile,
@@ -2493,6 +3200,7 @@ _unpack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t **
 	safe_unpack16(&build_ptr->slurmd_timeout, buffer);
 
 	safe_unpackstr_xmalloc(&build_ptr->srun_epilog, &uint32_tmp, buffer);
+	safe_unpack16(&build_ptr->srun_io_timeout, buffer);
 	safe_unpackstr_xmalloc(&build_ptr->srun_prolog, &uint32_tmp, buffer);
 	safe_unpackstr_xmalloc(&build_ptr->state_save_location,
 			       &uint32_tmp, buffer);
@@ -2504,6 +3212,7 @@ _unpack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t **
 			       &uint32_tmp, buffer);
 	safe_unpack16(&build_ptr->suspend_rate, buffer);
 	safe_unpack16(&build_ptr->suspend_time, buffer);
+	safe_unpack16(&build_ptr->suspend_timeout, buffer);
 	safe_unpackstr_xmalloc(&build_ptr->switch_type, &uint32_tmp, buffer);
 
 	safe_unpackstr_xmalloc(&build_ptr->task_epilog, &uint32_tmp, buffer);
@@ -2511,6 +3220,8 @@ _unpack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t **
 	safe_unpackstr_xmalloc(&build_ptr->task_plugin, &uint32_tmp, buffer);
 	safe_unpack16(&build_ptr->task_plugin_param, buffer);
 	safe_unpackstr_xmalloc(&build_ptr->tmp_fs, &uint32_tmp, buffer);
+	safe_unpackstr_xmalloc(&build_ptr->topology_plugin, &uint32_tmp, buffer);
+	safe_unpack16(&build_ptr->track_wckey, buffer);
 	safe_unpack16(&build_ptr->tree_width, buffer);
 
 	safe_unpack16(&build_ptr->use_pam, buffer);
@@ -2520,67 +3231,14 @@ _unpack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t **
 
 	safe_unpack16(&build_ptr->wait_time, buffer);
 
+	safe_unpack16(&build_ptr->z_16, buffer);
+	safe_unpack32(&build_ptr->z_32, buffer);
+	safe_unpackstr_xmalloc(&build_ptr->z_char, &uint32_tmp, buffer);
+
 	return SLURM_SUCCESS;
 
 unpack_error:
-	xfree(build_ptr->accounting_storage_host);
-	xfree(build_ptr->accounting_storage_loc);
-	xfree(build_ptr->accounting_storage_pass);
-	xfree(build_ptr->accounting_storage_type);
-	xfree(build_ptr->accounting_storage_user);
-	xfree(build_ptr->authtype);
-	xfree(build_ptr->backup_addr);
-	xfree(build_ptr->backup_controller);
-	xfree(build_ptr->checkpoint_type);
-	xfree(build_ptr->cluster_name);
-	xfree(build_ptr->control_addr);
-	xfree(build_ptr->control_machine);
-	xfree(build_ptr->crypto_type);
-	xfree(build_ptr->epilog);
-	xfree(build_ptr->health_check_program);
-	xfree(build_ptr->job_acct_gather_type);
-	xfree(build_ptr->job_comp_loc);
-	xfree(build_ptr->job_comp_pass);
-	xfree(build_ptr->job_comp_type);
-	xfree(build_ptr->job_comp_user);
-	xfree(build_ptr->job_credential_private_key);
-	xfree(build_ptr->job_credential_public_certificate);
-	xfree(build_ptr->health_check_program);
-	xfree(build_ptr->licenses);
-	xfree(build_ptr->mail_prog);
-	xfree(build_ptr->mpi_default);
-	xfree(build_ptr->node_prefix);
-	xfree(build_ptr->plugindir);
-	xfree(build_ptr->plugstack);
-	xfree(build_ptr->proctrack_type);
-	xfree(build_ptr->prolog);
-	xfree(build_ptr->propagate_rlimits);
-	xfree(build_ptr->propagate_rlimits_except);
-	xfree(build_ptr->resume_program);
-	xfree(build_ptr->sched_params);
-	xfree(build_ptr->schedtype);
-	xfree(build_ptr->select_type);
-	xfree(build_ptr->slurm_conf);
-	xfree(build_ptr->slurm_user_name);
-	xfree(build_ptr->slurmctld_logfile);
-	xfree(build_ptr->slurmctld_pidfile);
-	xfree(build_ptr->slurmd_logfile);
-	xfree(build_ptr->slurmd_pidfile);
-	xfree(build_ptr->slurmd_spooldir);
-	xfree(build_ptr->srun_epilog);
-	xfree(build_ptr->srun_prolog);
-	xfree(build_ptr->state_save_location);
-	xfree(build_ptr->suspend_exc_nodes);
-	xfree(build_ptr->suspend_exc_parts);
-	xfree(build_ptr->suspend_program);
-	xfree(build_ptr->switch_type);
-	xfree(build_ptr->node_prefix);
-	xfree(build_ptr->task_epilog);
-	xfree(build_ptr->task_prolog);
-	xfree(build_ptr->task_plugin);
-	xfree(build_ptr->tmp_fs);
-	xfree(build_ptr->unkillable_program);
-	xfree(build_ptr);
+	slurm_free_ctl_conf(build_ptr);
 	*build_buffer_ptr = NULL;
 	return SLURM_ERROR;
 }
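
Replacing the long hand-maintained xfree() chain with a single slurm_free_ctl_conf() call works because the structure is allocated with SLURM's zero-filling xmalloc(), so every pointer field that was never unpacked is NULL and one type-specific destructor is safe on a partially filled structure. A standalone sketch of the pattern, using hypothetical stand-in types (struct conf, conf_free and conf_unpack are illustrations, not SLURM APIs):

    #include <stdlib.h>
    #include <string.h>

    struct conf { char *a; char *b; };

    /* one destructor frees every field; safe on a partially-filled
     * struct because free(NULL) is a no-op */
    static void conf_free(struct conf *c)
    {
    	if (!c)
    		return;
    	free(c->a);
    	free(c->b);
    	free(c);
    }

    static struct conf *conf_unpack(int fail_early)
    {
    	struct conf *c = calloc(1, sizeof(*c));	/* zero-filled, like xmalloc */
    	if (!c)
    		return NULL;
    	c->a = strdup("first field");
    	if (fail_early)
    		goto unpack_error;
    	c->b = strdup("second field");
    	return c;

    unpack_error:
    	conf_free(c);	/* replaces a hand-written per-field free chain */
    	return NULL;
    }

    int main(void) { conf_free(conf_unpack(1)); return 0; }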
@@ -2597,7 +3255,6 @@ _pack_job_desc_msg(job_desc_msg_t * job_desc_ptr, Buf buffer)
 	/* load the data values */
 	pack16(job_desc_ptr->contiguous, buffer);
 	pack16(job_desc_ptr->task_dist, buffer);
-	pack16(job_desc_ptr->plane_size, buffer);
 	pack16(job_desc_ptr->kill_on_node_fail, buffer);
 	packstr(job_desc_ptr->features, buffer);
 	pack32(job_desc_ptr->job_id, buffer);
@@ -2623,6 +3280,7 @@ _pack_job_desc_msg(job_desc_msg_t * job_desc_ptr, Buf buffer)
 	pack8(job_desc_ptr->overcommit,  buffer);
 	pack16(job_desc_ptr->acctg_freq, buffer);
 	pack32(job_desc_ptr->num_tasks,  buffer);
+	pack16(job_desc_ptr->ckpt_interval, buffer);
 
 	packstr(job_desc_ptr->req_nodes, buffer);
 	packstr(job_desc_ptr->exc_nodes, buffer);
@@ -2635,6 +3293,7 @@ _pack_job_desc_msg(job_desc_msg_t * job_desc_ptr, Buf buffer)
 	packstr(job_desc_ptr->in, buffer);
 	packstr(job_desc_ptr->out, buffer);
 	packstr(job_desc_ptr->work_dir, buffer);
+	packstr(job_desc_ptr->ckpt_dir, buffer);
 
 	pack16(job_desc_ptr->immediate, buffer);
 	pack16(job_desc_ptr->requeue, buffer);
@@ -2643,8 +3302,14 @@ _pack_job_desc_msg(job_desc_msg_t * job_desc_ptr, Buf buffer)
 	pack16(job_desc_ptr->ntasks_per_node, buffer);
 	pack16(job_desc_ptr->ntasks_per_socket, buffer);
 	pack16(job_desc_ptr->ntasks_per_core, buffer);
-	pack32(job_desc_ptr->time_limit, buffer);
 
+	pack16(job_desc_ptr->plane_size, buffer);
+	pack16(job_desc_ptr->cpu_bind_type, buffer);
+	pack16(job_desc_ptr->mem_bind_type, buffer);
+	packstr(job_desc_ptr->cpu_bind, buffer);
+	packstr(job_desc_ptr->mem_bind, buffer);
+
+	pack32(job_desc_ptr->time_limit, buffer);
 	pack32(job_desc_ptr->num_procs, buffer);
 	pack32(job_desc_ptr->min_nodes, buffer);
 	pack32(job_desc_ptr->max_nodes, buffer);
@@ -2665,6 +3330,9 @@ _pack_job_desc_msg(job_desc_msg_t * job_desc_ptr, Buf buffer)
 	packstr(job_desc_ptr->licenses, buffer);
 	pack16(job_desc_ptr->mail_type, buffer);
 	packstr(job_desc_ptr->mail_user, buffer);
+	packstr(job_desc_ptr->reservation, buffer);
+	packstr(job_desc_ptr->wckey, buffer);
+
 	if(job_desc_ptr->select_jobinfo)
 		select_g_pack_jobinfo(job_desc_ptr->select_jobinfo, buffer);
 	else if (select_g_alloc_jobinfo(&job_desc_ptr->select_jobinfo) 
@@ -2729,7 +3397,6 @@ _unpack_job_desc_msg(job_desc_msg_t ** job_desc_buffer_ptr, Buf buffer)
 	/* load the data values */
 	safe_unpack16(&job_desc_ptr->contiguous, buffer);
 	safe_unpack16(&job_desc_ptr->task_dist, buffer);
-	safe_unpack16(&job_desc_ptr->plane_size, buffer);
 	safe_unpack16(&job_desc_ptr->kill_on_node_fail, buffer);
 	safe_unpackstr_xmalloc(&job_desc_ptr->features, &uint32_tmp, buffer);
 	safe_unpack32(&job_desc_ptr->job_id, buffer);
@@ -2755,6 +3422,7 @@ _unpack_job_desc_msg(job_desc_msg_t ** job_desc_buffer_ptr, Buf buffer)
 	safe_unpack8(&job_desc_ptr->overcommit,  buffer);
 	safe_unpack16(&job_desc_ptr->acctg_freq, buffer);
 	safe_unpack32(&job_desc_ptr->num_tasks,  buffer);
+	safe_unpack16(&job_desc_ptr->ckpt_interval, buffer);
 
 	safe_unpackstr_xmalloc(&job_desc_ptr->req_nodes, &uint32_tmp, buffer);
 	safe_unpackstr_xmalloc(&job_desc_ptr->exc_nodes, &uint32_tmp, buffer);
@@ -2767,6 +3435,7 @@ _unpack_job_desc_msg(job_desc_msg_t ** job_desc_buffer_ptr, Buf buffer)
 	safe_unpackstr_xmalloc(&job_desc_ptr->in, &uint32_tmp, buffer);
 	safe_unpackstr_xmalloc(&job_desc_ptr->out, &uint32_tmp, buffer);
 	safe_unpackstr_xmalloc(&job_desc_ptr->work_dir, &uint32_tmp, buffer);
+	safe_unpackstr_xmalloc(&job_desc_ptr->ckpt_dir, &uint32_tmp, buffer);
 
 	safe_unpack16(&job_desc_ptr->immediate, buffer);
 	safe_unpack16(&job_desc_ptr->requeue, buffer);
@@ -2775,8 +3444,14 @@ _unpack_job_desc_msg(job_desc_msg_t ** job_desc_buffer_ptr, Buf buffer)
 	safe_unpack16(&job_desc_ptr->ntasks_per_node, buffer);
 	safe_unpack16(&job_desc_ptr->ntasks_per_socket, buffer);
 	safe_unpack16(&job_desc_ptr->ntasks_per_core, buffer);
-	safe_unpack32(&job_desc_ptr->time_limit, buffer);
 
+	safe_unpack16(&job_desc_ptr->plane_size, buffer);
+	safe_unpack16(&job_desc_ptr->cpu_bind_type, buffer);
+	safe_unpack16(&job_desc_ptr->mem_bind_type, buffer);
+	safe_unpackstr_xmalloc(&job_desc_ptr->cpu_bind, &uint32_tmp, buffer);
+	safe_unpackstr_xmalloc(&job_desc_ptr->mem_bind, &uint32_tmp, buffer);
+
+	safe_unpack32(&job_desc_ptr->time_limit, buffer);
 	safe_unpack32(&job_desc_ptr->num_procs, buffer);
 	safe_unpack32(&job_desc_ptr->min_nodes, buffer);
 	safe_unpack32(&job_desc_ptr->max_nodes, buffer);
@@ -2797,10 +3472,16 @@ _unpack_job_desc_msg(job_desc_msg_t ** job_desc_buffer_ptr, Buf buffer)
 	safe_unpackstr_xmalloc(&job_desc_ptr->licenses, &uint32_tmp, buffer);
 	safe_unpack16(&job_desc_ptr->mail_type, buffer);
 	safe_unpackstr_xmalloc(&job_desc_ptr->mail_user, &uint32_tmp, buffer);
+	safe_unpackstr_xmalloc(&job_desc_ptr->reservation, &uint32_tmp, buffer);
+	safe_unpackstr_xmalloc(&job_desc_ptr->wckey, &uint32_tmp, buffer);
 
 	if (select_g_alloc_jobinfo (&job_desc_ptr->select_jobinfo)
 	    ||  select_g_unpack_jobinfo(job_desc_ptr->select_jobinfo, buffer))
 		goto unpack_error;
+
+	/* These fields are set to NO_VAL here so that they are not
+	 * later confused with the values stored in the select_jobinfo
+	 * structure.
+	 */
 #if SYSTEM_DIMENSIONS
 	job_desc_ptr->geometry[0] = (uint16_t)NO_VAL;
 #endif
@@ -2814,27 +3495,7 @@ _unpack_job_desc_msg(job_desc_msg_t ** job_desc_buffer_ptr, Buf buffer)
 	return SLURM_SUCCESS;
 
 unpack_error:
-
-	xfree(job_desc_ptr->features);
-	xfree(job_desc_ptr->name);
-	xfree(job_desc_ptr->partition);
-	xfree(job_desc_ptr->dependency);
-	xfree(job_desc_ptr->account);
-	xfree(job_desc_ptr->comment);
-	xfree(job_desc_ptr->req_nodes);
-	xfree(job_desc_ptr->exc_nodes);
-	xfree(job_desc_ptr->environment);
-	xfree(job_desc_ptr->script);
-	xfree(job_desc_ptr->argv);
-	xfree(job_desc_ptr->err);
-	xfree(job_desc_ptr->in);
-	xfree(job_desc_ptr->out);
-	xfree(job_desc_ptr->work_dir);
-	xfree(job_desc_ptr->network);
-	xfree(job_desc_ptr->licenses);
-	xfree(job_desc_ptr->mail_user);
-	select_g_free_jobinfo(&job_desc_ptr->select_jobinfo);
-	xfree(job_desc_ptr);
+	slurm_free_job_desc_msg(job_desc_ptr);
 	*job_desc_buffer_ptr = NULL;
 	return SLURM_ERROR;
 }
@@ -2862,7 +3523,7 @@ _unpack_job_alloc_info_msg(job_alloc_info_msg_t **
 	return SLURM_SUCCESS;
 
 unpack_error:
-	xfree(job_desc_ptr);
+	slurm_free_job_alloc_info_msg(job_desc_ptr);
 	*job_desc_buffer_ptr = NULL;
 	return SLURM_ERROR;
 }
@@ -2887,7 +3548,7 @@ _unpack_last_update_msg(last_update_msg_t ** msg, Buf buffer)
 	return SLURM_SUCCESS;
 
 unpack_error:
-	xfree(last_update_msg);
+	slurm_free_last_update_msg(last_update_msg);
 	*msg = NULL;
 	return SLURM_ERROR;
 }
@@ -2912,7 +3573,7 @@ _unpack_return_code_msg(return_code_msg_t ** msg, Buf buffer)
 	return SLURM_SUCCESS;
 
 unpack_error:
-	xfree(return_code_msg);
+	slurm_free_return_code_msg(return_code_msg);
 	*msg = NULL;
 	return SLURM_ERROR;
 }
@@ -3055,7 +3716,7 @@ _unpack_task_exit_msg(task_exit_msg_t ** msg_ptr, Buf buffer)
 	return SLURM_SUCCESS;
 
 unpack_error:
-	xfree(msg);
+	slurm_free_task_exit_msg(msg);
 	*msg_ptr = NULL;
 	return SLURM_ERROR;
 }
@@ -3091,15 +3752,12 @@ _unpack_launch_tasks_response_msg(launch_tasks_response_msg_t **
 		goto unpack_error;
 	safe_unpack32_array(&msg->task_ids, &uint32_tmp, buffer);
 	if (msg->count_of_pids != uint32_tmp)
-		goto unpack_error2;
+		goto unpack_error;
 
 	return SLURM_SUCCESS;
 
-unpack_error2:
-	xfree(msg->count_of_pids);
 unpack_error:
-	xfree(msg->node_name);
-	xfree(msg);
+	slurm_free_launch_tasks_response_msg(msg);
 	*msg_ptr = NULL;
 	return SLURM_ERROR;
 }
@@ -3115,18 +3773,13 @@ _pack_launch_tasks_request_msg(launch_tasks_request_msg_t * msg, Buf buffer)
 	pack32(msg->uid, buffer);
 	pack32(msg->gid, buffer);
 	pack32(msg->job_mem, buffer);
-	pack32(msg->task_mem, buffer);
 
 	pack32(msg->nnodes, buffer);
 	pack16(msg->max_sockets, buffer);
 	pack16(msg->max_cores, buffer);
 	pack16(msg->max_threads, buffer);
 	pack16(msg->cpus_per_task, buffer);
-	pack16(msg->ntasks_per_node, buffer);
-	pack16(msg->ntasks_per_socket, buffer);
-	pack16(msg->ntasks_per_core, buffer);
 	pack16(msg->task_dist, buffer);
-	pack16(msg->plane_size, buffer);
 
 	slurm_cred_pack(msg->cred, buffer);
 	for(i=0; i<msg->nnodes; i++) {
@@ -3155,6 +3808,7 @@ _pack_launch_tasks_request_msg(launch_tasks_request_msg_t * msg, Buf buffer)
 		packstr(msg->efname, buffer);
 		packstr(msg->ifname, buffer);
 		pack8(msg->buffered_stdio, buffer);
+		pack8(msg->labelio, buffer);
 		pack16(msg->num_io_port, buffer);
 		for(i = 0; i < msg->num_io_port; i++)
 			pack16(msg->io_port[i], buffer);
@@ -3169,7 +3823,8 @@ _pack_launch_tasks_request_msg(launch_tasks_request_msg_t * msg, Buf buffer)
 	pack8(msg->open_mode, buffer);
 	pack8(msg->pty, buffer);
 	pack16(msg->acctg_freq, buffer);
-	packstr(msg->ckpt_path, buffer);
+	packstr(msg->ckpt_dir, buffer);
+	packstr(msg->restart_dir, buffer);
 }
 
 static int
@@ -3190,18 +3845,13 @@ _unpack_launch_tasks_request_msg(launch_tasks_request_msg_t **
 	safe_unpack32(&msg->uid, buffer);
 	safe_unpack32(&msg->gid, buffer);
 	safe_unpack32(&msg->job_mem, buffer);
-	safe_unpack32(&msg->task_mem, buffer);
 
 	safe_unpack32(&msg->nnodes, buffer);
 	safe_unpack16(&msg->max_sockets, buffer);
 	safe_unpack16(&msg->max_cores, buffer);
 	safe_unpack16(&msg->max_threads, buffer);
 	safe_unpack16(&msg->cpus_per_task, buffer);
-	safe_unpack16(&msg->ntasks_per_node, buffer);
-	safe_unpack16(&msg->ntasks_per_socket, buffer);
-	safe_unpack16(&msg->ntasks_per_core, buffer);
 	safe_unpack16(&msg->task_dist, buffer);
-	safe_unpack16(&msg->plane_size, buffer);
 
 	if (!(msg->cred = slurm_cred_unpack(buffer)))
 		goto unpack_error;
@@ -3240,6 +3890,7 @@ _unpack_launch_tasks_request_msg(launch_tasks_request_msg_t **
 		safe_unpackstr_xmalloc(&msg->efname, &uint32_tmp, buffer);
 		safe_unpackstr_xmalloc(&msg->ifname, &uint32_tmp, buffer);
 		safe_unpack8(&msg->buffered_stdio, buffer);
+		safe_unpack8(&msg->labelio, buffer);
 		safe_unpack16(&msg->num_io_port, buffer);
 		if (msg->num_io_port > 0) {
 			msg->io_port =
@@ -3268,7 +3919,8 @@ _unpack_launch_tasks_request_msg(launch_tasks_request_msg_t **
 	safe_unpack8(&msg->open_mode, buffer);
 	safe_unpack8(&msg->pty, buffer);
 	safe_unpack16(&msg->acctg_freq, buffer);
-	safe_unpackstr_xmalloc(&msg->ckpt_path, &uint32_tmp, buffer);
+	safe_unpackstr_xmalloc(&msg->ckpt_dir, &uint32_tmp, buffer);
+	safe_unpackstr_xmalloc(&msg->restart_dir, &uint32_tmp, buffer);
 	return SLURM_SUCCESS;
 
 unpack_error:
@@ -3327,7 +3979,7 @@ _unpack_cancel_tasks_msg(kill_tasks_msg_t ** msg_ptr, Buf buffer)
 	return SLURM_SUCCESS;
 
 unpack_error:
-	xfree(msg);
+	slurm_free_kill_tasks_msg(msg);
 	*msg_ptr = NULL;
 	return SLURM_ERROR;
 }
@@ -3335,28 +3987,30 @@ unpack_error:
 static void
 _pack_checkpoint_tasks_msg(checkpoint_tasks_msg_t * msg, Buf buffer)
 {
-	pack32((uint32_t)msg->job_id, buffer);
-	pack32((uint32_t)msg->job_step_id, buffer);
-	pack32((uint32_t)msg->signal, buffer);
-	pack_time((time_t)msg->timestamp, buffer);
+	pack32(msg->job_id, buffer);
+	pack32(msg->job_step_id, buffer);
+	pack_time(msg->timestamp, buffer);
+	packstr(msg->image_dir, buffer);
 }
 
 static int
 _unpack_checkpoint_tasks_msg(checkpoint_tasks_msg_t ** msg_ptr, Buf buffer)
 {
 	checkpoint_tasks_msg_t *msg;
+	uint32_t uint32_tmp;
 
 	msg = xmalloc(sizeof(checkpoint_tasks_msg_t));
 	*msg_ptr = msg;
 
 	safe_unpack32(&msg->job_id, buffer);
 	safe_unpack32(&msg->job_step_id, buffer);
-	safe_unpack32(&msg->signal, buffer);
 	safe_unpack_time(&msg->timestamp, buffer);
+	safe_unpackstr_xmalloc(&msg->image_dir, &uint32_tmp, buffer);
+
 	return SLURM_SUCCESS;
 
 unpack_error:
-	xfree(msg);
+	slurm_free_checkpoint_tasks_msg(msg);
 	*msg_ptr = NULL;
 	return SLURM_ERROR;
 }
@@ -3379,7 +4033,7 @@ _unpack_shutdown_msg(shutdown_msg_t ** msg_ptr, Buf buffer)
 	return SLURM_SUCCESS;
 
 unpack_error:
-	xfree(msg);
+	slurm_free_shutdown_msg(msg);
 	*msg_ptr = NULL;
 	return SLURM_ERROR;
 }
@@ -3420,7 +4074,7 @@ _unpack_job_step_kill_msg(job_step_kill_msg_t ** msg_ptr, Buf buffer)
 	return SLURM_SUCCESS;
 
 unpack_error:
-	xfree(msg);
+	slurm_free_job_step_kill_msg(msg);
 	*msg_ptr = NULL;
 	return SLURM_ERROR;
 }
@@ -3447,7 +4101,7 @@ _unpack_complete_job_allocation_msg(
 	return SLURM_SUCCESS;
 
 unpack_error:
-	xfree(msg);
+	slurm_free_complete_job_allocation_msg(msg);
 	*msg_ptr = NULL;
 	return SLURM_ERROR;
 }
@@ -3479,7 +4133,7 @@ _unpack_complete_batch_script_msg(
 	return SLURM_SUCCESS;
 
 unpack_error:
-	xfree(msg);
+	slurm_free_complete_batch_script_msg(msg);
 	*msg_ptr = NULL;
 	return SLURM_ERROR;
 }
@@ -3513,7 +4167,7 @@ _unpack_stat_jobacct_msg(stat_jobacct_msg_t ** msg_ptr, Buf buffer)
 	return SLURM_SUCCESS;
 
 unpack_error:
-	xfree(msg);
+	slurm_free_stat_jobacct_msg(msg);
 	*msg_ptr = NULL;
 	return SLURM_ERROR;
 
@@ -3541,7 +4195,7 @@ _unpack_job_step_id_msg(job_step_id_msg_t ** msg_ptr, Buf buffer)
 	return SLURM_SUCCESS;
 
 unpack_error:
-	xfree(msg);
+	slurm_free_job_step_id_msg(msg);
 	*msg_ptr = NULL;
 	return SLURM_ERROR;
 
@@ -3578,7 +4232,7 @@ _unpack_step_complete_msg(step_complete_msg_t ** msg_ptr, Buf buffer)
 	return SLURM_SUCCESS;
 
 unpack_error:
-	xfree(msg);
+	slurm_free_step_complete_msg(msg);
 	*msg_ptr = NULL;
 	return SLURM_ERROR;
 }
@@ -3604,7 +4258,7 @@ _unpack_job_info_request_msg(job_info_request_msg_t** msg,
 	return SLURM_SUCCESS;
 
 unpack_error:
-	xfree(job_info);
+	slurm_free_job_info_request_msg(job_info);
 	*msg = NULL;
 	return SLURM_ERROR;
 }
@@ -3628,7 +4282,7 @@ _unpack_node_select_info_req_msg(node_info_select_request_msg_t **msg,
 	return SLURM_SUCCESS;
 
 unpack_error:
-	xfree(node_sel_info);
+	slurm_free_node_select_msg(node_sel_info);
 	*msg = NULL;
 	return SLURM_ERROR;
 }
@@ -3657,7 +4311,7 @@ _unpack_job_step_info_req_msg(job_step_info_request_msg_t ** msg, Buf buffer)
 	return SLURM_SUCCESS;
 
 unpack_error:
-	xfree(job_step_info);
+	slurm_free_job_step_info_request_msg(job_step_info);
 	*msg = NULL;
 	return SLURM_ERROR;
 }
@@ -3682,7 +4336,7 @@ _unpack_node_info_request_msg(node_info_request_msg_t ** msg, Buf buffer)
 	return SLURM_SUCCESS;
 
 unpack_error:
-	xfree(node_info);
+	slurm_free_node_info_request_msg(node_info);
 	*msg = NULL;
 	return SLURM_ERROR;
 }
@@ -3707,7 +4361,30 @@ _unpack_part_info_request_msg(part_info_request_msg_t ** msg, Buf buffer)
 	return SLURM_SUCCESS;
 
 unpack_error:
-	xfree(part_info);
+	slurm_free_part_info_request_msg(part_info);
+	*msg = NULL;
+	return SLURM_ERROR;
+}
+
+static void
+_pack_resv_info_request_msg(resv_info_request_msg_t * msg, Buf buffer)
+{
+	pack_time(msg->last_update, buffer);
+}
+
+static int
+_unpack_resv_info_request_msg(resv_info_request_msg_t ** msg, Buf buffer)
+{
+	resv_info_request_msg_t* resv_info;
+
+	resv_info = xmalloc(sizeof(resv_info_request_msg_t));
+	*msg = resv_info;
+
+	safe_unpack_time(&resv_info->last_update, buffer);
+	return SLURM_SUCCESS;
+
+unpack_error:
+	slurm_free_resv_info_request_msg(resv_info);
 	*msg = NULL;
 	return SLURM_ERROR;
 }
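
The new reservation-info request carries only a last_update timestamp, matching the other *_info request messages in this file; the intent is that the controller can compare the client's cached timestamp with its own last-change time and avoid reshipping unchanged data. A hypothetical server-side check illustrating that comparison (data_changed is an illustration, not a SLURM function):

    #include <stdio.h>
    #include <time.h>

    /* only ship fresh reservation data when something changed since
     * the client's cached copy */
    static int data_changed(time_t client_last_update, time_t server_last_change)
    {
    	return server_last_change > client_last_update;
    }

    int main(void)
    {
    	time_t cached = 1000, changed = 2000;

    	printf("resend reservation data? %s\n",
    	       data_changed(cached, changed) ? "yes" : "no (cache is current)");
    	return 0;
    }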
@@ -3799,14 +4476,22 @@ _pack_batch_job_launch_msg(batch_job_launch_msg_t * msg, Buf buffer)
 	pack8(msg->overcommit, buffer);
 
 	pack16(msg->acctg_freq,     buffer);
-	pack16(msg->num_cpu_groups, buffer);
+	pack16(msg->cpu_bind_type,  buffer);
+	pack16(msg->cpus_per_task,  buffer);
+	pack16(msg->restart_cnt,    buffer);
 
-	pack32_array(msg->cpus_per_node, msg->num_cpu_groups, buffer);
-	pack32_array(msg->cpu_count_reps, msg->num_cpu_groups, buffer);
+	pack32(msg->num_cpu_groups, buffer);
+	if (msg->num_cpu_groups) {
+		pack16_array(msg->cpus_per_node, msg->num_cpu_groups, buffer);
+		pack32_array(msg->cpu_count_reps, msg->num_cpu_groups, buffer);
+	}
 
-	packstr(msg->nodes, buffer);
-	packstr(msg->script, buffer);
+	packstr(msg->cpu_bind, buffer);
+	packstr(msg->nodes,    buffer);
+	packstr(msg->script,   buffer);
 	packstr(msg->work_dir, buffer);
+	packstr(msg->ckpt_dir, buffer);
+	packstr(msg->restart_dir, buffer);
 
 	packstr(msg->err, buffer);
 	packstr(msg->in, buffer);
@@ -3845,22 +4530,28 @@ _unpack_batch_job_launch_msg(batch_job_launch_msg_t ** msg, Buf buffer)
 	safe_unpack8(&launch_msg_ptr->overcommit, buffer);
 
 	safe_unpack16(&launch_msg_ptr->acctg_freq,     buffer);
-	safe_unpack16(&launch_msg_ptr->num_cpu_groups, buffer);
+	safe_unpack16(&launch_msg_ptr->cpu_bind_type,  buffer);
+	safe_unpack16(&launch_msg_ptr->cpus_per_task,  buffer);
+	safe_unpack16(&launch_msg_ptr->restart_cnt,    buffer);
+
+	safe_unpack32(&launch_msg_ptr->num_cpu_groups, buffer);
+	if (launch_msg_ptr->num_cpu_groups) {
+		safe_unpack16_array(&(launch_msg_ptr->cpus_per_node), 
+				    &uint32_tmp, buffer);
+		if (launch_msg_ptr->num_cpu_groups != uint32_tmp)
+			goto unpack_error;
+		safe_unpack32_array(&(launch_msg_ptr->cpu_count_reps), 
+				    &uint32_tmp, buffer);
+		if (launch_msg_ptr->num_cpu_groups != uint32_tmp)
+			goto unpack_error;
+	}
 
-	safe_unpack32_array((uint32_t **) &(launch_msg_ptr->cpus_per_node), 
-			    &uint32_tmp,
-			    buffer);
-	if (launch_msg_ptr->num_cpu_groups != uint32_tmp)
-		goto unpack_error;
-	safe_unpack32_array((uint32_t **) &(launch_msg_ptr->cpu_count_reps), 
-			    &uint32_tmp,
-			    buffer);
-	if (launch_msg_ptr->num_cpu_groups != uint32_tmp)
-		goto unpack_error;
-	
+	safe_unpackstr_xmalloc(&launch_msg_ptr->cpu_bind, &uint32_tmp, buffer);
 	safe_unpackstr_xmalloc(&launch_msg_ptr->nodes,    &uint32_tmp, buffer);
 	safe_unpackstr_xmalloc(&launch_msg_ptr->script,   &uint32_tmp, buffer);
 	safe_unpackstr_xmalloc(&launch_msg_ptr->work_dir, &uint32_tmp, buffer);
+	safe_unpackstr_xmalloc(&launch_msg_ptr->ckpt_dir, &uint32_tmp, buffer);
+	safe_unpackstr_xmalloc(&launch_msg_ptr->restart_dir, &uint32_tmp, buffer);
 
 	safe_unpackstr_xmalloc(&launch_msg_ptr->err, &uint32_tmp, buffer);
 	safe_unpackstr_xmalloc(&launch_msg_ptr->in,  &uint32_tmp, buffer);
@@ -3914,7 +4605,7 @@ _unpack_job_id_request_msg(job_id_request_msg_t ** msg, Buf buffer)
 	return SLURM_SUCCESS;
 
 unpack_error:
-	xfree(tmp_ptr);
+	slurm_free_job_id_request_msg(tmp_ptr);
 	*msg = NULL;
 	return SLURM_ERROR;
 }
@@ -3944,7 +4635,7 @@ _unpack_job_id_response_msg(job_id_response_msg_t ** msg, Buf buffer)
 	return SLURM_SUCCESS;
 
 unpack_error:
-	xfree(tmp_ptr);
+	slurm_free_job_id_response_msg(tmp_ptr);
 	*msg = NULL;
 	return SLURM_ERROR;
 }
@@ -3974,8 +4665,8 @@ _unpack_srun_exec_msg(srun_exec_msg_t ** msg_ptr, Buf buffer)
 	return SLURM_SUCCESS;
 
 unpack_error:
+	slurm_free_srun_exec_msg(msg);
 	*msg_ptr = NULL;
-	xfree(msg);
 	return SLURM_ERROR;
 }
 
@@ -4002,8 +4693,8 @@ _unpack_srun_ping_msg(srun_ping_msg_t ** msg_ptr, Buf buffer)
 	return SLURM_SUCCESS;
 
 unpack_error:
+	slurm_free_srun_ping_msg(msg);
 	*msg_ptr = NULL;
-	xfree(msg);
 	return SLURM_ERROR;
 }
 
@@ -4012,8 +4703,8 @@ _pack_srun_node_fail_msg(srun_node_fail_msg_t * msg, Buf buffer)
 {
 	xassert ( msg != NULL );
 
-	pack32((uint32_t)msg->job_id  , buffer ) ;
-	pack32((uint32_t)msg->step_id , buffer ) ;
+	pack32(msg->job_id  , buffer ) ;
+	pack32(msg->step_id , buffer ) ;
 	packstr(msg->nodelist, buffer ) ;
 }
 
@@ -4034,9 +4725,40 @@ _unpack_srun_node_fail_msg(srun_node_fail_msg_t ** msg_ptr, Buf buffer)
 	return SLURM_SUCCESS;
 
 unpack_error:
+	slurm_free_srun_node_fail_msg(msg);
+	*msg_ptr = NULL;
+	return SLURM_ERROR;
+}
+
+static void 
+_pack_srun_step_missing_msg(srun_step_missing_msg_t * msg, Buf buffer)
+{
+	xassert ( msg != NULL );
+
+	pack32(msg->job_id  , buffer ) ;
+	pack32(msg->step_id , buffer ) ;
+	packstr(msg->nodelist, buffer ) ;
+}
+
+static int 
+_unpack_srun_step_missing_msg(srun_step_missing_msg_t ** msg_ptr, Buf buffer)
+{
+	uint32_t uint32_tmp;
+	srun_step_missing_msg_t * msg;
+	xassert ( msg_ptr != NULL );
+
+	msg = xmalloc ( sizeof (srun_step_missing_msg_t) ) ;
+	*msg_ptr = msg;
+
+	safe_unpack32(&msg->job_id  , buffer ) ;
+	safe_unpack32(&msg->step_id , buffer ) ;
+	safe_unpackstr_xmalloc ( & msg->nodelist, &uint32_tmp, buffer);
+
+	return SLURM_SUCCESS;
+
+unpack_error:
+	slurm_free_srun_step_missing_msg(msg);
 	*msg_ptr = NULL;
-	xfree( msg->nodelist );
-	xfree( msg );
 	return SLURM_ERROR;
 }
 
@@ -4062,7 +4784,7 @@ _unpack_job_ready_msg(job_id_msg_t ** msg_ptr, Buf buffer)
 
 unpack_error:
 	*msg_ptr = NULL;
-	xfree(msg);
+	slurm_free_job_id_msg(msg);
 	return SLURM_ERROR;
 }
 
@@ -4092,7 +4814,7 @@ _unpack_srun_timeout_msg(srun_timeout_msg_t ** msg_ptr, Buf buffer)
 
 unpack_error:
 	*msg_ptr = NULL;
-	xfree(msg);
+	slurm_free_srun_timeout_msg(msg);
 	return SLURM_ERROR;
 }
 
@@ -4120,6 +4842,8 @@ _unpack_srun_user_msg(srun_user_msg_t ** msg_ptr, Buf buffer)
 	return SLURM_SUCCESS;
 
 unpack_error:
+	slurm_free_srun_user_msg(msg_user);
+	*msg_ptr = NULL;
 	return SLURM_ERROR;
 }
 
@@ -4145,7 +4869,7 @@ static int  _unpack_suspend_msg(suspend_msg_t **msg_ptr, Buf buffer)
 
 unpack_error:
 	*msg_ptr = NULL;
-	xfree(msg);
+	slurm_free_suspend_msg(msg);
 	return SLURM_ERROR;
 }
 
@@ -4155,16 +4879,18 @@ _pack_checkpoint_msg(checkpoint_msg_t *msg, Buf buffer)
 {
 	xassert ( msg != NULL );
 
-	pack16((uint16_t)msg->op,      buffer ) ;
-	pack16((uint16_t)msg->data,    buffer ) ;
-	pack32((uint32_t)msg->job_id,  buffer ) ;
-	pack32((uint32_t)msg->step_id, buffer ) ;
+	pack16(msg->op,      buffer ) ;
+	pack16(msg->data,    buffer ) ;
+	pack32(msg->job_id,  buffer ) ;
+	pack32(msg->step_id, buffer ) ;
+	packstr((char *)msg->image_dir, buffer ) ;
 }
 
 static int
 _unpack_checkpoint_msg(checkpoint_msg_t **msg_ptr, Buf buffer)
 {
 	checkpoint_msg_t * msg;
+	uint32_t uint32_tmp;
 	xassert ( msg_ptr != NULL );
 
 	msg = xmalloc ( sizeof (checkpoint_msg_t) ) ;
@@ -4174,11 +4900,12 @@ _unpack_checkpoint_msg(checkpoint_msg_t **msg_ptr, Buf buffer)
 	safe_unpack16(&msg->data, buffer ) ;
 	safe_unpack32(&msg->job_id, buffer ) ;
 	safe_unpack32(&msg->step_id, buffer ) ;
+	safe_unpackstr_xmalloc(&msg->image_dir, &uint32_tmp, buffer ) ;
 	return SLURM_SUCCESS;
 
 unpack_error:
 	*msg_ptr = NULL;
-	xfree(msg);
+	slurm_free_checkpoint_msg(msg);
 	return SLURM_ERROR;
 }
 
@@ -4213,8 +4940,7 @@ _unpack_checkpoint_comp(checkpoint_comp_msg_t **msg_ptr, Buf buffer)
 
 unpack_error:
 	*msg_ptr = NULL;
-	xfree (msg->error_msg);
-	xfree (msg);
+	slurm_free_checkpoint_comp_msg(msg);
 	return SLURM_ERROR;
 }
 
@@ -4251,8 +4977,7 @@ _unpack_checkpoint_task_comp(checkpoint_task_comp_msg_t **msg_ptr, Buf buffer)
 
 unpack_error:
 	*msg_ptr = NULL;
-	xfree (msg->error_msg);
-	xfree (msg);
+	slurm_free_checkpoint_task_comp_msg(msg);
 	return SLURM_ERROR;
 }
 
@@ -4283,7 +5008,7 @@ _unpack_checkpoint_resp_msg(checkpoint_resp_msg_t **msg_ptr, Buf buffer)
 
 unpack_error:
 	*msg_ptr = NULL;
-	xfree(msg);
+	slurm_free_checkpoint_resp_msg(msg);
 	return SLURM_ERROR;
 }
 
@@ -4338,9 +5063,7 @@ static int _unpack_file_bcast(file_bcast_msg_t ** msg_ptr , Buf buffer )
 	return SLURM_SUCCESS;
 
 unpack_error:
-	xfree( msg -> fname );
-	xfree( msg -> block );
-	xfree( msg );
+	slurm_free_file_bcast_msg(msg);
 	*msg_ptr = NULL;
 	return SLURM_ERROR;
 }
@@ -4525,7 +5248,7 @@ static int  _unpack_kvs_get(kvs_get_msg_t **msg_ptr, Buf buffer)
 	return SLURM_SUCCESS;
 
 unpack_error:
-	xfree(msg);
+	slurm_free_get_kvs_msg(msg);
 	*msg_ptr = NULL;
 	return SLURM_ERROR;
 }
@@ -4587,7 +5310,7 @@ unpack_multi_core_data (multi_core_data_t **mc_ptr, Buf buffer)
 	*mc_ptr = multi_core;
 	return SLURM_SUCCESS;
 
-  unpack_error:
+unpack_error:
 	xfree(multi_core);
 	return SLURM_ERROR;
 }
@@ -4646,11 +5369,7 @@ static int _unpack_slurmd_status(slurmd_status_t **msg_ptr, Buf buffer)
 	return SLURM_SUCCESS;
 
 unpack_error:
-	xfree(msg->hostname);
-	xfree(msg->slurmd_logfile);
-	xfree(msg->step_list);
-	xfree(msg->version);
-	xfree(msg);
+	slurm_free_slurmd_status(msg);
 	*msg_ptr = NULL;
 	return SLURM_ERROR;
 }
@@ -4681,8 +5400,7 @@ static int  _unpack_job_notify(job_notify_msg_t **msg_ptr, Buf buffer)
 	return SLURM_SUCCESS;
 
 unpack_error:
-	xfree(msg->message);
-	xfree(msg);
+	slurm_free_job_notify_msg(msg);
 	*msg_ptr = NULL;
 	return SLURM_ERROR;
 }
@@ -4705,7 +5423,7 @@ _unpack_set_debug_level_msg(set_debug_level_msg_t ** msg_ptr, Buf buffer)
 	return SLURM_SUCCESS;
 	
  unpack_error:
-	xfree(msg);
+	slurm_free_set_debug_level_msg(msg);
 	*msg_ptr = NULL;
 	return SLURM_ERROR;
 }
@@ -4734,8 +5452,7 @@ _unpack_will_run_response_msg(will_run_response_msg_t ** msg_ptr, Buf buffer)
 	return SLURM_SUCCESS;
 
   unpack_error:
-	xfree(msg->node_list);
-	xfree(msg);
+	slurm_free_will_run_response_msg(msg);
 	*msg_ptr = NULL;
 	return SLURM_ERROR;
 }
@@ -4788,9 +5505,52 @@ static int _unpack_accounting_update_msg(accounting_update_msg_t **msg,
 	return SLURM_SUCCESS;
 	
 unpack_error:
-	if(msg_ptr->update_list)
-		list_destroy(msg_ptr->update_list);
-	xfree(msg_ptr);
+	slurm_free_accounting_update_msg(msg_ptr);
+	*msg = NULL;
+	return SLURM_ERROR;
+}
+
+static void _pack_topo_info_msg(topo_info_response_msg_t *msg, Buf buffer)
+{
+	int i;
+
+	pack32(msg->record_count, buffer);
+	for (i=0; i<msg->record_count; i++) {
+		pack16(msg->topo_array[i].level,      buffer);
+		pack32(msg->topo_array[i].link_speed, buffer);
+  		packstr(msg->topo_array[i].name,      buffer);
+  		packstr(msg->topo_array[i].nodes,     buffer);
+  		packstr(msg->topo_array[i].switches,  buffer);
+	}
+}
+
+static int _unpack_topo_info_msg(topo_info_response_msg_t **msg,
+				 Buf buffer)
+{
+	int i = 0;
+	uint32_t uint32_tmp;
+	topo_info_response_msg_t *msg_ptr =
+		xmalloc(sizeof(topo_info_response_msg_t));
+
+	*msg = msg_ptr;
+	safe_unpack32(&msg_ptr->record_count, buffer);
+	msg_ptr->topo_array = xmalloc(sizeof(topo_info_t) * 
+				      msg_ptr->record_count);
+	for (i=0; i<msg_ptr->record_count; i++) {
+		safe_unpack16(&msg_ptr->topo_array[i].level,      buffer);
+		safe_unpack32(&msg_ptr->topo_array[i].link_speed, buffer);
+		safe_unpackstr_xmalloc(&msg_ptr->topo_array[i].name, 
+				       &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&msg_ptr->topo_array[i].nodes, 
+				       &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&msg_ptr->topo_array[i].switches, 
+				       &uint32_tmp, buffer);
+	}
+
+	return SLURM_SUCCESS;
+
+unpack_error:
+	slurm_free_topo_info_msg(msg_ptr);
 	*msg = NULL;
 	return SLURM_ERROR;
 }
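
_pack_topo_info_msg/_unpack_topo_info_msg follow the usual counted-array layout: the record count goes first, then each record's fields in a fixed order, and the unpacker sizes its array from the count it just read. A self-contained sketch of that layout with simplified stand-in types (struct topo and struct wire are illustrations, not the real topo_info_t or Buf):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct topo { uint16_t level; uint32_t link_speed; };

    /* toy wire format: a count header followed by fixed-order records */
    struct wire { uint32_t count; struct topo recs[8]; };

    static void pack_topo(const struct topo *t, uint32_t n, struct wire *w)
    {
    	w->count = n;			/* pack32(record_count) first   */
    	for (uint32_t i = 0; i < n; i++)
    		w->recs[i] = t[i];	/* then each record in order    */
    }

    static struct topo *unpack_topo(const struct wire *w, uint32_t *n)
    {
    	*n = w->count;			/* read the count ...           */
    	struct topo *t = malloc(sizeof(*t) * *n);  /* ... size the array */
    	for (uint32_t i = 0; t && i < *n; i++)
    		t[i] = w->recs[i];
    	return t;
    }

    int main(void)
    {
    	struct topo in[2] = { {0, 100}, {1, 40} };
    	struct wire w;
    	uint32_t n;
    	struct topo *out;

    	pack_topo(in, 2, &w);
    	out = unpack_topo(&w, &n);
    	printf("%u records, first level %u\n", (unsigned)n,
    	       out ? (unsigned)out[0].level : 0u);
    	free(out);
    	return 0;
    }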
diff --git a/src/common/slurm_protocol_pack.h b/src/common/slurm_protocol_pack.h
index aae531ae04367b336038d7062e500fbe14ff6447..67d8439ada433b1224bb7399bdaa2807bb7bc1d2 100644
--- a/src/common/slurm_protocol_pack.h
+++ b/src/common/slurm_protocol_pack.h
@@ -1,13 +1,15 @@
 /****************************************************************************\
  *  slurm_protocol_pack.h - definitions for all pack and unpack functions
  *****************************************************************************
- *  Copyright (C) 2002 The Regents of the University of California.
+ *  Copyright (C) 2002-2007 The Regents of the University of California.
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Kevin Tew <tew1@llnl.gov>.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -133,7 +135,7 @@ extern int unpack_msg ( slurm_msg_t * msgi , Buf buffer );
  * IN/OUT buffer - destination of the pack, contains pointers that are 
  *			automatically updated
  */ 
-/* void pack_job_step_info_members( uint32_t job_id, uint16_t step_id,  */
+/* void pack_job_step_info_members( uint32_t job_id, uint32_t step_id,  */
 /* 		uint32_t user_id, uint32_t num_tasks, time_t start_time,  */
 /* 		char *partition, char *nodes, char *name, char *network, */
 /* 		Buf buffer ); */
diff --git a/src/common/slurm_protocol_socket_common.h b/src/common/slurm_protocol_socket_common.h
index 1f0c9be3a83959b7a6bf003223514dab33e85ddb..15909be7c1c1c0ab47d2539cd36a8171075353e1 100644
--- a/src/common/slurm_protocol_socket_common.h
+++ b/src/common/slurm_protocol_socket_common.h
@@ -5,10 +5,11 @@
  *  Copyright (C) 2002 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Kevin Tew <tew1@llnl.gov>, et. al.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/common/slurm_protocol_socket_implementation.c b/src/common/slurm_protocol_socket_implementation.c
index bf1f3190a98ba05e548f31bf33eb74ebef6aa825..087397c0eb8f88396185429c21eb20805aadefae 100644
--- a/src/common/slurm_protocol_socket_implementation.c
+++ b/src/common/slurm_protocol_socket_implementation.c
@@ -6,10 +6,11 @@
  *  Copyright (C) 2008 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Kevin Tew <tew1@llnl.gov>, et. al.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/common/slurm_protocol_util.c b/src/common/slurm_protocol_util.c
index d2d8c4c06df4ae3dc897909728cfde3e072369b5..2fdf61a2a5b164c6db2ff6469082417f5c135802 100644
--- a/src/common/slurm_protocol_util.c
+++ b/src/common/slurm_protocol_util.c
@@ -4,10 +4,11 @@
  *  Copyright (C) 2002 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Kevin Tew <tew1@llnl.gov> et. al.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -44,6 +45,7 @@
 #include "src/common/slurm_protocol_util.h"
 #include "src/common/log.h"
 #include "src/common/xmalloc.h"
+#include "src/slurmdbd/read_config.h"
 
 /* 
  * check_header_version checks to see that the specified header was sent 
@@ -53,7 +55,11 @@
  */
 int check_header_version(header_t * header)
 {
-	if (header->version != SLURM_PROTOCOL_VERSION)
+	if(slurmdbd_conf) {
+		if (header->version != SLURM_PROTOCOL_VERSION
+		    && header->version != SLURM_1_3_PROTOCOL_VERSION)
+			slurm_seterrno_ret(SLURM_PROTOCOL_VERSION_ERROR);
+	} else if (header->version != SLURM_PROTOCOL_VERSION)
 		slurm_seterrno_ret(SLURM_PROTOCOL_VERSION_ERROR);
 
 	return SLURM_PROTOCOL_SUCCESS;
@@ -70,7 +76,20 @@ void init_header(header_t *header, slurm_msg_t *msg,
 		 uint16_t flags)
 {
 	memset(header, 0, sizeof(*header));
-	header->version = SLURM_PROTOCOL_VERSION;
+	/* Since slurmdbd may talk to many different versions of
+	   SLURM, this mapping needs to be kept current whenever the
+	   protocol version changes. */
+	if(msg->msg_type == ACCOUNTING_UPDATE_MSG
+	   || msg->msg_type == ACCOUNTING_FIRST_REG) {
+		uint32_t rpc_version =
+			((accounting_update_msg_t *)msg->data)->rpc_version;
+		if(rpc_version < 5)
+			header->version = SLURM_1_3_PROTOCOL_VERSION;
+		else if(rpc_version >= 5)
+			header->version = SLURM_PROTOCOL_VERSION;
+	} else 
+		header->version = SLURM_PROTOCOL_VERSION;
+
 	header->flags = flags;
 	header->msg_type = msg->msg_type;
 	header->body_length = 0;	/* over-written later */
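
The branch above frames accounting traffic with the older header version whenever the peer's rpc_version predates the 2.0 protocol, so a 2.0 slurmdbd remains intelligible to 1.3-era daemons. Reduced to a sketch; the two version constants below are placeholder values, not those from the real headers, and the real rule applies only to ACCOUNTING_UPDATE_MSG and ACCOUNTING_FIRST_REG:

    #include <stdint.h>
    #include <stdio.h>

    /* placeholder values for illustration only */
    #define SLURM_1_3_PROTOCOL_VERSION 0x0100
    #define SLURM_PROTOCOL_VERSION     0x0200

    /* mirrors init_header(): accounting traffic bound for a pre-2.0
     * peer (rpc_version < 5) gets the old header version */
    static uint16_t pick_header_version(uint32_t rpc_version)
    {
    	return (rpc_version < 5) ? SLURM_1_3_PROTOCOL_VERSION
    				 : SLURM_PROTOCOL_VERSION;
    }

    int main(void)
    {
    	printf("rpc 4 -> %#x, rpc 5 -> %#x\n",
    	       (unsigned)pick_header_version(4),
    	       (unsigned)pick_header_version(5));
    	return 0;
    }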
diff --git a/src/common/slurm_protocol_util.h b/src/common/slurm_protocol_util.h
index b8698de0be465a8c15a9ed9930bb720f591d4a3c..c4385574f7e2fd1e1483616f4945bebae257fe57 100644
--- a/src/common/slurm_protocol_util.h
+++ b/src/common/slurm_protocol_util.h
@@ -4,10 +4,11 @@
  *  Copyright (C) 2002 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Kevin Tew <tew1@llnl.gov> et. al.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/common/slurm_resource_info.c b/src/common/slurm_resource_info.c
index 0f253018947b4594b8cc4d1a6ac456665e38b765..3e912427fdbbba345239195fe212e58ade1cfc64 100644
--- a/src/common/slurm_resource_info.c
+++ b/src/common/slurm_resource_info.c
@@ -4,10 +4,11 @@
  *****************************************************************************
  *  Copyright (C) 2006 Hewlett-Packard Development Company, L.P.
  *  Written by Susanne M. Balle, <susanne.balle@hp.com>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -42,15 +43,49 @@
 #  include <string.h>
 #endif
 
+#include <ctype.h>
 #include <sys/types.h>
-#include "src/common/log.h"
 #include <slurm/slurm.h>
+
+#include "src/common/log.h"
+#include "src/common/slurm_protocol_api.h"
 #include "src/common/slurm_resource_info.h"
+#include "src/common/xmalloc.h"
+#include "src/common/xstring.h"
 
 #if(0)
 #define DEBUG 1
 #endif
 
+/*
+ * First clear all of the bits in "*data" which are set in "clear_mask".
+ * Then set all of the bits in "*data" that are set in "set_mask".
+ */
+static void _clear_then_set(int *data, int clear_mask, int set_mask)
+{
+	*data &= ~clear_mask;
+	*data |= set_mask;
+}
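
_clear_then_set lets one option group, such as the mutually exclusive bind modes, be replaced in a single step while unrelated bits like the verbose flag survive. A tiny standalone demonstration; the BIND_* and VERBOSE constants are made-up stand-ins for the CPU_BIND_* bits:

    #include <stdio.h>

    /* made-up stand-ins for the CPU_BIND_* bits */
    #define BIND_NONE 0x01
    #define BIND_RANK 0x02
    #define BIND_MAP  0x04
    #define VERBOSE   0x80

    static void clear_then_set(int *data, int clear_mask, int set_mask)
    {
    	*data &= ~clear_mask;
    	*data |= set_mask;
    }

    int main(void)
    {
    	int flags = BIND_NONE | VERBOSE;

    	/* picking "rank" replaces any other bind-mode bit but leaves
    	 * the verbose bit, outside the cleared group, untouched */
    	clear_then_set(&flags, BIND_NONE | BIND_RANK | BIND_MAP, BIND_RANK);
    	printf("flags = %#x (rank + verbose)\n", (unsigned)flags);
    	return 0;
    }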
+
+/*
+ * _isvalue
+ * returns 1 if the argument appears to be a value, 0 otherwise
+ */
+static int _isvalue(char *arg) {
+    	if (isdigit(*arg)) {	 /* decimal values and 0x... hex values */
+	    	return 1;
+	}
+
+	while (isxdigit(*arg)) { /* hex values not preceded by 0x */
+		arg++;
+	}
+	if (*arg == ',' || *arg == '\0') { /* end of field or string */
+	    	return 1;
+	}
+
+	return 0;	/* not a value */
+}
+
 /*
  * slurm_get_avail_procs - Get the number of "available" cpus on a node
  *	given this number given the number of cpus_per_task and
@@ -72,7 +107,7 @@
  * IN alloc_cores    - Allocated cores (per socket) count to other jobs
  * IN cr_type        - Consumable Resource type
  *
- * Note: used in both the select/{linear,cons_res} plugins.
+ * Note: currently only used in the select/linear plugin.
  */
 int slurm_get_avail_procs(const uint16_t max_sockets,
 			  const uint16_t max_cores,
@@ -281,22 +316,31 @@ void slurm_sprint_cpu_bind_type(char *str, cpu_bind_type_t cpu_bind_type)
 
 	str[0] = '\0';
 
+	if (cpu_bind_type & CPU_BIND_VERBOSE)
+		strcat(str, "verbose,");
+
 	if (cpu_bind_type & CPU_BIND_TO_THREADS)
 		strcat(str, "threads,");
 	if (cpu_bind_type & CPU_BIND_TO_CORES)
 		strcat(str, "cores,");
 	if (cpu_bind_type & CPU_BIND_TO_SOCKETS)
 		strcat(str, "sockets,");
-	if (cpu_bind_type & CPU_BIND_VERBOSE)
-		strcat(str, "verbose,");
+	if (cpu_bind_type & CPU_BIND_TO_LDOMS)
+		strcat(str, "ldoms,");
 	if (cpu_bind_type & CPU_BIND_NONE)
 		strcat(str, "none,");
 	if (cpu_bind_type & CPU_BIND_RANK)
 		strcat(str, "rank,");
 	if (cpu_bind_type & CPU_BIND_MAP)
-		strcat(str, "mapcpu,");
+		strcat(str, "map_cpu,");
 	if (cpu_bind_type & CPU_BIND_MASK)
-		strcat(str, "maskcpu,");
+		strcat(str, "mask_cpu,");
+	if (cpu_bind_type & CPU_BIND_LDRANK)
+		strcat(str, "rank_ldom,");
+	if (cpu_bind_type & CPU_BIND_LDMAP)
+		strcat(str, "map_ldom,");
+	if (cpu_bind_type & CPU_BIND_LDMASK)
+		strcat(str, "mask_ldom,");
 
 	if (*str) {
 		str[strlen(str)-1] = '\0';	/* remove trailing ',' */
@@ -321,6 +365,7 @@ void slurm_sprint_mem_bind_type(char *str, mem_bind_type_t mem_bind_type)
 
 	if (mem_bind_type & MEM_BIND_VERBOSE)
 		strcat(str, "verbose,");
+
 	if (mem_bind_type & MEM_BIND_NONE)
 		strcat(str, "none,");
 	if (mem_bind_type & MEM_BIND_RANK)
@@ -328,9 +373,9 @@ void slurm_sprint_mem_bind_type(char *str, mem_bind_type_t mem_bind_type)
 	if (mem_bind_type & MEM_BIND_LOCAL)
 		strcat(str, "local,");
 	if (mem_bind_type & MEM_BIND_MAP)
-		strcat(str, "mapmem,");
+		strcat(str, "map_mem,");
 	if (mem_bind_type & MEM_BIND_MASK)
-		strcat(str, "maskmem,");
+		strcat(str, "mask_mem,");
 
 	if (*str) {
 		str[strlen(str)-1] = '\0';	/* remove trailing ',' */
@@ -338,3 +383,347 @@ void slurm_sprint_mem_bind_type(char *str, mem_bind_type_t mem_bind_type)
 	    	strcat(str, "(null type)");	/* no bits set */
 	}
 }
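
Both sprint helpers build their string by appending "name," for each set bit and then trimming the trailing comma, falling back to "(null type)" when no bit matched. The same idiom in standalone form; sprint_flags and the two flag constants are illustrative stand-ins, not SLURM symbols:

    #include <stdio.h>
    #include <string.h>

    /* illustrative flag bits, not the real CPU_BIND_* values */
    #define F_VERBOSE 0x01
    #define F_CORES   0x02

    static void sprint_flags(char *str, int flags)
    {
    	str[0] = '\0';
    	if (flags & F_VERBOSE)
    		strcat(str, "verbose,");
    	if (flags & F_CORES)
    		strcat(str, "cores,");
    	if (*str)
    		str[strlen(str) - 1] = '\0';	/* trim trailing ',' */
    	else
    		strcat(str, "(null type)");	/* no bits set */
    }

    int main(void)
    {
    	char buf[64];

    	sprint_flags(buf, F_VERBOSE | F_CORES);
    	printf("%s\n", buf);			/* prints: verbose,cores */
    	return 0;
    }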
+
+void slurm_print_cpu_bind_help(void)
+{
+	printf(
+"CPU bind options:\n"
+"    --cpu_bind=         Bind tasks to CPUs\n"
+"        q[uiet]         quietly bind before task runs (default)\n"
+"        v[erbose]       verbosely report binding before task runs\n"
+"        no[ne]          don't bind tasks to CPUs (default)\n"
+"        rank            bind by task rank\n"
+"        map_cpu:<list>  specify a CPU ID binding for each task\n"
+"                        where <list> is <cpuid1>,<cpuid2>,...<cpuidN>\n"
+"        mask_cpu:<list> specify a CPU ID binding mask for each task\n"
+"                        where <list> is <mask1>,<mask2>,...<maskN>\n"
+"        rank_ldom       bind task by rank to CPUs in a NUMA locality domain\n"
+"        map_ldom:<list> specify a NUMA locality domain ID for each task\n"
+"                        where <list> is <ldom1>,<ldom2>,...<ldomN>\n"
+"        mask_ldom:<list> specify a NUMA locality domain ID mask for each task\n"
+"                        where <list> is <mask1>,<mask2>,...<maskN>\n"
+"        sockets         auto-generated masks bind to sockets\n"
+"        cores           auto-generated masks bind to cores\n"
+"        threads         auto-generated masks bind to threads\n"
+"        ldoms           auto-generated masks bind to NUMA locality domains\n"
+"        help            show this help message\n");
+}
+
+/*
+ * verify cpu_bind arguments
+ *
+ * we support different launch policy names
+ * we also allow a verbose setting to be specified
+ *     --cpu_bind=threads
+ *     --cpu_bind=cores
+ *     --cpu_bind=sockets
+ *     --cpu_bind=v
+ *     --cpu_bind=rank,v
+ *     --cpu_bind=rank
+ *     --cpu_bind={MAP_CPU|MASK_CPU}:0,1,2,3,4
+ *
+ * returns -1 on error, 1 if the help message was printed, 0 otherwise
+ */
+int slurm_verify_cpu_bind(const char *arg, char **cpu_bind, 
+			  cpu_bind_type_t *flags)
+{
+	char *buf, *p, *tok;
+	int bind_bits =
+		CPU_BIND_NONE|CPU_BIND_RANK|CPU_BIND_MAP|CPU_BIND_MASK;
+	int bind_to_bits =
+		CPU_BIND_TO_SOCKETS|CPU_BIND_TO_CORES|CPU_BIND_TO_THREADS;
+	uint16_t task_plugin_param = slurm_get_task_plugin_param();
+
+	bind_bits    |= CPU_BIND_LDRANK|CPU_BIND_LDMAP|CPU_BIND_LDMASK;
+	bind_to_bits |= CPU_BIND_TO_LDOMS;
+
+	if (arg == NULL) {
+		if ((*flags != 0) || 		/* already set values */
+		    (task_plugin_param == 0))	/* no system defaults */
+			return 0;
+
+		/* set system defaults */
+		xfree(*cpu_bind);
+		if (task_plugin_param & CPU_BIND_NONE)
+			*flags = CPU_BIND_NONE;
+		else if (task_plugin_param & CPU_BIND_TO_SOCKETS)
+			*flags = CPU_BIND_TO_SOCKETS;
+		else if (task_plugin_param & CPU_BIND_TO_CORES)
+			*flags = CPU_BIND_TO_CORES;
+		else if (task_plugin_param & CPU_BIND_TO_THREADS)
+			*flags |= CPU_BIND_TO_THREADS;
+		else if (task_plugin_param & CPU_BIND_TO_LDOMS)
+			*flags |= CPU_BIND_TO_LDOMS;
+		if (task_plugin_param & CPU_BIND_VERBOSE)
+			*flags |= CPU_BIND_VERBOSE;
+	    	return 0;
+	}
+
+	/* Start with system default verbose flag (if set) */
+	if (task_plugin_param & CPU_BIND_VERBOSE)
+		*flags |= CPU_BIND_VERBOSE;
+
+    	buf = xstrdup(arg);
+    	p = buf;
+	/* change all ',' delimiters not followed by a digit to ';'  */
+	/* simplifies parsing tokens while keeping map/mask together */
+	while (p[0] != '\0') {
+	    	if ((p[0] == ',') && (!_isvalue(&(p[1]))))
+			p[0] = ';';
+		p++;
+	}
+
+	p = buf;
+	while ((tok = strsep(&p, ";"))) {
+		if (strcasecmp(tok, "help") == 0) {
+			slurm_print_cpu_bind_help();
+			return 1;
+		} else if ((strcasecmp(tok, "q") == 0) ||
+			   (strcasecmp(tok, "quiet") == 0)) {
+		        *flags &= ~CPU_BIND_VERBOSE;
+		} else if ((strcasecmp(tok, "v") == 0) ||
+			   (strcasecmp(tok, "verbose") == 0)) {
+		        *flags |= CPU_BIND_VERBOSE;
+		} else if ((strcasecmp(tok, "no") == 0) ||
+			   (strcasecmp(tok, "none") == 0)) {
+			_clear_then_set((int *)flags, bind_bits, CPU_BIND_NONE);
+			xfree(*cpu_bind);
+		} else if (strcasecmp(tok, "rank") == 0) {
+			_clear_then_set((int *)flags, bind_bits, CPU_BIND_RANK);
+			xfree(*cpu_bind);
+		} else if ((strncasecmp(tok, "map_cpu", 7) == 0) ||
+		           (strncasecmp(tok, "mapcpu", 6) == 0)) {
+			char *list;
+			list = strsep(&tok, ":=");
+			list = strsep(&tok, ":=");
+			_clear_then_set((int *)flags, bind_bits, CPU_BIND_MAP);
+			xfree(*cpu_bind);
+			if (list && *list) {
+				*cpu_bind = xstrdup(list);
+			} else {
+				error("missing list for \"--cpu_bind="
+				      "map_cpu:<list>\"");
+				xfree(buf);
+				return -1;
+			}
+		} else if ((strncasecmp(tok, "mask_cpu", 8) == 0) ||
+		           (strncasecmp(tok, "maskcpu", 7) == 0)) {
+			char *list;
+			list = strsep(&tok, ":=");
+			list = strsep(&tok, ":=");
+			_clear_then_set((int *)flags, bind_bits, CPU_BIND_MASK);
+			xfree(*cpu_bind);
+			if (list && *list) {
+				*cpu_bind = xstrdup(list);
+			} else {
+				error("missing list for \"--cpu_bind="
+				      "mask_cpu:<list>\"");
+				xfree(buf);
+				return -1;
+			}
+		} else if (strcasecmp(tok, "rank_ldom") == 0) {
+			_clear_then_set((int *)flags, bind_bits,
+					CPU_BIND_LDRANK);
+			xfree(*cpu_bind);
+		} else if ((strncasecmp(tok, "map_ldom", 8) == 0) ||
+		           (strncasecmp(tok, "mapldom", 7) == 0)) {
+			char *list;
+			list = strsep(&tok, ":=");
+			list = strsep(&tok, ":=");
+			_clear_then_set((int *)flags, bind_bits,
+					CPU_BIND_LDMAP);
+			xfree(*cpu_bind);
+			if (list && *list) {
+				*cpu_bind = xstrdup(list);
+			} else {
+				error("missing list for \"--cpu_bind="
+				      "map_ldom:<list>\"");
+				xfree(buf);
+				return -1;
+			}
+		} else if ((strncasecmp(tok, "mask_ldom", 9) == 0) ||
+		           (strncasecmp(tok, "maskldom", 8) == 0)) {
+			char *list;
+			list = strsep(&tok, ":=");
+			list = strsep(&tok, ":=");
+			_clear_then_set((int *)flags, bind_bits,
+					CPU_BIND_LDMASK);
+			xfree(*cpu_bind);
+			if (list && *list) {
+				*cpu_bind = xstrdup(list);
+			} else {
+				error("missing list for \"--cpu_bind="
+				      "mask_ldom:<list>\"");
+				xfree(buf);
+				return -1;
+			}
+		} else if ((strcasecmp(tok, "socket") == 0) ||
+		           (strcasecmp(tok, "sockets") == 0)) {
+			if (task_plugin_param & 
+			    (CPU_BIND_NONE | CPU_BIND_TO_CORES | 
+			     CPU_BIND_TO_THREADS | CPU_BIND_TO_LDOMS)) {
+				error("--cpu_bind=sockets incompatible with "
+				      "TaskPluginParam configuration "
+				      "parameter");
+				return -1;
+			}
+			_clear_then_set((int *)flags, bind_to_bits,
+				       CPU_BIND_TO_SOCKETS);
+		} else if ((strcasecmp(tok, "core") == 0) ||
+		           (strcasecmp(tok, "cores") == 0)) {
+			if (task_plugin_param & 
+			    (CPU_BIND_NONE | CPU_BIND_TO_SOCKETS | 
+			     CPU_BIND_TO_THREADS | CPU_BIND_TO_LDOMS)) {
+				error("--cpu_bind=cores incompatible with "
+				      "TaskPluginParam configuration "
+				      "parameter");
+				return -1;
+			}
+			_clear_then_set((int *)flags, bind_to_bits,
+				       CPU_BIND_TO_CORES);
+		} else if ((strcasecmp(tok, "thread") == 0) ||
+		           (strcasecmp(tok, "threads") == 0)) {
+			if (task_plugin_param & 
+			    (CPU_BIND_NONE | CPU_BIND_TO_SOCKETS | 
+			     CPU_BIND_TO_CORES | CPU_BIND_TO_LDOMS)) {
+				error("--cpu_bind=threads incompatible with "
+				      "TaskPluginParam configuration "
+				      "parameter");
+				return -1;
+			}
+			_clear_then_set((int *)flags, bind_to_bits,
+				       CPU_BIND_TO_THREADS);
+		} else if ((strcasecmp(tok, "ldom") == 0) ||
+		           (strcasecmp(tok, "ldoms") == 0)) {
+			if (task_plugin_param & 
+			    (CPU_BIND_NONE | CPU_BIND_TO_SOCKETS | 
+			     CPU_BIND_TO_CORES | CPU_BIND_TO_THREADS)) {
+				error("--cpu_bind=ldoms incompatible with "
+				      "TaskPluginParam configuration "
+				      "parameter");
+				xfree(buf);
+				return -1;
+			}
+			_clear_then_set((int *)flags, bind_to_bits,
+				       CPU_BIND_TO_LDOMS);
+		} else {
+			error("unrecognized --cpu_bind argument \"%s\"", tok);
+			xfree(buf);
+			return -1;
+		}
+	}
+	xfree(buf);
+
+	return 0;
+}
+
+void slurm_print_mem_bind_help(void)
+{
+	printf(
+"Memory bind options:\n"
+"    --mem_bind=         Bind memory to locality domains (ldom)\n"
+"        q[uiet]         quietly bind before task runs (default)\n"
+"        v[erbose]       verbosely report binding before task runs\n"
+"        no[ne]          don't bind tasks to memory (default)\n"
+"        rank            bind by task rank\n"
+"        local           bind to memory local to processor\n"
+"        map_mem:<list>  specify a memory binding for each task\n"
+"                        where <list> is <cpuid1>,<cpuid2>,...<cpuidN>\n"
+"        mask_mem:<list> specify a memory binding mask for each task\n"
+"                        where <list> is <mask1>,<mask2>,...<maskN>\n"
+"        help            show this help message\n");
+}
+
+/*
+ * verify mem_bind arguments
+ *
+ * we support different memory binding names
+ * we also allow a verbose setting to be specified
+ *     --mem_bind=v
+ *     --mem_bind=rank,v
+ *     --mem_bind=rank
+ *     --mem_bind={MAP_MEM|MASK_MEM}:0,1,2,3,4
+ *
+ * returns -1 on error, 1 when the help message is printed, 0 otherwise
+ */
+int slurm_verify_mem_bind(const char *arg, char **mem_bind, 
+			  mem_bind_type_t *flags)
+{
+	char *buf, *p, *tok;
+	int bind_bits = MEM_BIND_NONE|MEM_BIND_RANK|MEM_BIND_LOCAL|
+		MEM_BIND_MAP|MEM_BIND_MASK;
+
+	if (arg == NULL) {
+		return 0;
+	}
+
+	buf = xstrdup(arg);
+	p = buf;
+	/* change all ',' delimiters not followed by a digit to ';'  */
+	/* simplifies parsing tokens while keeping map/mask together */
+	while (p[0] != '\0') {
+		if ((p[0] == ',') && (!_isvalue(&(p[1]))))
+			p[0] = ';';
+		p++;
+	}
+
+	p = buf;
+	while ((tok = strsep(&p, ";"))) {
+		if (strcasecmp(tok, "help") == 0) {
+			slurm_print_mem_bind_help();
+			xfree(buf);
+			return 1;
+
+		} else if ((strcasecmp(tok, "q") == 0) ||
+			   (strcasecmp(tok, "quiet") == 0)) {
+		        *flags &= ~MEM_BIND_VERBOSE;
+		} else if ((strcasecmp(tok, "v") == 0) ||
+			   (strcasecmp(tok, "verbose") == 0)) {
+		        *flags |= MEM_BIND_VERBOSE;
+		} else if ((strcasecmp(tok, "no") == 0) ||
+			   (strcasecmp(tok, "none") == 0)) {
+			_clear_then_set((int *)flags, bind_bits, MEM_BIND_NONE);
+			xfree(*mem_bind);
+		} else if (strcasecmp(tok, "rank") == 0) {
+			_clear_then_set((int *)flags, bind_bits, MEM_BIND_RANK);
+			xfree(*mem_bind);
+		} else if (strcasecmp(tok, "local") == 0) {
+			_clear_then_set((int *)flags, bind_bits, MEM_BIND_LOCAL);
+			xfree(*mem_bind);
+		} else if ((strncasecmp(tok, "map_mem", 7) == 0) ||
+		           (strncasecmp(tok, "mapmem", 6) == 0)) {
+			char *list;
+			list = strsep(&tok, ":=");
+			list = strsep(&tok, ":=");
+			_clear_then_set((int *)flags, bind_bits, MEM_BIND_MAP);
+			xfree(*mem_bind);
+			if (list && *list) {
+				*mem_bind = xstrdup(list);
+			} else {
+				error("missing list for \"--mem_bind=map_mem:<list>\"");
+				xfree(buf);
+				return -1;
+			}
+		} else if ((strncasecmp(tok, "mask_mem", 8) == 0) ||
+		           (strncasecmp(tok, "maskmem", 7) == 0)) {
+			char *list;
+			list = strsep(&tok, ":=");
+			list = strsep(&tok, ":=");
+			_clear_then_set((int *)flags, bind_bits, MEM_BIND_MASK);
+			xfree(*mem_bind);
+			if (list && *list) {
+				*mem_bind = xstrdup(list);
+			} else {
+				error("missing list for \"--mem_bind=mask_mem:<list>\"");
+				xfree(buf);
+				return -1;
+			}
+		} else {
+			error("unrecognized --mem_bind argument \"%s\"", tok);
+			xfree(buf);
+			return -1;
+		}
+	}
+
+	xfree(buf);
+	return 0;
+}
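Aside, not part of the patch: both verify routines rely on the same two-step
tokenizing trick. Every ',' not followed by a digit is first rewritten to ';'
so a map/mask value list survives as one token, and the pair of strsep()
calls then splits a token into its keyword and its list. The sketch below
exercises that rewrite in isolation; isvalue() is a simplified stand-in for
the patch's _isvalue() helper, whose definition lies outside this hunk.

#include <ctype.h>
#include <stdio.h>
#include <string.h>

/* simplified stand-in for _isvalue(): true if *cp starts a number */
static int isvalue(const char *cp)
{
	return isdigit((int)*cp);
}

int main(void)
{
	char buf[] = "map_mem:0,4,8,verbose";
	char *p, *tok;

	/* rewrite ',' to ';' unless a digit follows, as the verify
	 * routines do, so the map list stays a single token */
	for (p = buf; *p != '\0'; p++) {
		if ((p[0] == ',') && !isvalue(&p[1]))
			p[0] = ';';
	}

	p = buf;
	while ((tok = strsep(&p, ";"))) {
		/* two strsep() calls: the first returns the keyword
		 * ("map_mem"), the second returns the value list */
		char *kw   = strsep(&tok, ":=");
		char *list = strsep(&tok, ":=");
		printf("keyword=%s list=%s\n", kw, list ? list : "(none)");
	}
	return 0;	/* prints "map_mem"/"0,4,8", then "verbose"/"(none)" */
}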
diff --git a/src/common/slurm_resource_info.h b/src/common/slurm_resource_info.h
index 25733ff8e54207fd21709532072228dae86f24b1..867f4b9d53125d2b8fdb5c4fa18d8450537f5312 100644
--- a/src/common/slurm_resource_info.h
+++ b/src/common/slurm_resource_info.h
@@ -4,10 +4,11 @@
  *****************************************************************************
  *  Copyright (C) 2006 Hewlett-Packard Development Company, L.P.
  *  Written by Susanne M. Balle, <susanne.balle@hp.com>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -66,7 +67,15 @@ int slurm_get_avail_procs(const uint16_t mxsockets,
 			  const select_type_plugin_info_t cr_type,
 			  uint32_t job_id, char *name);
 
+void slurm_print_cpu_bind_help(void);
+void slurm_print_mem_bind_help(void);
+
 void slurm_sprint_cpu_bind_type(char *str, cpu_bind_type_t cpu_bind_type);
 void slurm_sprint_mem_bind_type(char *str, mem_bind_type_t mem_bind_type);
 
+int slurm_verify_cpu_bind(const char *arg, char **cpu_bind, 
+			  cpu_bind_type_t *flags);
+int slurm_verify_mem_bind(const char *arg, char **mem_bind, 
+			  mem_bind_type_t *flags);
+
 #endif /* !_RES_INFO_H */
diff --git a/src/common/slurm_rlimits_info.c b/src/common/slurm_rlimits_info.c
index b79b872ac31aa44142e010499ffc218a29ef8323..43cea165beb3e48a0d1f94df8241eebd5498050b 100644
--- a/src/common/slurm_rlimits_info.c
+++ b/src/common/slurm_rlimits_info.c
@@ -6,7 +6,8 @@
  *  Copyright (C) 2005 Hewlett-Packard Development Company, L.P.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/common/slurm_rlimits_info.h b/src/common/slurm_rlimits_info.h
index 1cffac395f5027b53199778f642d8be4179e8257..2abac295c1d91c7151c98e6e155e5b08e2c89695 100644
--- a/src/common/slurm_rlimits_info.h
+++ b/src/common/slurm_rlimits_info.h
@@ -6,7 +6,8 @@
  *  Copyright (C) 2005 Hewlett-Packard Development Company, L.P.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/common/slurm_selecttype_info.c b/src/common/slurm_selecttype_info.c
index baa5213f460f73d3ce264e5be682c600cf940e55..937fbd48eab42df685ca4c08ce25bc3673c494a1 100644
--- a/src/common/slurm_selecttype_info.c
+++ b/src/common/slurm_selecttype_info.c
@@ -4,10 +4,11 @@
  *
  *  Copyright (C) 2006 Hewlett-Packard Development Company, L.P.
  *  Written by Susanne M. Balle, <susanne.balle@hp.com>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/common/slurm_selecttype_info.h b/src/common/slurm_selecttype_info.h
index f602ca13674ebdb891e5f187036d1d640212a713..2e410e91548b227ad1b1d23a9ac159affe0719ef 100644
--- a/src/common/slurm_selecttype_info.h
+++ b/src/common/slurm_selecttype_info.h
@@ -4,10 +4,11 @@
  *
  *  Copyright (C) 2006 Hewlett-Packard Development Company, L.P.
  *  Written by Susanne M. Balle, <susanne.balle@hp.com>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/common/slurm_step_layout.c b/src/common/slurm_step_layout.c
index 24784589fd3b35ef8923fbd7fd652c14edbcae3c..db7390cbe484cbf508e4dfca01ea4ded0c5bd1b8 100644
--- a/src/common/slurm_step_layout.c
+++ b/src/common/slurm_step_layout.c
@@ -6,10 +6,11 @@
  *  Copyright (C) 2005 Hewlett-Packard Development Company, L.P.
  *  Written by Chris Holmes, <cholmes@hp.com>, who borrowed heavily
  *  from other parts of SLURM.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -55,16 +56,16 @@
 /* build maps for task layout on nodes */
 static int _init_task_layout(slurm_step_layout_t *step_layout, 
 			     const char *arbitrary_nodes, 
-			     uint32_t *cpus_per_node, uint32_t *cpu_count_reps,
+			     uint16_t *cpus_per_node, uint32_t *cpu_count_reps,
 			     uint16_t cpus_per_task,
 			     uint16_t task_dist, uint16_t plane_size);
 
 static int _task_layout_block(slurm_step_layout_t *step_layout, 
-			      uint32_t *cpus);
+			      uint16_t *cpus);
 static int _task_layout_cyclic(slurm_step_layout_t *step_layout, 
-			       uint32_t *cpus);
+			       uint16_t *cpus);
 static int _task_layout_plane(slurm_step_layout_t *step_layout,
-			       uint32_t *cpus);
+			       uint16_t *cpus);
 #ifndef HAVE_FRONT_END
 static int _task_layout_hostfile(slurm_step_layout_t *step_layout,
 				 const char *arbitrary_nodes);
@@ -89,7 +90,7 @@ static int _task_layout_hostfile(slurm_step_layout_t *step_layout,
  */
 slurm_step_layout_t *slurm_step_layout_create(
 	const char *tlist,
-	uint32_t *cpus_per_node, uint32_t *cpu_count_reps, 
+	uint16_t *cpus_per_node, uint32_t *cpu_count_reps, 
 	uint32_t num_hosts, 
 	uint32_t num_tasks,
 	uint16_t cpus_per_task,
@@ -99,10 +100,11 @@ slurm_step_layout_t *slurm_step_layout_create(
 	char *arbitrary_nodes = NULL;
 	slurm_step_layout_t *step_layout = 
 		xmalloc(sizeof(slurm_step_layout_t));
-		
+
+	step_layout->task_dist = task_dist;
 	if(task_dist == SLURM_DIST_ARBITRARY) {
 		hostlist_t hl = NULL;
-		char buf[8192];
+		char buf[65536];
 		/* set the node list for the task layout later; if user
 		   supplied it could be different than the job allocation */
 		arbitrary_nodes = xstrdup(tlist);
@@ -156,7 +158,7 @@ slurm_step_layout_t *slurm_step_layout_create(
  */
 slurm_step_layout_t *fake_slurm_step_layout_create(
 	const char *tlist,
-	uint32_t *cpus_per_node, 
+	uint16_t *cpus_per_node, 
 	uint32_t *cpu_count_reps,
 	uint32_t node_cnt, 
 	uint32_t task_cnt) 
@@ -165,14 +167,12 @@ slurm_step_layout_t *fake_slurm_step_layout_create(
 	int cpu_cnt = 0, cpu_inx = 0, i, j;
 /* 	char *name = NULL; */
 	hostlist_t hl = NULL;
-	slurm_step_layout_t *step_layout = 
-		xmalloc(sizeof(slurm_step_layout_t));
+	slurm_step_layout_t *step_layout = NULL;
 
-	if(node_cnt <= 0 || (task_cnt <= 0 && !cpus_per_node) || !tlist) {
+	if((node_cnt <= 0) || (task_cnt <= 0 && !cpus_per_node) || !tlist) {
 		error("there is a problem with your fake_step_layout request\n"
 		      "node_cnt = %u, task_cnt = %u, tlist = %s",
 		      node_cnt, task_cnt, tlist);
-		xfree(step_layout);
 		return NULL;
 	}
 
@@ -261,6 +261,7 @@ extern slurm_step_layout_t *slurm_step_layout_copy(
 	layout->node_list = xstrdup(step_layout->node_list);
 	layout->node_cnt = step_layout->node_cnt;
 	layout->task_cnt = step_layout->task_cnt;
+	layout->task_dist = step_layout->task_dist;
 	
 /* 	layout->node_addr = xmalloc(sizeof(slurm_addr) * layout->node_cnt); */
 /* 	memcpy(layout->node_addr, step_layout->node_addr,  */
@@ -293,6 +294,7 @@ extern void pack_slurm_step_layout(slurm_step_layout_t *step_layout,
 	packstr(step_layout->node_list, buffer);
 	pack32(step_layout->node_cnt, buffer);
 	pack32(step_layout->task_cnt, buffer);
+	pack16(step_layout->task_dist, buffer);
 /* 	slurm_pack_slurm_addr_array(step_layout->node_addr,  */
 /* 				    step_layout->node_cnt, buffer); */
 
@@ -323,6 +325,7 @@ extern int unpack_slurm_step_layout(slurm_step_layout_t **layout, Buf buffer)
 	safe_unpackstr_xmalloc(&step_layout->node_list, &uint32_tmp, buffer);
 	safe_unpack32(&step_layout->node_cnt, buffer);
 	safe_unpack32(&step_layout->task_cnt, buffer);
+	safe_unpack16(&step_layout->task_dist, buffer);
 	
 /* 	if (slurm_unpack_slurm_addr_array(&(step_layout->node_addr),  */
 /* 					  &uint32_tmp, buffer)) */
@@ -393,14 +396,14 @@ char *slurm_step_layout_host_name (slurm_step_layout_t *s, int taskid)
 /* build maps for task layout on nodes */
 static int _init_task_layout(slurm_step_layout_t *step_layout,
 			     const char *arbitrary_nodes,
-			     uint32_t *cpus_per_node, uint32_t *cpu_count_reps,
+			     uint16_t *cpus_per_node, uint32_t *cpu_count_reps,
 			     uint16_t cpus_per_task,
 			     uint16_t task_dist, uint16_t plane_size)
 {
 	int cpu_cnt = 0, cpu_inx = 0, i;
 	hostlist_t hl = NULL;
 /* 	char *name = NULL; */
-	uint32_t cpus[step_layout->node_cnt];
+	uint16_t cpus[step_layout->node_cnt];
 
 	if (step_layout->node_cnt == 0)
 		return SLURM_ERROR;
@@ -419,7 +422,7 @@ static int _init_task_layout(slurm_step_layout_t *step_layout,
 
 	hl = hostlist_create(step_layout->node_list);
 	/* make sure the number of nodes we think we have 
-	   is the correct number */
+	 * is the correct number */
 	i = hostlist_count(hl);
 	if(step_layout->node_cnt > i)
 		step_layout->node_cnt = i;
@@ -442,6 +445,12 @@ static int _init_task_layout(slurm_step_layout_t *step_layout,
 /* 		debug2("host %d = %s", i, name); */
 /* 		free(name); */
 		cpus[i] = (cpus_per_node[cpu_inx] / cpus_per_task);
+		if (cpus[i] == 0) {
+			/* this can be a result of a heterogeneous allocation
+			 * (e.g. 4 cpus on one node and 2 on the second with
+			 *  cpus_per_task=3)  */
+			cpus[i] = 1;
+		}
 		//info("got %d cpus", cpus[i]);
 		if ((++cpu_cnt) >= cpu_count_reps[cpu_inx]) {
 			/* move to next record */
@@ -552,7 +561,7 @@ static int _task_layout_hostfile(slurm_step_layout_t *step_layout,
 /* to effectively deal with heterogeneous nodes, we fake a cyclic
  * distribution to figure out how many tasks go on each node and
  * then make those assignments in a block fashion */
-static int _task_layout_block(slurm_step_layout_t *step_layout, uint32_t *cpus)
+static int _task_layout_block(slurm_step_layout_t *step_layout, uint16_t *cpus)
 {
 	int i, j, taskid = 0;
 	bool over_subscribe = false;
@@ -600,7 +609,7 @@ static int _task_layout_block(slurm_step_layout_t *step_layout, uint32_t *cpus)
  *                     12 13 14 15  etc.
  */
 static int _task_layout_cyclic(slurm_step_layout_t *step_layout, 
-			       uint32_t *cpus)
+			       uint16_t *cpus)
 {
 	int i, j, taskid = 0;
 	bool over_subscribe = false;
@@ -647,7 +656,7 @@ static int _task_layout_cyclic(slurm_step_layout_t *step_layout,
  *                     12 13 14 15  etc.
  */
 static int _task_layout_plane(slurm_step_layout_t *step_layout,
-			       uint32_t *cpus)
+			       uint16_t *cpus)
 {
 	int i, j, k, taskid = 0;
 
@@ -704,3 +713,44 @@ static int _task_layout_plane(slurm_step_layout_t *step_layout,
 	
 	return SLURM_SUCCESS;
 }
+
+extern char *slurm_step_layout_type_name(task_dist_states_t task_dist)
+{
+	switch(task_dist) {
+	case SLURM_DIST_CYCLIC:
+		return "Cyclic";
+		break;
+	case SLURM_DIST_BLOCK:	/* distribute tasks filling node by node */
+		return "Block";
+		break;
+	case SLURM_DIST_ARBITRARY:	/* arbitrary task distribution  */
+		return "Arbitrary";
+		break;
+	case SLURM_DIST_PLANE:	/* distribute tasks by filling up
+				   planes of lllp first and then
+				   going across the nodes; see the
+				   documentation for more
+				   information */
+		return "Plane";
+		break;
+	case SLURM_DIST_CYCLIC_CYCLIC:/* distribute tasks one per node,
+				   round robin; same for the lowest
+				   level of logical processor (lllp) */
+		return "CCyclic";
+		break;
+	case SLURM_DIST_CYCLIC_BLOCK: /* cyclic for node and block for lllp  */
+		return "CBlock";
+		break;
+	case SLURM_DIST_BLOCK_CYCLIC: /* block for node and cyclic for lllp  */
+		return "BCyclic";
+		break;
+	case SLURM_DIST_BLOCK_BLOCK:	/* block for node and block for lllp  */
+		return "BBlock";
+		break;
+	case SLURM_NO_LLLP_DIST:	/* No distribution specified for lllp */
+	case SLURM_DIST_UNKNOWN:
+	default:
+		return "Unknown";
+
+	}
+}
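Aside, not part of the patch: the arithmetic behind the new cpus[i] == 0
guard in _init_task_layout(), using the heterogeneous allocation from the
comment above (4 CPUs on one node, 2 on the other, cpus_per_task = 3):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t cpus_per_node[] = { 4, 2 };
	uint16_t cpus_per_task = 3;
	int i;

	for (i = 0; i < 2; i++) {
		/* integer division: node 1 would get 2/3 = 0 slots */
		uint16_t slots = cpus_per_node[i] / cpus_per_task;
		if (slots == 0)	/* the guard added by this patch */
			slots = 1;
		printf("node%d: %u task slot(s)\n", i, (unsigned)slots);
	}
	return 0;	/* both nodes end up with 1 slot */
}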
diff --git a/src/common/slurm_step_layout.h b/src/common/slurm_step_layout.h
index b8de30b2824e07f4cea08f86911477bfbf3d9aa1..76d942edaf51c9e2ad7896ac2456954f0ac7f9de 100644
--- a/src/common/slurm_step_layout.h
+++ b/src/common/slurm_step_layout.h
@@ -6,10 +6,11 @@
  *  Copyright (C) 2005 Hewlett-Packard Development Company, L.P.
  *  Written by Chris Holmes, <cholmes@hp.com>, who borrowed heavily
  *  from other parts of SLURM.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -62,7 +63,7 @@
  * NOTE: allocates memory that should be xfreed by caller
  */
 extern slurm_step_layout_t *slurm_step_layout_create(const char *tlist,
-						     uint32_t *cpus_per_node, 
+						     uint16_t *cpus_per_node, 
 						     uint32_t *cpu_count_reps,
 						     uint32_t node_cnt, 
 						     uint32_t task_cnt,
@@ -86,7 +87,7 @@ extern slurm_step_layout_t *slurm_step_layout_create(const char *tlist,
  */
 extern slurm_step_layout_t *fake_slurm_step_layout_create(
 	const char *tlist,
-	uint32_t *cpus_per_node, 
+	uint16_t *cpus_per_node, 
 	uint32_t *cpu_count_reps,
 	uint32_t node_cnt, 
 	uint32_t task_cnt);
@@ -107,4 +108,5 @@ extern int slurm_step_layout_destroy(slurm_step_layout_t *step_layout);
 extern int slurm_step_layout_host_id (slurm_step_layout_t *s, int taskid);
 extern char *slurm_step_layout_host_name (slurm_step_layout_t *s, int hostid);
 
+extern char *slurm_step_layout_type_name(task_dist_states_t task_dist);
 #endif /* !_SLURM_STEP_LAYOUT_H */
diff --git a/src/common/slurm_strcasestr.c b/src/common/slurm_strcasestr.c
new file mode 100644
index 0000000000000000000000000000000000000000..f4e34b740ce738158d01329fdaf55ce68cae7ec5
--- /dev/null
+++ b/src/common/slurm_strcasestr.c
@@ -0,0 +1,66 @@
+/*****************************************************************************\
+ *  slurm_strcasestr.c - case insensitive version of strstr()
+ *****************************************************************************
+ *  Copyright (C) 2009 Lawrence Livermore National Security.
+ *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+ *  Written by David Bremer <dbremer@llnl.gov>
+ *  CODE-OCEC-09-009. All rights reserved.
+ *
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
+ *
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and
+ *  distribute linked combinations including the two. You must obey the GNU
+ *  General Public License in all respects for all of the code used other than
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this
+ *  exception to your version of the file(s), but you are not obligated to do
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in
+ *  the program, then also delete it here.
+ *
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#include "slurm_strcasestr.h"
+#include <string.h>
+#include <ctype.h>
+
+/* case insensitive version of strstr() */
+char *
+slurm_strcasestr(char *haystack, char *needle)
+{
+	int hay_inx,  hay_size  = strlen(haystack);
+	int need_inx, need_size = strlen(needle);
+	char *hay_ptr = haystack;
+
+	for (hay_inx=0; hay_inx<hay_size; hay_inx++) {
+		for (need_inx=0; need_inx<need_size; need_inx++) {
+			if (tolower((int) hay_ptr[need_inx]) != 
+			    tolower((int) needle [need_inx]))
+				break;		/* mis-match */
+		}
+
+		if (need_inx == need_size)	/* it matched */
+			return hay_ptr;
+		else				/* keep looking */
+			hay_ptr++;
+	}
+
+	return NULL;	/* no match anywhere in string */
+}
+
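Aside, not part of the patch: a minimal usage sketch for the new helper,
assuming it is compiled and linked together with slurm_strcasestr.c:

#include <stdio.h>
#include "slurm_strcasestr.h"

int main(void)
{
	char conf[] = "TaskPlugin=task/affinity";
	char *hit = slurm_strcasestr(conf, "taskplugin");

	if (hit)	/* matches despite the case difference */
		printf("found at offset %ld\n", (long)(hit - conf));
	return 0;	/* prints: found at offset 0 */
}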
diff --git a/src/common/slurm_strcasestr.h b/src/common/slurm_strcasestr.h
new file mode 100644
index 0000000000000000000000000000000000000000..79e7309093360eeaa96ccfa5da336a6b1a413bb9
--- /dev/null
+++ b/src/common/slurm_strcasestr.h
@@ -0,0 +1,46 @@
+/*****************************************************************************\
+ *  slurm_strcasestr.h - case insensitive version of strstr()
+ *****************************************************************************
+ *  Copyright (C) 2009 Lawrence Livermore National Security.
+ *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+ *  Written by David Bremer <dbremer@llnl.gov>
+ *  CODE-OCEC-09-009. All rights reserved.
+ *
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
+ *
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and
+ *  distribute linked combinations including the two. You must obey the GNU
+ *  General Public License in all respects for all of the code used other than
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this
+ *  exception to your version of the file(s), but you are not obligated to do
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in
+ *  the program, then also delete it here.
+ *
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#ifndef _SLURM_STRCASESTR_H
+#define _SLURM_STRCASESTR_H
+
+/* case insensitive version of strstr().  This exists for our AIX
+   builds, which are missing the GNU strcasestr() function. */
+char *slurm_strcasestr(char *haystack, char *needle);
+
+#endif
diff --git a/src/common/slurm_xlator.h b/src/common/slurm_xlator.h
index 4513785d1496a8be5b860533fcc6f43b46d7ecee..3a2853884807dbb781706bf4acef5f10a3be9dc8 100644
--- a/src/common/slurm_xlator.h
+++ b/src/common/slurm_xlator.h
@@ -30,10 +30,11 @@
  *  Copyright (C) 2004 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Mark Grondona <grondona1@llnl.gov>, Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/common/slurmdbd_defs.c b/src/common/slurmdbd_defs.c
index 33e0e58195b974a27a1058be300fdb4e579fc79a..b7c38cbe8d898d8802dd26c71817d5f41da33fce 100644
--- a/src/common/slurmdbd_defs.c
+++ b/src/common/slurmdbd_defs.c
@@ -1,13 +1,14 @@
 /****************************************************************************\
  *  slurmdbd_defs.c - functions for use with Slurm DBD RPCs
  *****************************************************************************
- *  Copyright (C) 2008 Lawrence Livermore National Security.
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -75,7 +76,7 @@
 #define DBD_MAGIC		0xDEAD3219
 #define MAX_AGENT_QUEUE		10000
 #define MAX_DBD_MSG_LEN		16384
-#define SLURMDBD_TIMEOUT	300	/* Seconds SlurmDBD for response */
+#define SLURMDBD_TIMEOUT	900	/* Seconds to wait for SlurmDBD response */
 
 uint16_t running_cache = 0;
 pthread_mutex_t assoc_cache_mutex = PTHREAD_MUTEX_INITIALIZER;
@@ -265,8 +266,8 @@ extern int slurm_send_recv_slurmdbd_msg(uint16_t rpc_version,
 	rc = unpack_slurmdbd_msg(rpc_version, resp, buffer);
 
 	/* check for the rc of the start job message */
-	if (rc == SLURM_SUCCESS && resp->msg_type == DBD_JOB_START_RC) 
-		rc = ((dbd_job_start_rc_msg_t *)resp->data)->return_code;
+	if (rc == SLURM_SUCCESS && resp->msg_type == DBD_ID_RC) 
+		rc = ((dbd_id_rc_msg_t *)resp->data)->return_code;
 	
 	free_buf(buffer);
 	slurm_mutex_unlock(&slurmdbd_lock);
@@ -325,7 +326,7 @@ static void _open_slurmdbd_fd(void)
 	slurm_addr dbd_addr;
 	uint16_t slurmdbd_port;
 	char *   slurmdbd_host;
-
+	bool try_backup = true;
+
 	if (slurmdbd_fd >= 0) {
 		debug("Attempt to re-open slurmdbd socket");
 		/* clear errno (checked after this for errors) */
@@ -335,13 +336,16 @@ static void _open_slurmdbd_fd(void)
 
 	slurmdbd_host = slurm_get_accounting_storage_host();
 	slurmdbd_port = slurm_get_accounting_storage_port();
-	if (slurmdbd_host == NULL)
+	if (slurmdbd_host == NULL) {
 		slurmdbd_host = xstrdup(DEFAULT_STORAGE_HOST);
-	
+		slurm_set_accounting_storage_host(slurmdbd_host);
+	}
+
 	if (slurmdbd_port == 0) {
 		slurmdbd_port = SLURMDBD_PORT;
 		slurm_set_accounting_storage_port(slurmdbd_port);
 	}
+again:
 	slurm_set_addr(&dbd_addr, slurmdbd_port, slurmdbd_host);
 	if (dbd_addr.sin_port == 0)
 		error("Unable to locate SlurmDBD host %s:%u", 
@@ -349,9 +353,17 @@ static void _open_slurmdbd_fd(void)
 	else {
 		slurmdbd_fd = slurm_open_msg_conn(&dbd_addr);
 
-		if (slurmdbd_fd < 0)
-			error("slurmdbd: slurm_open_msg_conn: %m");
-		else {
+		if (slurmdbd_fd < 0) {
+			debug("slurmdbd: slurm_open_msg_conn to %s:%u: %m",
+			      slurmdbd_host, slurmdbd_port);
+			if(try_backup) {
+				try_backup = false;
+				xfree(slurmdbd_host);
+				if((slurmdbd_host = 
+				    slurm_get_accounting_storage_backup_host()))
+					goto again;			
+			}
+		} else {
 			fd_set_nonblocking(slurmdbd_fd);
 			if (_send_init_msg() != SLURM_SUCCESS)  {
 				error("slurmdbd: Sending DdbInit msg: %m");
@@ -385,11 +397,12 @@ extern Buf pack_slurmdbd_msg(uint16_t rpc_version, slurmdbd_msg_t *req)
 	case DBD_GOT_LIST:
 	case DBD_ADD_QOS:
 	case DBD_GOT_QOS:
+	case DBD_GOT_RESVS:
 	case DBD_ADD_WCKEYS:
 	case DBD_GOT_WCKEYS:
 	case DBD_GOT_TXN:
 	case DBD_GOT_USERS:
-	case DBD_UPDATE_SHARES_USED:
+	case DBD_GOT_CONFIG:
 		slurmdbd_pack_list_msg(
 			rpc_version, req->msg_type, 
 			(dbd_list_msg_t *)req->data, buffer);
@@ -414,6 +427,7 @@ extern Buf pack_slurmdbd_msg(uint16_t rpc_version, slurmdbd_msg_t *req)
 	case DBD_GET_CLUSTERS:
 	case DBD_GET_JOBS_COND:
 	case DBD_GET_QOS:
+	case DBD_GET_RESVS:
 	case DBD_GET_WCKEYS:
 	case DBD_GET_TXN:
 	case DBD_GET_USERS:
@@ -462,10 +476,10 @@ extern Buf pack_slurmdbd_msg(uint16_t rpc_version, slurmdbd_msg_t *req)
 					    (dbd_job_start_msg_t *)req->data, 
 					    buffer);
 		break;
-	case DBD_JOB_START_RC:
-		slurmdbd_pack_job_start_rc_msg(
+	case DBD_ID_RC:
+		slurmdbd_pack_id_rc_msg(
 			rpc_version,
-			(dbd_job_start_rc_msg_t *)req->data, buffer);
+			(dbd_id_rc_msg_t *)req->data, buffer);
 		break;		
 	case DBD_JOB_SUSPEND:
 		slurmdbd_pack_job_suspend_msg(
@@ -509,6 +523,16 @@ extern Buf pack_slurmdbd_msg(uint16_t rpc_version, slurmdbd_msg_t *req)
 					     (dbd_roll_usage_msg_t *)
 					     req->data, buffer);
 		break;
+	case DBD_ADD_RESV:
+	case DBD_REMOVE_RESV:
+	case DBD_MODIFY_RESV:
+		slurmdbd_pack_rec_msg(
+			rpc_version, req->msg_type,
+			(dbd_rec_msg_t *)req->data, buffer);
+		break;
+	case DBD_GET_CONFIG:
+		/* No message to pack */
+		break;
 	default:
 		error("slurmdbd: Invalid message type pack %u(%s:%u)",
 		      req->msg_type,
@@ -539,11 +563,12 @@ extern int unpack_slurmdbd_msg(uint16_t rpc_version,
 	case DBD_GOT_LIST:
 	case DBD_ADD_QOS:
 	case DBD_GOT_QOS:
+	case DBD_GOT_RESVS:
 	case DBD_ADD_WCKEYS:
 	case DBD_GOT_WCKEYS:
 	case DBD_GOT_TXN:
 	case DBD_GOT_USERS:
-	case DBD_UPDATE_SHARES_USED:
+	case DBD_GOT_CONFIG:
 		rc = slurmdbd_unpack_list_msg(
 			rpc_version, resp->msg_type,
 			(dbd_list_msg_t **)&resp->data, buffer);
@@ -569,6 +594,7 @@ extern int unpack_slurmdbd_msg(uint16_t rpc_version,
 	case DBD_GET_JOBS_COND:
 	case DBD_GET_USERS:
 	case DBD_GET_QOS:
+	case DBD_GET_RESVS:
 	case DBD_GET_WCKEYS:
 	case DBD_GET_TXN:
 	case DBD_REMOVE_ACCOUNTS:
@@ -619,10 +645,10 @@ extern int unpack_slurmdbd_msg(uint16_t rpc_version,
 			rpc_version,
 			(dbd_job_start_msg_t **)&resp->data, buffer);
 		break;
-	case DBD_JOB_START_RC:
-		rc = slurmdbd_unpack_job_start_rc_msg(
+	case DBD_ID_RC:
+		rc = slurmdbd_unpack_id_rc_msg(
 			rpc_version,
-			(dbd_job_start_rc_msg_t **)&resp->data, buffer);
+			(dbd_id_rc_msg_t **)&resp->data, buffer);
 		break;		
 	case DBD_JOB_SUSPEND:
 		rc = slurmdbd_unpack_job_suspend_msg(
@@ -668,6 +694,16 @@ extern int unpack_slurmdbd_msg(uint16_t rpc_version,
 			rpc_version,
 			(dbd_roll_usage_msg_t **)&resp->data, buffer);
 		break;
+	case DBD_ADD_RESV:
+	case DBD_REMOVE_RESV:
+	case DBD_MODIFY_RESV:
+		rc = slurmdbd_unpack_rec_msg(
+			rpc_version, resp->msg_type,
+			(dbd_rec_msg_t **)&resp->data, buffer);
+		break;
+	case DBD_GET_CONFIG:
+		/* No message to unpack */
+		break;
 	default:
 		error("slurmdbd: Invalid message type unpack %u(%s)",
 		      resp->msg_type,
@@ -737,8 +773,8 @@ extern slurmdbd_msg_type_t str_2_slurmdbd_msg_type(char *msg_type)
 		return DBD_JOB_COMPLETE;
 	} else if(!strcasecmp(msg_type, "Job Start")) {
 		return DBD_JOB_START;
-	} else if(!strcasecmp(msg_type, "Job Start RC")) {
-		return DBD_JOB_START_RC;
+	} else if(!strcasecmp(msg_type, "ID RC")) {
+		return DBD_ID_RC;
 	} else if(!strcasecmp(msg_type, "Job Suspend")) {
 		return DBD_JOB_SUSPEND;
 	} else if(!strcasecmp(msg_type, "Modify Accounts")) {
@@ -775,8 +811,6 @@ extern slurmdbd_msg_type_t str_2_slurmdbd_msg_type(char *msg_type)
 		return DBD_STEP_COMPLETE;
 	} else if(!strcasecmp(msg_type, "Step Start")) {
 		return DBD_STEP_START;
-	} else if(!strcasecmp(msg_type, "Update Shares Used")) {
-		return DBD_UPDATE_SHARES_USED;
 	} else if(!strcasecmp(msg_type, "Get Jobs Conditional")) {
 		return DBD_GET_JOBS_COND;
 	} else if(!strcasecmp(msg_type, "Get Transations")) {
@@ -803,6 +837,20 @@ extern slurmdbd_msg_type_t str_2_slurmdbd_msg_type(char *msg_type)
 		return DBD_GET_WCKEY_USAGE;
 	} else if(!strcasecmp(msg_type, "Got WCKey Usage")) {
 		return DBD_GOT_WCKEY_USAGE;
+	} else if(!strcasecmp(msg_type, "Add Reservation")) {
+		return DBD_ADD_RESV;
+	} else if(!strcasecmp(msg_type, "Remove Reservation")) {
+		return DBD_REMOVE_RESV;
+	} else if(!strcasecmp(msg_type, "Modify Reservation")) {
+		return DBD_MODIFY_RESV;
+	} else if(!strcasecmp(msg_type, "Get Reservations")) {
+		return DBD_GET_RESVS;
+	} else if(!strcasecmp(msg_type, "Got Reservations")) {
+		return DBD_GOT_RESVS;
+	} else if(!strcasecmp(msg_type, "Get Config")) {
+		return DBD_GET_CONFIG;
+	} else if(!strcasecmp(msg_type, "Got Config")) {
+		return DBD_GOT_CONFIG;
 	} else {
 		return NO_VAL;		
 	}
@@ -969,11 +1017,11 @@ extern char *slurmdbd_msg_type_2_str(slurmdbd_msg_type_t msg_type, int get_enum)
 		} else
 			return "Job Start";
 		break;
-	case DBD_JOB_START_RC:
+	case DBD_ID_RC:
 		if(get_enum) {
-			return "DBD_JOB_START_RC";
+			return "DBD_ID_RC";
 		} else
-			return "Job Start RC";
+			return "ID RC";
 		break;
 	case DBD_JOB_SUSPEND:
 		if(get_enum) {
@@ -1083,12 +1131,6 @@ extern char *slurmdbd_msg_type_2_str(slurmdbd_msg_type_t msg_type, int get_enum)
 		} else
 			return "Step Start";
 		break;
-	case DBD_UPDATE_SHARES_USED:
-		if(get_enum) {
-			return "DBD_UPDATE_SHARES_USED";
-		} else
-			return "Update Shares Used";
-		break;
 	case DBD_GET_JOBS_COND:
 		if(get_enum) {
 			return "DBD_GET_JOBS_COND";
@@ -1167,6 +1209,48 @@ extern char *slurmdbd_msg_type_2_str(slurmdbd_msg_type_t msg_type, int get_enum)
 		} else
 			return "Got WCKey Usage";
 		break;
+	case DBD_ADD_RESV:
+		if(get_enum) {
+			return "DBD_ADD_RESV";
+		} else
+			return "Add Reservation";
+		break;
+	case DBD_REMOVE_RESV:
+		if(get_enum) {
+			return "DBD_REMOVE_RESV";
+		} else
+			return "Remove Reservation";
+		break;
+	case DBD_MODIFY_RESV:
+		if(get_enum) {
+			return "DBD_MODIFY_RESV";
+		} else
+			return "Modify Reservation";
+		break;
+	case DBD_GET_RESVS:
+		if(get_enum) {
+			return "DBD_GET_RESVS";
+		} else
+			return "Get Reservations";
+		break;
+	case DBD_GOT_RESVS:
+		if(get_enum) {
+			return "DBD_GOT_RESVS";
+		} else
+			return "Got Reservations";
+		break;
+	case DBD_GET_CONFIG:
+		if(get_enum) {
+			return "DBD_GET_CONFIG";
+		} else
+			return "Get Config";
+		break;
+	case DBD_GOT_CONFIG:
+		if(get_enum) {
+			return "DBD_GOT_CONFIG";
+		} else
+			return "Got Config";
+		break;
 	default:
 		return "Unknown";
 		break;
@@ -1293,7 +1377,7 @@ static int _get_return_code(uint16_t rpc_version, int read_timeout)
 	Buf buffer;
 	uint16_t msg_type;
 	dbd_rc_msg_t *msg;
-	dbd_job_start_rc_msg_t *js_msg;
+	dbd_id_rc_msg_t *id_msg;
 	int rc = SLURM_ERROR;
 
 	buffer = _recv_msg(read_timeout);
@@ -1302,14 +1386,13 @@ static int _get_return_code(uint16_t rpc_version, int read_timeout)
 
 	safe_unpack16(&msg_type, buffer);
 	switch(msg_type) {
-	case DBD_JOB_START_RC:
-		if (slurmdbd_unpack_job_start_rc_msg(rpc_version, 
-						     &js_msg, buffer)
+	case DBD_ID_RC:
+		if (slurmdbd_unpack_id_rc_msg(rpc_version, &id_msg, buffer)
 		    == SLURM_SUCCESS) {
-			rc = js_msg->return_code;
-			slurmdbd_free_job_start_rc_msg(rpc_version, js_msg);
+			rc = id_msg->return_code;
+			slurmdbd_free_id_rc_msg(rpc_version, id_msg);
 			if (rc != SLURM_SUCCESS)
-				error("slurmdbd: DBD_JOB_START_RC is %d", rc);
+				error("slurmdbd: DBD_ID_RC is %d", rc);
 		} else
 			error("slurmdbd: unpack message error");
 		break;
@@ -1462,7 +1545,7 @@ static bool _fd_readable(slurm_fd fd, int read_timeout)
 	return false;
 }
 
-/* Wait until a file is writable, 
+/* Wait until a file is writeable, 
  * RET 1 if file can be written now,
  *     0 if can not be written to within 5 seconds
  *     -1 if file has been closed POLLHUP
@@ -1644,7 +1727,7 @@ static void *_agent(void *x)
 			if(slurmdbd_fd >= 0 && running_cache)
 				pthread_cond_signal(&assoc_cache_cond);
 			slurm_mutex_unlock(&assoc_cache_mutex);
-
+			
 			continue;
 		}
 
@@ -1921,6 +2004,30 @@ void inline slurmdbd_free_cluster_procs_msg(uint16_t rpc_version,
 {
 	if (msg) {
 		xfree(msg->cluster_name);
+		xfree(msg->cluster_nodes);
+		xfree(msg);
+	}
+}
+
+void inline slurmdbd_free_rec_msg(uint16_t rpc_version, 
+				   slurmdbd_msg_type_t type,
+				   dbd_rec_msg_t *msg)
+{
+	void (*my_destroy) (void *object);
+
+	if (msg) {
+		switch(type) {
+		case DBD_ADD_RESV:
+		case DBD_REMOVE_RESV:
+		case DBD_MODIFY_RESV:
+			my_destroy = destroy_acct_reservation_rec;
+			break;
+		default:
+			fatal("Unknown rec type");
+			return;
+		}
+		if(msg->rec)
+			(*(my_destroy))(msg->rec);
 		xfree(msg);
 	}
 }
@@ -1966,6 +2073,9 @@ void inline slurmdbd_free_cond_msg(uint16_t rpc_version,
 		case DBD_ARCHIVE_DUMP:
 			my_destroy = destroy_acct_archive_cond;
 			break;
+		case DBD_GET_RESVS:
+			my_destroy = destroy_acct_reservation_cond;
+			break;
 		default:
 			fatal("Unknown cond type");
 			return;
@@ -2020,13 +2130,15 @@ void inline slurmdbd_free_job_start_msg(uint16_t rpc_version,
 		xfree(msg->cluster);
 		xfree(msg->name);
 		xfree(msg->nodes);
+		xfree(msg->node_inx);
 		xfree(msg->partition);
+		xfree(msg->wckey);
 		xfree(msg);
 	}
 }
 
-void inline slurmdbd_free_job_start_rc_msg(uint16_t rpc_version, 
-					   dbd_job_start_rc_msg_t *msg)
+void inline slurmdbd_free_id_rc_msg(uint16_t rpc_version, 
+					   dbd_id_rc_msg_t *msg)
 {
 	xfree(msg);
 }
@@ -2135,6 +2247,7 @@ void inline slurmdbd_free_step_start_msg(uint16_t rpc_version,
 	if (msg) {
 		xfree(msg->name);
 		xfree(msg->nodes);
+		xfree(msg->node_inx);
 		xfree(msg);
 	}
 }
@@ -2231,9 +2344,16 @@ void inline
 slurmdbd_pack_cluster_procs_msg(uint16_t rpc_version, 
 				dbd_cluster_procs_msg_t *msg, Buf buffer)
 {
-	packstr(msg->cluster_name, buffer);
-	pack32(msg->proc_count,    buffer);
-	pack_time(msg->event_time, buffer);
+	if(rpc_version >= 5) {
+		packstr(msg->cluster_name, buffer);
+		packstr(msg->cluster_nodes, buffer);
+		pack32(msg->proc_count,    buffer);
+		pack_time(msg->event_time, buffer);
+	} else {
+		packstr(msg->cluster_name, buffer);
+		pack32(msg->proc_count,    buffer);
+		pack_time(msg->event_time, buffer);
+	}
 }
 
 int inline
@@ -2245,9 +2365,20 @@ slurmdbd_unpack_cluster_procs_msg(uint16_t rpc_version,
 
 	msg_ptr = xmalloc(sizeof(dbd_cluster_procs_msg_t));
 	*msg = msg_ptr;
-	safe_unpackstr_xmalloc(&msg_ptr->cluster_name, &uint32_tmp, buffer);
-	safe_unpack32(&msg_ptr->proc_count, buffer);
-	safe_unpack_time(&msg_ptr->event_time, buffer);
+
+	if(rpc_version >= 5) {
+		safe_unpackstr_xmalloc(&msg_ptr->cluster_name,
+				       &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&msg_ptr->cluster_nodes,
+				       &uint32_tmp, buffer);
+		safe_unpack32(&msg_ptr->proc_count, buffer);
+		safe_unpack_time(&msg_ptr->event_time, buffer);
+	} else {
+		safe_unpackstr_xmalloc(&msg_ptr->cluster_name,
+				       &uint32_tmp, buffer);
+		safe_unpack32(&msg_ptr->proc_count, buffer);
+		safe_unpack_time(&msg_ptr->event_time, buffer);
+	}
 	return SLURM_SUCCESS;
 
 unpack_error:
@@ -2256,6 +2387,58 @@ unpack_error:
 	return SLURM_ERROR;
 }
 
+void inline slurmdbd_pack_rec_msg(uint16_t rpc_version, 
+				  slurmdbd_msg_type_t type,
+				  dbd_rec_msg_t *msg, Buf buffer)
+{
+	void (*my_function) (void *object, uint16_t rpc_version, Buf buffer);
+
+	switch(type) {
+	case DBD_ADD_RESV:
+	case DBD_REMOVE_RESV:
+	case DBD_MODIFY_RESV:
+		my_function = pack_acct_reservation_rec;
+		break;
+	default:
+		fatal("Unknown pack type");
+		return;
+	}
+
+	(*(my_function))(msg->rec, rpc_version, buffer);
+}
+
+int inline slurmdbd_unpack_rec_msg(uint16_t rpc_version, 
+				   slurmdbd_msg_type_t type,
+				   dbd_rec_msg_t **msg, Buf buffer)
+{
+	dbd_rec_msg_t *msg_ptr = NULL;
+	int (*my_function) (void **object, uint16_t rpc_version, Buf buffer);
+
+	switch(type) {
+	case DBD_ADD_RESV:
+	case DBD_REMOVE_RESV:
+	case DBD_MODIFY_RESV:
+		my_function = unpack_acct_reservation_rec;
+		break;
+	default:
+		fatal("Unknown unpack type");
+		return SLURM_ERROR;
+	}
+
+	msg_ptr = xmalloc(sizeof(dbd_rec_msg_t));
+	*msg = msg_ptr;
+
+	if((*(my_function))(&msg_ptr->rec, rpc_version, buffer) == SLURM_ERROR)
+		goto unpack_error;
+	
+	return SLURM_SUCCESS;
+
+unpack_error:
+	slurmdbd_free_rec_msg(rpc_version, type, msg_ptr);
+	*msg = NULL;
+	return SLURM_ERROR;
+}
+
 void inline slurmdbd_pack_cond_msg(uint16_t rpc_version, 
 				   slurmdbd_msg_type_t type,
 				   dbd_cond_msg_t *msg, Buf buffer)
@@ -2296,6 +2479,9 @@ void inline slurmdbd_pack_cond_msg(uint16_t rpc_version,
 	case DBD_ARCHIVE_DUMP:
 		my_function = pack_acct_archive_cond;
 		break;
+	case DBD_GET_RESVS:
+		my_function = pack_acct_reservation_cond;
+		break;
 	default:
 		fatal("Unknown pack type");
 		return;
@@ -2345,6 +2531,9 @@ int inline slurmdbd_unpack_cond_msg(uint16_t rpc_version,
 	case DBD_ARCHIVE_DUMP:
 		my_function = unpack_acct_archive_cond;
 		break;
+	case DBD_GET_RESVS:
+		my_function = unpack_acct_reservation_cond;
+		break;
 	default:
 		fatal("Unknown unpack type");
 		return SLURM_ERROR;
@@ -2588,7 +2777,31 @@ void inline
 slurmdbd_pack_job_start_msg(uint16_t rpc_version, 
 			    dbd_job_start_msg_t *msg, Buf buffer)
 {
-	if(rpc_version >= 3) {
+	if(rpc_version >= 5) {
+		packstr(msg->account, buffer);
+		pack32(msg->alloc_cpus, buffer);
+		pack32(msg->alloc_nodes, buffer);
+		pack32(msg->assoc_id, buffer);
+		packstr(msg->block_id, buffer);
+		packstr(msg->cluster, buffer);
+		pack32(msg->db_index, buffer);
+		pack_time(msg->eligible_time, buffer);
+		pack32(msg->gid, buffer);
+		pack32(msg->job_id, buffer);
+		pack16(msg->job_state, buffer);
+		packstr(msg->name, buffer);
+		packstr(msg->nodes, buffer);
+		packstr(msg->node_inx, buffer);
+		packstr(msg->partition, buffer);
+		pack32(msg->priority, buffer);
+		pack32(msg->req_cpus, buffer);
+		pack32(msg->resv_id, buffer);
+		pack_time(msg->start_time, buffer);
+		pack_time(msg->submit_time, buffer);
+		pack32(msg->timelimit, buffer);		
+		pack32(msg->uid, buffer);		
+		packstr(msg->wckey, buffer);
+	} else if(rpc_version >= 3) {
 		packstr(msg->account, buffer);
 		pack32(msg->alloc_cpus, buffer);
 		pack32(msg->assoc_id, buffer);
@@ -2636,9 +2849,10 @@ slurmdbd_unpack_job_start_msg(uint16_t rpc_version,
 	dbd_job_start_msg_t *msg_ptr = xmalloc(sizeof(dbd_job_start_msg_t));
 	*msg = msg_ptr;
 
-	if(rpc_version >= 3) {
+	if(rpc_version >= 5) {
 		safe_unpackstr_xmalloc(&msg_ptr->account, &uint32_tmp, buffer);
 		safe_unpack32(&msg_ptr->alloc_cpus, buffer);
+		safe_unpack32(&msg_ptr->alloc_nodes, buffer);
 		safe_unpack32(&msg_ptr->assoc_id, buffer);
 		safe_unpackstr_xmalloc(&msg_ptr->block_id, &uint32_tmp, buffer);
 		safe_unpackstr_xmalloc(&msg_ptr->cluster, &uint32_tmp, buffer);
@@ -2649,13 +2863,63 @@ slurmdbd_unpack_job_start_msg(uint16_t rpc_version,
 		safe_unpack16(&msg_ptr->job_state, buffer);
 		safe_unpackstr_xmalloc(&msg_ptr->name, &uint32_tmp, buffer);
 		safe_unpackstr_xmalloc(&msg_ptr->nodes, &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&msg_ptr->node_inx, &uint32_tmp, buffer);
 		safe_unpackstr_xmalloc(&msg_ptr->partition,
 				       &uint32_tmp, buffer);
 		safe_unpack32(&msg_ptr->priority, buffer);
 		safe_unpack32(&msg_ptr->req_cpus, buffer);
+		safe_unpack32(&msg_ptr->resv_id, buffer);
 		safe_unpack_time(&msg_ptr->start_time, buffer);
 		safe_unpack_time(&msg_ptr->submit_time, buffer);
+		safe_unpack32(&msg_ptr->timelimit, buffer);	
 		safe_unpack32(&msg_ptr->uid, buffer);	
+		safe_unpackstr_xmalloc(&msg_ptr->wckey, &uint32_tmp, buffer);
+	} else if(rpc_version >= 3) {
+		char *temp = NULL, *jname = NULL;
+		safe_unpackstr_xmalloc(&msg_ptr->account, &uint32_tmp, buffer);
+		safe_unpack32(&msg_ptr->alloc_cpus, buffer);
+		safe_unpack32(&msg_ptr->assoc_id, buffer);
+		safe_unpackstr_xmalloc(&msg_ptr->block_id, &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&msg_ptr->cluster, &uint32_tmp, buffer);
+		safe_unpack32(&msg_ptr->db_index, buffer);
+		safe_unpack_time(&msg_ptr->eligible_time, buffer);
+		safe_unpack32(&msg_ptr->gid, buffer);
+		safe_unpack32(&msg_ptr->job_id, buffer);
+		safe_unpack16(&msg_ptr->job_state, buffer);
+		safe_unpackstr_xmalloc(&msg_ptr->name, &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&msg_ptr->nodes, &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&msg_ptr->partition,
+				       &uint32_tmp, buffer);
+		safe_unpack32(&msg_ptr->priority, buffer);
+		safe_unpack32(&msg_ptr->req_cpus, buffer);
+		safe_unpack_time(&msg_ptr->start_time, buffer);
+		safe_unpack_time(&msg_ptr->submit_time, buffer);
+		safe_unpack32(&msg_ptr->uid, buffer);	
+
+		/* first set the jname to the msg_ptr->name */
+		jname = xstrdup(msg_ptr->name);
+		/* then look for '"' since that is the delimiter for
+		   the wckey */
+		if((temp = strchr(jname, '\"'))) {
+			if(strrchr(jname, '\"') != temp) {
+				error("job %u has quotes in its name '%s', "
+				      "no way to get correct wckey, "
+				      "setting name to 'bad_name'", 
+				      msg_ptr->job_id, jname);
+				xfree(jname);
+				jname = xstrdup("bad_name");
+			} else {			
+				/* if we have a wckey, overwrite the '"'
+				 * with a NUL to end the jname */
+				temp[0] = '\0';
+				/* increment and copy the remainder */
+				temp++;
+				msg_ptr->wckey = xstrdup(temp);
+			}
+		}
+		xfree(msg_ptr->name);
+		msg_ptr->name = xstrdup(jname);
+		xfree(jname);
 	} else {
 		safe_unpackstr_xmalloc(&msg_ptr->account, &uint32_tmp, buffer);
 		safe_unpack32(&msg_ptr->alloc_cpus, buffer);
@@ -2686,26 +2950,25 @@ unpack_error:
 }
 
 void inline 
-slurmdbd_pack_job_start_rc_msg(uint16_t rpc_version, 
-			       dbd_job_start_rc_msg_t *msg, Buf buffer)
+slurmdbd_pack_id_rc_msg(uint16_t rpc_version, 
+			dbd_id_rc_msg_t *msg, Buf buffer)
 {
-	pack32(msg->db_index, buffer);
+	pack32(msg->id, buffer);
 	pack32(msg->return_code, buffer);
 }
 
 int inline 
-slurmdbd_unpack_job_start_rc_msg(uint16_t rpc_version, 
-				 dbd_job_start_rc_msg_t **msg, Buf buffer)
+slurmdbd_unpack_id_rc_msg(uint16_t rpc_version, 
+			  dbd_id_rc_msg_t **msg, Buf buffer)
 {
-	dbd_job_start_rc_msg_t *msg_ptr = 
-		xmalloc(sizeof(dbd_job_start_rc_msg_t));
+	dbd_id_rc_msg_t *msg_ptr = xmalloc(sizeof(dbd_id_rc_msg_t));
 	*msg = msg_ptr;
-	safe_unpack32(&msg_ptr->db_index, buffer);
+	safe_unpack32(&msg_ptr->id, buffer);
 	safe_unpack32(&msg_ptr->return_code, buffer);
 	return SLURM_SUCCESS;
 
 unpack_error:
-	slurmdbd_free_job_start_rc_msg(rpc_version, msg_ptr);
+	slurmdbd_free_id_rc_msg(rpc_version, msg_ptr);
 	*msg = NULL;
 	return SLURM_ERROR;
 }
@@ -2764,6 +3027,9 @@ void inline slurmdbd_pack_list_msg(uint16_t rpc_version,
 	case DBD_GOT_CLUSTERS:
 		my_function = pack_acct_cluster_rec;
 		break;
+	case DBD_GOT_CONFIG:
+		my_function = pack_config_key_pair;
+		break;
 	case DBD_GOT_JOBS:
 		my_function = pack_jobacct_job_rec;
 		break;
@@ -2774,6 +3040,9 @@ void inline slurmdbd_pack_list_msg(uint16_t rpc_version,
 	case DBD_GOT_QOS:
 		my_function = pack_acct_qos_rec;
 		break;
+	case DBD_GOT_RESVS:
+		my_function = pack_acct_reservation_rec;
+		break;
 	case DBD_ADD_WCKEYS:
 	case DBD_GOT_WCKEYS:
 		my_function = pack_acct_wckey_rec;
@@ -2785,9 +3054,6 @@ void inline slurmdbd_pack_list_msg(uint16_t rpc_version,
 	case DBD_GOT_TXN:
 		my_function = pack_acct_txn_rec;
 		break;
-	case DBD_UPDATE_SHARES_USED:
-		my_function = pack_update_shares_used;
-		break;
 	default:
 		fatal("Unknown pack type");
 		return;
@@ -2835,6 +3101,10 @@ int inline slurmdbd_unpack_list_msg(uint16_t rpc_version,
 		my_function = unpack_acct_cluster_rec;
 		my_destroy = destroy_acct_cluster_rec;
 		break;
+	case DBD_GOT_CONFIG:
+		my_function = unpack_config_key_pair;
+		my_destroy = destroy_config_key_pair;
+		break;
 	case DBD_GOT_JOBS:
 		my_function = unpack_jobacct_job_rec;
 		my_destroy = destroy_jobacct_job_rec;
@@ -2848,6 +3118,10 @@ int inline slurmdbd_unpack_list_msg(uint16_t rpc_version,
 		my_function = unpack_acct_qos_rec;
 		my_destroy = destroy_acct_qos_rec;
 		break;
+	case DBD_GOT_RESVS:
+		my_function = unpack_acct_reservation_rec;
+		my_destroy = destroy_acct_reservation_rec;
+		break;
 	case DBD_ADD_WCKEYS:
 	case DBD_GOT_WCKEYS:
 		my_function = unpack_acct_wckey_rec;
@@ -2862,10 +3136,6 @@ int inline slurmdbd_unpack_list_msg(uint16_t rpc_version,
 		my_function = unpack_acct_txn_rec;
 		my_destroy = destroy_acct_txn_rec;
 		break;
-	case DBD_UPDATE_SHARES_USED:
-		my_function = unpack_update_shares_used;
-		my_destroy = destroy_update_shares_rec;
-		break;
 	default:
 		fatal("Unknown unpack type");
 		return SLURM_ERROR;
@@ -2977,12 +3247,22 @@ void inline
 slurmdbd_pack_node_state_msg(uint16_t rpc_version,
 			     dbd_node_state_msg_t *msg, Buf buffer)
 {
-	packstr(msg->cluster_name, buffer);
-	pack32(msg->cpu_count, buffer);
-	packstr(msg->hostlist, buffer);
-	packstr(msg->reason, buffer);
-	pack16(msg->new_state, buffer);
-	pack_time(msg->event_time, buffer);
+	if(rpc_version >= 5) {
+		packstr(msg->cluster_name, buffer);
+		pack32(msg->cpu_count, buffer);
+		packstr(msg->hostlist, buffer);
+		packstr(msg->reason, buffer);
+		pack16(msg->new_state, buffer);
+		pack_time(msg->event_time, buffer);
+		pack16(msg->state, buffer);
+	} else {
+		packstr(msg->cluster_name, buffer);
+		pack32(msg->cpu_count, buffer);
+		packstr(msg->hostlist, buffer);
+		packstr(msg->reason, buffer);
+		pack16(msg->new_state, buffer);
+		pack_time(msg->event_time, buffer);
+	}
 }
 
 int inline
@@ -2994,12 +3274,26 @@ slurmdbd_unpack_node_state_msg(uint16_t rpc_version,
 
 	msg_ptr = xmalloc(sizeof(dbd_node_state_msg_t));
 	*msg = msg_ptr;
-	safe_unpackstr_xmalloc(&msg_ptr->cluster_name, &uint32_tmp, buffer);
-	safe_unpack32(&msg_ptr->cpu_count, buffer);
-	safe_unpackstr_xmalloc(&msg_ptr->hostlist, &uint32_tmp, buffer);
-	safe_unpackstr_xmalloc(&msg_ptr->reason,   &uint32_tmp, buffer);
-	safe_unpack16(&msg_ptr->new_state, buffer);
-	safe_unpack_time(&msg_ptr->event_time, buffer);
+
+	if(rpc_version >= 5) {
+		safe_unpackstr_xmalloc(&msg_ptr->cluster_name, 
+				       &uint32_tmp, buffer);
+		safe_unpack32(&msg_ptr->cpu_count, buffer);
+		safe_unpackstr_xmalloc(&msg_ptr->hostlist, &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&msg_ptr->reason,   &uint32_tmp, buffer);
+		safe_unpack16(&msg_ptr->new_state, buffer);
+		safe_unpack_time(&msg_ptr->event_time, buffer);
+		safe_unpack16(&msg_ptr->state, buffer);
+	} else {
+		safe_unpackstr_xmalloc(&msg_ptr->cluster_name, 
+				       &uint32_tmp, buffer);
+		safe_unpack32(&msg_ptr->cpu_count, buffer);
+		safe_unpackstr_xmalloc(&msg_ptr->hostlist, &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&msg_ptr->reason,   &uint32_tmp, buffer);
+		safe_unpack16(&msg_ptr->new_state, buffer);
+		safe_unpack_time(&msg_ptr->event_time, buffer);	
+	}
+
 	return SLURM_SUCCESS;
 
 unpack_error:
@@ -3065,7 +3359,13 @@ void inline
 slurmdbd_pack_roll_usage_msg(uint16_t rpc_version,
 			     dbd_roll_usage_msg_t *msg, Buf buffer)
 {
-	pack_time(msg->start, buffer);
+	if(rpc_version >= 5) {
+		pack16(msg->archive_data, buffer);
+		pack_time(msg->end, buffer);
+		pack_time(msg->start, buffer);
+	} else {
+		pack_time(msg->start, buffer);
+	}
 }
 
 int inline 
@@ -3075,7 +3375,14 @@ slurmdbd_unpack_roll_usage_msg(uint16_t rpc_version,
 	dbd_roll_usage_msg_t *msg_ptr = xmalloc(sizeof(dbd_roll_usage_msg_t));
 
 	*msg = msg_ptr;
-	safe_unpack_time(&msg_ptr->start, buffer);
+
+	if(rpc_version >= 5) {
+		safe_unpack16(&msg_ptr->archive_data, buffer);
+		safe_unpack_time(&msg_ptr->end, buffer);
+		safe_unpack_time(&msg_ptr->start, buffer);
+	} else {
+		safe_unpack_time(&msg_ptr->start, buffer);
+	}
 	return SLURM_SUCCESS;
 	
 unpack_error:
@@ -3130,15 +3437,31 @@ void inline
 slurmdbd_pack_step_start_msg(uint16_t rpc_version, dbd_step_start_msg_t *msg,
 			     Buf buffer)
 {
-	pack32(msg->assoc_id, buffer);
-	pack32(msg->db_index, buffer);
-	pack32(msg->job_id, buffer);
-	packstr(msg->name, buffer);
-	packstr(msg->nodes, buffer);
-	pack_time(msg->start_time, buffer);
-	pack_time(msg->job_submit_time, buffer);
-	pack32(msg->step_id, buffer);
-	pack32(msg->total_procs, buffer);
+	if(rpc_version >= 5) {
+		pack32(msg->assoc_id, buffer);
+		pack32(msg->db_index, buffer);
+		pack32(msg->job_id, buffer);
+		packstr(msg->name, buffer);
+		packstr(msg->nodes, buffer);
+		packstr(msg->node_inx, buffer);
+		pack32(msg->node_cnt, buffer);
+		pack_time(msg->start_time, buffer);
+		pack_time(msg->job_submit_time, buffer);
+		pack32(msg->step_id, buffer);
+		pack16(msg->task_dist, buffer);
+		pack32(msg->total_procs, buffer);
+		pack32(msg->total_tasks, buffer);
+	} else {
+		pack32(msg->assoc_id, buffer);
+		pack32(msg->db_index, buffer);
+		pack32(msg->job_id, buffer);
+		packstr(msg->name, buffer);
+		packstr(msg->nodes, buffer);
+		pack_time(msg->start_time, buffer);
+		pack_time(msg->job_submit_time, buffer);
+		pack32(msg->step_id, buffer);
+		pack32(msg->total_procs, buffer);
+	}
 }
 
 int inline 
@@ -3148,15 +3471,32 @@ slurmdbd_unpack_step_start_msg(uint16_t rpc_version,
 	uint32_t uint32_tmp;
 	dbd_step_start_msg_t *msg_ptr = xmalloc(sizeof(dbd_step_start_msg_t));
 	*msg = msg_ptr;
-	safe_unpack32(&msg_ptr->assoc_id, buffer);
-	safe_unpack32(&msg_ptr->db_index, buffer);
-	safe_unpack32(&msg_ptr->job_id, buffer);
-	safe_unpackstr_xmalloc(&msg_ptr->name, &uint32_tmp, buffer);
-	safe_unpackstr_xmalloc(&msg_ptr->nodes, &uint32_tmp, buffer);
-	safe_unpack_time(&msg_ptr->start_time, buffer);
-	safe_unpack_time(&msg_ptr->job_submit_time, buffer);
-	safe_unpack32(&msg_ptr->step_id, buffer);
-	safe_unpack32(&msg_ptr->total_procs, buffer);
+	if(rpc_version >= 5) {
+		safe_unpack32(&msg_ptr->assoc_id, buffer);
+		safe_unpack32(&msg_ptr->db_index, buffer);
+		safe_unpack32(&msg_ptr->job_id, buffer);
+		safe_unpackstr_xmalloc(&msg_ptr->name, &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&msg_ptr->nodes, &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&msg_ptr->node_inx, &uint32_tmp, buffer);
+		safe_unpack32(&msg_ptr->node_cnt, buffer);
+		safe_unpack_time(&msg_ptr->start_time, buffer);
+		safe_unpack_time(&msg_ptr->job_submit_time, buffer);
+		safe_unpack32(&msg_ptr->step_id, buffer);
+		safe_unpack16(&msg_ptr->task_dist, buffer);
+		safe_unpack32(&msg_ptr->total_procs, buffer);
+		safe_unpack32(&msg_ptr->total_tasks, buffer);
+	} else {
+		safe_unpack32(&msg_ptr->assoc_id, buffer);
+		safe_unpack32(&msg_ptr->db_index, buffer);
+		safe_unpack32(&msg_ptr->job_id, buffer);
+		safe_unpackstr_xmalloc(&msg_ptr->name, &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&msg_ptr->nodes, &uint32_tmp, buffer);
+		safe_unpack_time(&msg_ptr->start_time, buffer);
+		safe_unpack_time(&msg_ptr->job_submit_time, buffer);
+		safe_unpack32(&msg_ptr->step_id, buffer);
+		safe_unpack32(&msg_ptr->total_procs, buffer);		
+	}
+
 	return SLURM_SUCCESS;
 
 unpack_error:
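Aside, not part of the patch: every rpc_version >= 5 branch above follows
the same convention. Fields new in SLURMDBD_VERSION 5 (cluster_nodes,
node_inx, task_dist, alloc_nodes, wckey, ...) are serialized only when the
negotiated protocol version is at least 5, so a 2.0 daemon can still talk
the older layout to a version-4 peer. The sketch below shows the shape of
the pattern with illustrative stand-in types, not the real Buf/packstr API:

#include <stdint.h>
#include <stdio.h>

/* hypothetical message with one field added at protocol version 5 */
struct node_state_msg {
	const char *hostlist;	/* present in every supported version */
	uint16_t    state;	/* serialized only when rpc_version >= 5 */
};

static void pack_node_state(uint16_t rpc_version,
			    const struct node_state_msg *msg)
{
	/* peers agree on min(local, remote) version at connect time;
	 * the unpack side must branch on the same condition */
	printf("pack hostlist=%s\n", msg->hostlist);
	if (rpc_version >= 5)
		printf("pack state=%u\n", (unsigned)msg->state);
}

int main(void)
{
	struct node_state_msg msg = { "tux[0-15]", 3 };

	pack_node_state(4, &msg);	/* old peer: state omitted */
	pack_node_state(5, &msg);	/* new peer: state included */
	return 0;
}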
diff --git a/src/common/slurmdbd_defs.h b/src/common/slurmdbd_defs.h
index b1aa9f8b1830152a69e40fe28401969ce7d6cb89..c5b37a6a5eace5c8b69beefd296f237b7e550344 100644
--- a/src/common/slurmdbd_defs.h
+++ b/src/common/slurmdbd_defs.h
@@ -4,10 +4,11 @@
  *  Copyright (C) 2008 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -84,7 +85,7 @@
  *	communicating with it (e.g. it will not accept messages with a
  *	version higher than SLURMDBD_VERSION).
  */
-#define SLURMDBD_VERSION	04
+#define SLURMDBD_VERSION	05 
 #define SLURMDBD_VERSION_MIN	02
 
 /* SLURM DBD message types */
@@ -117,7 +118,9 @@ typedef enum {
 	DBD_GOT_USERS,  	/* Response to DBD_GET_USERS		*/
 	DBD_JOB_COMPLETE,	/* Record job completion 		*/
 	DBD_JOB_START,		/* Record job starting			*/
-	DBD_JOB_START_RC,	/* return db_index from job insertion 	*/
+	DBD_ID_RC,	        /* Return the db_index from a job
+				 * insertion, or any other id
+				 * produced by other commands   	*/
 	DBD_JOB_SUSPEND,	/* Record job suspension		*/
 	DBD_MODIFY_ACCOUNTS,    /* Modify existing account              */
 	DBD_MODIFY_ASSOCS,      /* Modify existing association          */
@@ -135,7 +138,9 @@ typedef enum {
 	DBD_ROLL_USAGE,         /* Roll up usage                        */
 	DBD_STEP_COMPLETE,	/* Record step completion		*/
 	DBD_STEP_START,		/* Record step starting			*/
-	DBD_UPDATE_SHARES_USED,	/* Record current share usage		*/
+	DBD_UPDATE_SHARES_USED,	/* No longer does anything, but
+				 * must remain here for
+				 * history's sake	*/
 	DBD_GET_JOBS_COND, 	/* Get job information with a condition */
 	DBD_GET_TXN,		/* Get transaction information		*/
 	DBD_GOT_TXN,		/* Got transaction information		*/
@@ -154,6 +159,13 @@ typedef enum {
 	DBD_ARCHIVE_DUMP,    	/* issue a request to dump jobs to
 				 * archive */
 	DBD_ARCHIVE_LOAD,    	/* load an archive file    	        */
+	DBD_ADD_RESV,    	/* add a reservation                    */
+	DBD_REMOVE_RESV,    	/* remove a reservation                 */
+	DBD_MODIFY_RESV,    	/* modify a reservation                 */
+	DBD_GET_RESVS,    	/* Get reservation information  	*/
+	DBD_GOT_RESVS,		/* Response to DBD_GET_RESV		*/
+	DBD_GET_CONFIG,  	/* Get configuration information	*/
+	DBD_GOT_CONFIG,		/* Response to DBD_GET_CONFIG		*/
 } slurmdbd_msg_type_t;
 
 /*****************************************************************************\
@@ -172,10 +184,17 @@ typedef struct {
 
 typedef struct dbd_cluster_procs_msg {
 	char *cluster_name;	/* name of cluster */
+	char *cluster_nodes;	/* nodes in the cluster */
 	uint32_t proc_count;	/* total processor count */
 	time_t event_time;	/* time of transition */
 } dbd_cluster_procs_msg_t;
 
+typedef struct {
+	void *rec; /* this could be anything based on the type; the
+		     * *_rec_t types are defined in
+		     * slurm_accounting_storage.h */
+} dbd_rec_msg_t;
+
 typedef struct {
 	void *cond; /* this could be anything based on the type types
 		     * are defined in slurm_accounting_storage.h
@@ -183,13 +202,15 @@ typedef struct {
 } dbd_cond_msg_t;
 
 typedef struct {
+	uint16_t archive_data;
+	time_t end;
 	time_t start;
 } dbd_roll_usage_msg_t;
 
 typedef struct {
+	time_t end;
 	void *rec;
 	time_t start;
-	time_t end;
 } dbd_usage_msg_t;
 
 typedef struct dbd_get_jobs_msg {
@@ -234,6 +255,7 @@ typedef struct dbd_job_start_msg {
 	char *   account;       /* Account name for those not running
 				 * with associations */
 	uint32_t alloc_cpus;	/* count of allocated processors */
+	uint32_t alloc_nodes;   /* number of nodes allocated to the job */
 	uint32_t assoc_id;	/* accounting association id */
 	char *   cluster;       /* cluster job is being ran on */
 	char *   block_id;      /* Bluegene block id */
@@ -244,18 +266,24 @@ typedef struct dbd_job_start_msg {
 	uint16_t job_state;	/* job state */
 	char *   name;		/* job name */
 	char *   nodes;		/* hosts allocated to the job */
+	char *   node_inx;      /* ranged bitmap string of hosts
+				 * allocated to the job */
 	char *   partition;	/* partition job is running on */
 	uint32_t priority;	/* job priority */
 	uint32_t req_cpus;	/* count of req processors */
+	uint32_t resv_id;	/* reservation id */
 	time_t   start_time;	/* job start time */
 	time_t   submit_time;	/* job submit time */
+	uint32_t timelimit;	/* job time limit */
 	uint32_t uid;	        /* user ID if associations are being used */
+	char *   wckey;		/* wckey name */
 } dbd_job_start_msg_t;
 
-typedef struct dbd_job_start_rc_msg {
-	uint32_t db_index;	/* db_index */
+/* returns a uint32_t along with a return code */
+typedef struct dbd_id_rc_msg {
+	uint32_t id;
 	uint32_t return_code;
-} dbd_job_start_rc_msg_t;
+} dbd_id_rc_msg_t;
 
 typedef struct dbd_job_suspend_msg {
 	uint32_t assoc_id;	/* accounting association id needed
@@ -288,6 +316,8 @@ typedef struct dbd_node_state_msg {
 	char *hostlist;		/* name of hosts */
 	uint16_t new_state;	/* new state of host, see DBD_NODE_STATE_* */
 	char *reason;		/* explanation for the node's state */
+	uint16_t state;         /* current state of the node.  Used to get
+				   flags on the state (e.g. maintenance) */
 } dbd_node_state_msg_t;
 
 typedef struct dbd_rc_msg {
@@ -314,6 +344,7 @@ typedef struct dbd_step_comp_msg {
 				  * in db */
 	uint32_t step_id;	/* step ID */
 	uint32_t total_procs;	/* count of allocated processors */
+	uint32_t total_tasks;	/* count of tasks for step */
 } dbd_step_comp_msg_t;
 
 typedef struct dbd_step_start_msg {
@@ -322,11 +353,16 @@ typedef struct dbd_step_start_msg {
 	uint32_t job_id;	/* job ID */
 	char *   name;		/* step name */
 	char *   nodes;		/* hosts allocated to the step */
+	char *   node_inx;	/* bitmap index of hosts allocated to
+				 * the step */
+	uint32_t node_cnt;      /* number of nodes used by the step */
 	time_t   start_time;	/* step start time */
 	time_t   job_submit_time;/* job submit time needed to find job record
 				  * in db */
 	uint32_t step_id;	/* step ID */
+	uint16_t task_dist;     /* task layout method for the step */
 	uint32_t total_procs;	/* count of allocated processors */
+	uint32_t total_tasks;	/* count of tasks for step */
 } dbd_step_start_msg_t;
 
 /* flag to let us know if we are running on cache or from the actual
@@ -389,6 +425,9 @@ void inline slurmdbd_free_acct_coord_msg(uint16_t rpc_version,
 					 dbd_acct_coord_msg_t *msg);
 void inline slurmdbd_free_cluster_procs_msg(uint16_t rpc_version, 
 					    dbd_cluster_procs_msg_t *msg);
+void inline slurmdbd_free_rec_msg(uint16_t rpc_version, 
+				  slurmdbd_msg_type_t type,
+				  dbd_rec_msg_t *msg);
 void inline slurmdbd_free_cond_msg(uint16_t rpc_version, 
 				   slurmdbd_msg_type_t type,
 				   dbd_cond_msg_t *msg);
@@ -402,8 +441,8 @@ void inline slurmdbd_free_job_complete_msg(uint16_t rpc_version,
 					   dbd_job_comp_msg_t *msg);
 void inline slurmdbd_free_job_start_msg(uint16_t rpc_version, 
 					dbd_job_start_msg_t *msg);
-void inline slurmdbd_free_job_start_rc_msg(uint16_t rpc_version, 
-					   dbd_job_start_rc_msg_t *msg);
+void inline slurmdbd_free_id_rc_msg(uint16_t rpc_version, 
+					   dbd_id_rc_msg_t *msg);
 void inline slurmdbd_free_job_suspend_msg(uint16_t rpc_version, 
 					  dbd_job_suspend_msg_t *msg);
 void inline slurmdbd_free_list_msg(uint16_t rpc_version, 
@@ -436,6 +475,9 @@ void inline slurmdbd_pack_acct_coord_msg(uint16_t rpc_version,
 void inline slurmdbd_pack_cluster_procs_msg(uint16_t rpc_version, 
 					    dbd_cluster_procs_msg_t *msg,
 					    Buf buffer);
+void inline slurmdbd_pack_rec_msg(uint16_t rpc_version, 
+				  slurmdbd_msg_type_t type,
+				  dbd_rec_msg_t *msg, Buf buffer);
 void inline slurmdbd_pack_cond_msg(uint16_t rpc_version, 
 				   slurmdbd_msg_type_t type,
 				   dbd_cond_msg_t *msg, Buf buffer);
@@ -452,9 +494,9 @@ void inline slurmdbd_pack_job_complete_msg(uint16_t rpc_version,
 void inline slurmdbd_pack_job_start_msg(uint16_t rpc_version, 
 					dbd_job_start_msg_t *msg,
 					Buf buffer);
-void inline slurmdbd_pack_job_start_rc_msg(uint16_t rpc_version, 
-					   dbd_job_start_rc_msg_t *msg,
-					   Buf buffer);
+void inline slurmdbd_pack_id_rc_msg(uint16_t rpc_version, 
+				    dbd_id_rc_msg_t *msg,
+				    Buf buffer);
 void inline slurmdbd_pack_job_suspend_msg(uint16_t rpc_version, 
 					  dbd_job_suspend_msg_t *msg,
 					  Buf buffer);
@@ -493,6 +535,9 @@ int inline slurmdbd_unpack_acct_coord_msg(uint16_t rpc_version,
 int inline slurmdbd_unpack_cluster_procs_msg(uint16_t rpc_version, 
 					     dbd_cluster_procs_msg_t **msg,
 					     Buf buffer);
+int inline slurmdbd_unpack_rec_msg(uint16_t rpc_version, 
+				   slurmdbd_msg_type_t type,
+				   dbd_rec_msg_t **msg, Buf buffer);
 int inline slurmdbd_unpack_cond_msg(uint16_t rpc_version, 
 				    slurmdbd_msg_type_t type,
 				    dbd_cond_msg_t **msg, Buf buffer);
@@ -509,9 +554,9 @@ int inline slurmdbd_unpack_job_complete_msg(uint16_t rpc_version,
 int inline slurmdbd_unpack_job_start_msg(uint16_t rpc_version, 
 					 dbd_job_start_msg_t **msg,
 					 Buf buffer);
-int inline slurmdbd_unpack_job_start_rc_msg(uint16_t rpc_version, 
-					    dbd_job_start_rc_msg_t **msg,
-					    Buf buffer);
+int inline slurmdbd_unpack_id_rc_msg(uint16_t rpc_version, 
+				     dbd_id_rc_msg_t **msg,
+				     Buf buffer);
 int inline slurmdbd_unpack_job_suspend_msg(uint16_t rpc_version, 
 					   dbd_job_suspend_msg_t **msg,
 					   Buf buffer);
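
Raising SLURMDBD_VERSION to 05 while leaving SLURMDBD_VERSION_MIN at 02 means the daemon keeps talking to peers up to three protocol revisions old but, per the comment in this header, refuses messages newer than itself. A hedged sketch of the check that window implies; _check_rpc_version is an illustrative name, not a function from the SLURM source:

    /* Sketch only: reject peers outside the supported protocol window;
     * _check_rpc_version is an illustrative name. */
    static int _check_rpc_version(uint16_t rpc_version)
    {
        if (rpc_version < SLURMDBD_VERSION_MIN) {
            error("slurmdbd: peer version %d below minimum %d",
                  rpc_version, SLURMDBD_VERSION_MIN);
            return SLURM_ERROR;
        }
        if (rpc_version > SLURMDBD_VERSION) {
            error("slurmdbd: peer version %d newer than supported %d",
                  rpc_version, SLURMDBD_VERSION);
            return SLURM_ERROR;
        }
        return SLURM_SUCCESS;
    }
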
diff --git a/src/common/stepd_api.c b/src/common/stepd_api.c
index 4e8e8ee97809933d9c9973b7c37e779e3a9b6bb5..939974569740e57eda3196b0b3f6f498ff6abfe3 100644
--- a/src/common/stepd_api.c
+++ b/src/common/stepd_api.c
@@ -1,16 +1,17 @@
 /*****************************************************************************\
  *  src/common/stepd_api.c - slurmstepd message API
- *  $Id: stepd_api.c 14503 2008-07-14 17:27:40Z jette $
+ *  $Id: stepd_api.c 16867 2009-03-12 16:35:42Z jette $
  *****************************************************************************
  *  Copyright (C) 2005-2007 The Regents of the University of California.
- *  Copyright (C) 2008 Lawrence Livermore National Security.
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
  *  Portions Copyright (C) 2008 Vijay Ramasubramanian
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Christopher Morrone <morrone2@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -324,14 +325,21 @@ rwfail:
  * Send a checkpoint request to all tasks of a job step.
  */
 int
-stepd_checkpoint(int fd, int signal, time_t timestamp)
+stepd_checkpoint(int fd, time_t timestamp, char *image_dir)
 {
 	int req = REQUEST_CHECKPOINT_TASKS;
 	int rc;
 
 	safe_write(fd, &req, sizeof(int));
-	safe_write(fd, &signal, sizeof(int));
 	safe_write(fd, &timestamp, sizeof(time_t));
+	if (image_dir) {
+		rc = strlen(image_dir) + 1;
+		safe_write(fd, &rc, sizeof(int));
+		safe_write(fd, image_dir, rc);
+	} else {
+		rc = 0;
+		safe_write(fd, &rc, sizeof(int));
+	}
 
 	/* Receive the return code */
 	safe_read(fd, &rc, sizeof(int));
@@ -455,9 +463,9 @@ _sockname_regex_init(regex_t *re, const char *nodename)
 	xstrcat(pattern, "_([[:digit:]]*)\\.([[:digit:]]*)$");
 
 	if (regcomp(re, pattern, REG_EXTENDED) != 0) {
-                error("sockname regex compilation failed\n");
-                return -1;
-        }
+		error("sockname regex compilation failed\n");
+		return -1;
+	}
 
 	xfree(pattern);
 
@@ -468,9 +476,9 @@ static int
 _sockname_regex(regex_t *re, const char *filename,
 		uint32_t *jobid, uint32_t *stepid)
 {
-        size_t nmatch = 5;
-        regmatch_t pmatch[5];
-        char *match;
+	size_t nmatch = 5;
+	regmatch_t pmatch[5];
+	char *match;
 
 	memset(pmatch, 0, sizeof(regmatch_t)*nmatch);
 	if (regexec(re, filename, nmatch, pmatch, 0) == REG_NOMATCH) {
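
The reworked stepd_checkpoint() frames the optional image directory as a length (including the terminating NUL) followed by the bytes, with a length of 0 meaning no directory was supplied. The receiving side of that frame might look like the following sketch, which assumes a blocking fd plus the safe_read/rwfail and xmalloc conventions used throughout this file; _read_image_dir is an illustrative name:

    /* Sketch only: read the optional string frame written by
     * stepd_checkpoint().  A length of 0 means no image_dir was sent;
     * _read_image_dir is an illustrative name. */
    static int _read_image_dir(int fd, char **image_dir)
    {
        int len;

        *image_dir = NULL;
        safe_read(fd, &len, sizeof(int));
        if (len > 0) {
            *image_dir = xmalloc(len);      /* len includes the NUL */
            safe_read(fd, *image_dir, len);
        }
        return SLURM_SUCCESS;

    rwfail:
        xfree(*image_dir);
        return SLURM_ERROR;
    }
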
diff --git a/src/common/stepd_api.h b/src/common/stepd_api.h
index a160324eaf78914caa61d0d22bf2aebd64361312..053405de86c9db10d3306d5de3ae3f4b8a7e0290 100644
--- a/src/common/stepd_api.h
+++ b/src/common/stepd_api.h
@@ -1,14 +1,15 @@
 /*****************************************************************************\
  *  src/common/stepd_api.h - slurmstepd message API
- *  $Id: stepd_api.h 13695 2008-03-21 21:28:17Z jette $
+ *  $Id: stepd_api.h 16867 2009-03-12 16:35:42Z jette $
  *****************************************************************************
  *  Copyright (C) 2005 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Christopher Morrone <morrone2@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -136,7 +137,7 @@ int stepd_signal(int fd, int signal);
 /*
  * Send a checkpoint request to all tasks of a job step.
  */
-int stepd_checkpoint(int fd, int signal, time_t timestamp);
+int stepd_checkpoint(int fd, time_t timestamp, char *image_dir);
 
 /*
  * Send a signal to a single task in a job step.
diff --git a/src/common/switch.c b/src/common/switch.c
index f7ea41c0cba3da16f64982d86a316d86ef5ed882..f308364c517ca2bb5287b447b7bfda94debc3f9d 100644
--- a/src/common/switch.c
+++ b/src/common/switch.c
@@ -1,13 +1,15 @@
 /*****************************************************************************\
  *  src/common/switch.c - Generic switch (interconnect) for slurm
  *****************************************************************************
- *  Copyright (C) 2002-2006 The Regents of the University of California.
+ *  Copyright (C) 2002-2007 The Regents of the University of California.
+ *  Copyright (C) 2008 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -57,8 +59,7 @@
 typedef struct slurm_switch_ops {
 	int          (*state_save)        ( char *dir_name );
 	int          (*state_restore)     ( char *dir_name, bool recover );
-	
-	bool         (*no_frag)           ( void );
+
 	int          (*alloc_jobinfo)     ( switch_jobinfo_t *jobinfo );
 	int          (*build_jobinfo)     ( switch_jobinfo_t jobinfo,
 						char *nodelist,
@@ -192,7 +193,6 @@ _slurm_switch_get_ops( slurm_switch_context_t c )
 	static const char *syms[] = {
 		"switch_p_libstate_save",
 		"switch_p_libstate_restore",
-		"switch_p_no_frag",
 		"switch_p_alloc_jobinfo",
 		"switch_p_build_jobinfo",
 		"switch_p_copy_jobinfo",
@@ -343,14 +343,6 @@ extern int  switch_clear(void)
 	return (*(g_context->ops.state_clear))( );
 }
 
-extern bool switch_no_frag(void)
-{
-	if ( switch_init() < 0 )
-		return SLURM_ERROR;
-
-	return (*(g_context->ops.no_frag))( );
-}
-
 extern int  switch_alloc_jobinfo(switch_jobinfo_t *jobinfo)
 {
 	if ( switch_init() < 0 )
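
Removing switch_p_no_frag from both the syms[] table and the slurm_switch_ops struct in the same commit is required because the plugin loader resolves them positionally: the symbol named by syms[i] is stored into slot i of the ops struct, so the two lists must always change in lockstep. A sketch of that positional resolution; plugin_get_sym stands in for whatever symbol-lookup call the plugin framework actually provides:

    /* Sketch only: positional resolution of plugin symbols into an ops
     * struct.  plugin_get_sym() is a stand-in for the framework's real
     * symbol lookup. */
    typedef struct {
        int (*state_save)(char *dir_name);
        int (*state_restore)(char *dir_name, bool recover);
        /* ... remaining slots, in the same order as syms[] ... */
    } example_ops_t;

    static const char *syms[] = {
        "switch_p_libstate_save",
        "switch_p_libstate_restore",
        /* ... one name per ops slot ... */
    };

    static int _resolve_ops(void *plugin, example_ops_t *ops)
    {
        int n_syms = sizeof(syms) / sizeof(syms[0]);
        void **slots = (void **) ops;   /* slot i pairs with syms[i] */
        int i;

        for (i = 0; i < n_syms; i++) {
            slots[i] = plugin_get_sym(plugin, syms[i]);
            if (!slots[i])
                return SLURM_ERROR;     /* missing symbol */
        }
        return SLURM_SUCCESS;
    }
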
diff --git a/src/common/switch.h b/src/common/switch.h
index ec574710cbe3e51f34a74aac2a8390c3f0863403..a294c0b01415ec5cfacda3d18cc7a9666dc9ec85 100644
--- a/src/common/switch.h
+++ b/src/common/switch.h
@@ -1,13 +1,15 @@
 /*****************************************************************************\
  *  src/common/switch.h - Generic switch (interconnect) info for slurm
  *****************************************************************************
- *  Copyright (C) 2002-2006 The Regents of the University of California.
+ *  Copyright (C) 2002-2007 The Regents of the University of California.
+ *  Copyright (C) 2008 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -93,12 +95,6 @@ extern int  switch_restore(char *dir_name, bool recover);
  */
 extern int  switch_clear(void);
 
-/* report if resource fragmentation is important. if so, delay scheduling a 
- * new job while another is in the process of terminating.
- * RET          - true if fragmentation is important
- */
-extern bool switch_no_frag(void);
-
 /* return the number of a switch-specific error code */
 extern int switch_get_errno(void);
 
diff --git a/src/common/timers.c b/src/common/timers.c
index 3f34a4598a32235180be36bfcbe5a655ba3ec69a..5cc6ca1bd1e3078095eefc04f5e2a79ab7ccf0eb 100644
--- a/src/common/timers.c
+++ b/src/common/timers.c
@@ -4,10 +4,11 @@
  *  Copyright (C) 2002-2006 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/common/timers.h b/src/common/timers.h
index b580270b653ab82f4b2344c2be8725a30cd96e77..6cc38daf46a9a692e098d77a8835d51ab2c9a732 100644
--- a/src/common/timers.h
+++ b/src/common/timers.h
@@ -4,10 +4,11 @@
  *  Copyright (C) 2002 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov> and Kevin Tew <tew1@llnl.gov> 
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/common/uid.c b/src/common/uid.c
index ad47db4c1d475bbaf10d69d4a8b6f518720f3c5d..fdff3ef44315ca34e409cab6097ffec4fcd1e8ff 100644
--- a/src/common/uid.c
+++ b/src/common/uid.c
@@ -1,14 +1,15 @@
 /*****************************************************************************\
  * src/common/uid.c - uid/gid lookup utility functions
- * $Id: uid.c 17177 2009-04-07 18:09:43Z jette $
+ * $Id: uid.c 17178 2009-04-07 18:23:28Z jette $
  *****************************************************************************
  *  Copyright (C) 2002 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Mark Grondona <mgrondona@llnl.gov>.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/common/uid.h b/src/common/uid.h
index 06701af57eebb02432bf147b52e0084c425a1a93..bf7062b8e9233ee530314600442f88c01e0d06b6 100644
--- a/src/common/uid.h
+++ b/src/common/uid.h
@@ -1,14 +1,15 @@
 /*****************************************************************************\
  * src/common/uid.h - uid/gid lookup utility functions
- * $Id: uid.h 17177 2009-04-07 18:09:43Z jette $
+ * $Id: uid.h 17178 2009-04-07 18:23:28Z jette $
  *****************************************************************************
  *  Copyright (C) 2002 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Mark Grondona <mgrondona@llnl.gov>.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/common/unsetenv.c b/src/common/unsetenv.c
index 5fd8b4cb49961c52205db00ad28dba77b6061810..bc5c3def0008192acb0aaf74f281546a72a02d71 100644
--- a/src/common/unsetenv.c
+++ b/src/common/unsetenv.c
@@ -4,10 +4,11 @@
  *  Copyright (C) 2004 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/common/unsetenv.h b/src/common/unsetenv.h
index 4d98de3abd6c8912af3fac40b729c80e0917aa3f..a8b998132b9f25ecd21b8f7c666c53e7d3ec7e97 100644
--- a/src/common/unsetenv.h
+++ b/src/common/unsetenv.h
@@ -4,10 +4,11 @@
  *  Copyright (C) 2004 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/common/write_labelled_message.c b/src/common/write_labelled_message.c
new file mode 100644
index 0000000000000000000000000000000000000000..6d379bcd25d5cb6b4d6af8d5c6e525e531f20e9b
--- /dev/null
+++ b/src/common/write_labelled_message.c
@@ -0,0 +1,177 @@
+/*****************************************************************************\
+ *  write_labelled_message.c - write a message with an optional label
+ *****************************************************************************
+ *  Copyright (C) 2009 Lawrence Livermore National Security.
+ *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+ *  Written by Christopher J. Morrone <morrone2@llnl.gov> and 
+ *  David Bremer <dbremer@llnl.gov>
+ *  CODE-OCEC-09-009. All rights reserved.
+ *  
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
+ *  
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission 
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and 
+ *  distribute linked combinations including the two. You must obey the GNU 
+ *  General Public License in all respects for all of the code used other than 
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this 
+ *  exception to your version of the file(s), but you are not obligated to do 
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in 
+ *  the program, then also delete it here.
+ *  
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *  
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#include <signal.h>
+#include <stdio.h>
+#include <errno.h>
+#include <string.h>
+
+#include "src/common/write_labelled_message.h"
+#include "slurm/slurm_errno.h"
+#include "src/common/log.h"
+
+static int _write_label(int fd, int taskid, int label_width);
+static int _write_line(int fd, void *buf, int len);
+static int _write_newline(int fd);
+
+
+
+int write_labelled_message(int fd, void *buf, int len, int taskid,
+			   bool label, int label_width)
+{
+	void *start;
+	void *end;
+	int remaining = len;
+	int written = 0;
+	int line_len;
+	int rc = -1;
+
+	while (remaining > 0) {
+		start = buf + written;
+		end = memchr(start, '\n', remaining);
+		if (label)
+			if (_write_label(fd, taskid, label_width)
+			    != SLURM_SUCCESS)
+				goto done;
+		if (end == NULL) { /* no newline found */
+			rc = _write_line(fd, start, remaining);
+			if (rc <= 0) {
+				goto done;
+			} else {
+				remaining -= rc;
+				written += rc;
+			}
+			if (label)
+				if (_write_newline(fd) != SLURM_SUCCESS)
+					goto done;
+		} else {
+			line_len = (int)(end - start) + 1;
+			rc = _write_line(fd, start, line_len);
+			if (rc <= 0) {
+				goto done;
+			} else {
+				remaining -= rc;
+				written += rc;
+			}
+		}
+
+	}
+done:
+	if (written > 0)
+		return written;
+	else
+		return rc;
+}
+
+
+static int _write_label(int fd, int taskid, int label_width)
+{
+	int n;
+	int left = label_width + 2;
+	char buf[16];
+	void *ptr = buf;
+
+	snprintf(buf, 16, "%0*d: ", label_width, taskid);
+	while (left > 0) {
+	again:
+		if ((n = write(fd, ptr, left)) < 0) {
+			if (errno == EINTR)
+				goto again;
+			if ((errno == EAGAIN) || (errno == EWOULDBLOCK)) {
+				debug3("  got EAGAIN in _write_label");
+				goto again;
+			}
+			error("In _write_label: %m");
+			return SLURM_ERROR;
+		}
+		left -= n;
+		ptr += n;
+	}
+
+	return SLURM_SUCCESS;
+}
+
+
+static int _write_newline(int fd)
+{
+	int n;
+
+	debug2("Called _write_newline");
+again:
+	if ((n = write(fd, "\n", 1)) < 0) {
+		if (errno == EINTR
+		    || errno == EAGAIN
+		    || errno == EWOULDBLOCK) {
+			goto again;
+		}
+		error("In _write_newline: %m");
+		return SLURM_ERROR;
+	}
+	return SLURM_SUCCESS;
+}
+
+
+/*
+ * Blocks until write is complete, regardless of the file
+ * descriptor being in non-blocking mode.
+ */
+static int _write_line(int fd, void *buf, int len)
+{
+	int n;
+	int left = len;
+	void *ptr = buf;
+
+	debug2("Called _write_line");
+	while (left > 0) {
+	again:
+		if ((n = write(fd, ptr, left)) < 0) {
+			if (errno == EINTR)
+				goto again;
+			if ((errno == EAGAIN) || (errno == EWOULDBLOCK)) {
+				debug3("  got EAGAIN in _write_line");
+				goto again;
+			}
+			return -1;
+		}
+		left -= n;
+		ptr += n;
+	}
+	
+	return len;
+}
diff --git a/src/common/write_labelled_message.h b/src/common/write_labelled_message.h
new file mode 100644
index 0000000000000000000000000000000000000000..6deaa0a5caa10412fb66babbdbe749f299a316a3
--- /dev/null
+++ b/src/common/write_labelled_message.h
@@ -0,0 +1,65 @@
+/*****************************************************************************\
+ *  write_labelled_message.h - write a message with an optional label
+ *****************************************************************************
+ *  Copyright (C) 2009 Lawrence Livermore National Security.
+ *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+ *  Written by David Bremer <dbremer@llnl.gov>
+ *  CODE-OCEC-09-009. All rights reserved.
+ *  
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
+ *  
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission 
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and 
+ *  distribute linked combinations including the two. You must obey the GNU 
+ *  General Public License in all respects for all of the code used other than 
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this 
+ *  exception to your version of the file(s), but you are not obligated to do 
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in 
+ *  the program, then also delete it here.
+ *  
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *  
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#ifndef _HAVE_WRITE_LABELLED_MESSAGE
+#define _HAVE_WRITE_LABELLED_MESSAGE
+
+#include "slurm/slurm.h"
+
+/*
+ * fd          is the file descriptor to write to
+ * buf         is the char buffer to write
+ * len         is the buffer length in bytes
+ * taskid      is the task id to print in the label
+ * label       if true, prepend each line of the buffer with a 
+ *               label for the task id
+ * label_width is the number of digits to use for the task id
+ *
+ * Write as many lines from the message as possible.  Return
+ * the number of bytes from the message that have been written,
+ * or -1 on error.  If len==0, -1 will be returned.
+ *
+ * If the message ends in a partial line (line does not end
+ * in a '\n'), then add a newline to the output file, but only
+ * in label mode.
+ */
+
+int write_labelled_message(int fd, void *buf, int len, int taskid,
+			   bool label, int label_width);
+
+#endif
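
Given the contract documented above, a caller only needs an fd, a buffer, and a label width wide enough for the largest task id. A hedged usage sketch (assumes <string.h>); _example_emit, ntasks, and the message text are illustrative:

    /* Sketch only: label each line of one task's output with its task
     * id.  _example_emit, ntasks, and the message text are illustrative. */
    static void _example_emit(int fd, int taskid, int ntasks)
    {
        char msg[] = "line one\npartial line";
        int max_id = ntasks - 1;
        int label_width = 1;
        int n;

        while (max_id >= 10) {  /* digits in the largest task id */
            max_id /= 10;
            label_width++;
        }
        n = write_labelled_message(fd, msg, strlen(msg), taskid,
                                   true, label_width);
        if (n < 0)
            error("write_labelled_message: %m");
    }
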
diff --git a/src/common/xassert.c b/src/common/xassert.c
index 2b2d2362c9b42d5d2fb78da27fb5ce1045c1fe64..430959ec40b3a74e5111fcfdca582bfba05f4b3d 100644
--- a/src/common/xassert.c
+++ b/src/common/xassert.c
@@ -1,15 +1,16 @@
 /*****************************************************************************\
  *  xassert.c - replacement for assert which sends error to log instead 
  *		of stderr
- *  $Id: xassert.c 13672 2008-03-19 23:10:58Z jette $
+ *  $Id: xassert.c 16616 2009-02-20 17:00:27Z jette $
  *****************************************************************************
  *  Copyright (C) 2004 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette@llnl.gov>.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/common/xassert.h b/src/common/xassert.h
index 2e5e6fb44dfd8ab5e5d532823c9877399fad380a..1c66ce5c4ac9a71cb72c7e8d8448b5936ae2d81c 100644
--- a/src/common/xassert.h
+++ b/src/common/xassert.h
@@ -2,15 +2,16 @@
  *  xassert.h: assert type macro with configurable handling
  *             If NDEBUG is defined, do nothing.
  *             If not, and expression is zero, log an error message and abort.
- *  $Id: xassert.h 13672 2008-03-19 23:10:58Z jette $
+ *  $Id: xassert.h 16616 2009-02-20 17:00:27Z jette $
  *****************************************************************************
  *  Copyright (C) 2004 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette@llnl.gov>.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/common/xmalloc.c b/src/common/xmalloc.c
index 30a2075b709fbeb1a53026b9cb253abff38cfe37..61751c0c6ae6b4495ef2d2f2f1256c2bf5625e1a 100644
--- a/src/common/xmalloc.c
+++ b/src/common/xmalloc.c
@@ -2,16 +2,17 @@
  *  xmalloc.c - enhanced malloc routines
  *  Started with Jim Garlick's xmalloc and tied into slurm log facility.
  *  Also added ability to print file, line, and function of caller.
- *  $Id: xmalloc.c 13672 2008-03-19 23:10:58Z jette $
+ *  $Id: xmalloc.c 16616 2009-02-20 17:00:27Z jette $
  *****************************************************************************
  *  Copyright (C) 2002 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Jim Garlick <garlick1@llnl.gov> and 
  *	Mark Grondona <mgrondona@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/common/xmalloc.h b/src/common/xmalloc.h
index 4d8c796acfb1a5a9f0332d4285a204874c41f3af..fe99169eae098968e626520625987daca48f1156 100644
--- a/src/common/xmalloc.h
+++ b/src/common/xmalloc.h
@@ -3,16 +3,17 @@
  *  - default: never return if errors are encountered.
  *  - attempt to report file, line, and calling function on assertion failure
  *  - use configurable slurm log facility for reporting errors
- *  $Id: xmalloc.h 13672 2008-03-19 23:10:58Z jette $
+ *  $Id: xmalloc.h 16616 2009-02-20 17:00:27Z jette $
  *****************************************************************************
  *  Copyright (C) 2002 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Jim Garlick <garlick1@llnl.gov> and 
  *	Mark Grondona <mgrondona@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/common/xsignal.c b/src/common/xsignal.c
index fcc5e577f213b866e3d76088bd21d9b7e0e1d9ff..f9b0a1f77dbcd84e3945d2aff585f64c8483a62f 100644
--- a/src/common/xsignal.c
+++ b/src/common/xsignal.c
@@ -4,10 +4,11 @@
  *  Copyright (C) 2002 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Mark Grondona <mgrondona@llnl.gov>.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/common/xsignal.h b/src/common/xsignal.h
index ad66edb77149aad34f1d05f99523fdd6480cff0a..9834cfccbd5ccd3b0e652cb18b8aced137763de8 100644
--- a/src/common/xsignal.h
+++ b/src/common/xsignal.h
@@ -1,14 +1,15 @@
 /*****************************************************************************\
  * src/common/xsignal.h - POSIX signal wrapper functions
- * $Id: xsignal.h 13672 2008-03-19 23:10:58Z jette $
+ * $Id: xsignal.h 16616 2009-02-20 17:00:27Z jette $
  *****************************************************************************
  *  Copyright (C) 2002 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Mark Grondona <mgrondona@llnl.gov>.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/common/xstring.c b/src/common/xstring.c
index 94d0a181d493a65a796603c0ab6a7f38ff896e1e..a360bbeaa357fa8d9f43278744746d6d088e5532 100644
--- a/src/common/xstring.c
+++ b/src/common/xstring.c
@@ -7,10 +7,11 @@
  *  Written by Jim Garlick <garlick@llnl.gov>
  *             Mark Grondona <grondona@llnl.gov>, et al.
  *	
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/common/xstring.h b/src/common/xstring.h
index 493819a49bda7bf3feb544b92bcea8408d30e244..ec1fe44e2cec6c75d79013006b4abbdd9b2fec98 100644
--- a/src/common/xstring.h
+++ b/src/common/xstring.h
@@ -5,10 +5,11 @@
  *  Copyright (C) 2002 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Jim Garlick <garlick@llnl.gov>, et. al.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/database/Makefile.am b/src/database/Makefile.am
index dadae58c3b9336e25e36b6e29b03244a79fe3a96..790a8b820d42e9b35cf551a9eb8a016f827bd621 100644
--- a/src/database/Makefile.am
+++ b/src/database/Makefile.am
@@ -4,18 +4,26 @@ AUTOMAKE_OPTIONS = foreign
 
 INCLUDES = -I$(top_srcdir) -I$(top_srcdir)/src/common
 
-noinst_LTLIBRARIES =      \
-	libslurm_mysql.la \
-	libslurm_pgsql.la
-
+if WITH_MYSQL
+MYSQL_LIB = libslurm_mysql.la
 libslurm_mysql_la_SOURCES = mysql_common.c mysql_common.h
-libslurm_pgsql_la_SOURCES = pgsql_common.c pgsql_common.h
+libslurm_mysql_la_LIBADD   = $(MYSQL_LIBS)
+libslurm_mysql_la_LDFLAGS  = $(LIB_LDFLAGS)
+libslurm_mysql_la_CFLAGS = $(MYSQL_CFLAGS)
+else
+MYSQL_LIB =
+EXTRA_libslurm_mysql_la_SOURCES = mysql_common.c mysql_common.h
+endif
 
-libslurm_mysql_la_LIBADD   = $(MYSQL_LIBS) 
+if WITH_PGSQL
+PGSQL_LIB = libslurm_pgsql.la
+libslurm_pgsql_la_SOURCES = pgsql_common.c pgsql_common.h
 libslurm_pgsql_la_LIBADD   = $(PGSQL_LIBS) 
-
-libslurm_mysql_la_LDFLAGS  = $(LIB_LDFLAGS)
 libslurm_pgsql_la_LDFLAGS  = $(LIB_LDFLAGS)
-
-libslurm_mysql_la_CFLAGS = $(MYSQL_CFLAGS)
 libslurm_pgsql_la_CFLAGS = $(PGSQL_CFLAGS)
+else
+PGSQL_LIB =
+EXTRA_libslurm_pgsql_la_SOURCES = pgsql_common.c pgsql_common.h
+endif
+
+noinst_LTLIBRARIES = $(MYSQL_LIB) $(PGSQL_LIB)
diff --git a/src/database/Makefile.in b/src/database/Makefile.in
index fbc9789b5e575ad99ccea24d6155153b03c86d6d..f7ea468992ed21992cdc6d808a1e15c9745d2869 100644
--- a/src/database/Makefile.in
+++ b/src/database/Makefile.in
@@ -43,14 +43,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -69,20 +73,32 @@ CONFIG_HEADER = $(top_builddir)/config.h $(top_builddir)/slurm/slurm.h
 CONFIG_CLEAN_FILES =
 LTLIBRARIES = $(noinst_LTLIBRARIES)
 am__DEPENDENCIES_1 =
-libslurm_mysql_la_DEPENDENCIES = $(am__DEPENDENCIES_1)
-am_libslurm_mysql_la_OBJECTS = libslurm_mysql_la-mysql_common.lo
+@WITH_MYSQL_TRUE@libslurm_mysql_la_DEPENDENCIES =  \
+@WITH_MYSQL_TRUE@	$(am__DEPENDENCIES_1)
+am__libslurm_mysql_la_SOURCES_DIST = mysql_common.c mysql_common.h
+@WITH_MYSQL_TRUE@am_libslurm_mysql_la_OBJECTS =  \
+@WITH_MYSQL_TRUE@	libslurm_mysql_la-mysql_common.lo
+am__EXTRA_libslurm_mysql_la_SOURCES_DIST = mysql_common.c \
+	mysql_common.h
 libslurm_mysql_la_OBJECTS = $(am_libslurm_mysql_la_OBJECTS)
 libslurm_mysql_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \
 	$(LIBTOOLFLAGS) --mode=link $(CCLD) \
 	$(libslurm_mysql_la_CFLAGS) $(CFLAGS) \
 	$(libslurm_mysql_la_LDFLAGS) $(LDFLAGS) -o $@
-libslurm_pgsql_la_DEPENDENCIES = $(am__DEPENDENCIES_1)
-am_libslurm_pgsql_la_OBJECTS = libslurm_pgsql_la-pgsql_common.lo
+@WITH_MYSQL_TRUE@am_libslurm_mysql_la_rpath =
+@WITH_PGSQL_TRUE@libslurm_pgsql_la_DEPENDENCIES =  \
+@WITH_PGSQL_TRUE@	$(am__DEPENDENCIES_1)
+am__libslurm_pgsql_la_SOURCES_DIST = pgsql_common.c pgsql_common.h
+@WITH_PGSQL_TRUE@am_libslurm_pgsql_la_OBJECTS =  \
+@WITH_PGSQL_TRUE@	libslurm_pgsql_la-pgsql_common.lo
+am__EXTRA_libslurm_pgsql_la_SOURCES_DIST = pgsql_common.c \
+	pgsql_common.h
 libslurm_pgsql_la_OBJECTS = $(am_libslurm_pgsql_la_OBJECTS)
 libslurm_pgsql_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \
 	$(LIBTOOLFLAGS) --mode=link $(CCLD) \
 	$(libslurm_pgsql_la_CFLAGS) $(CFLAGS) \
 	$(libslurm_pgsql_la_LDFLAGS) $(LDFLAGS) -o $@
+@WITH_PGSQL_TRUE@am_libslurm_pgsql_la_rpath =
 DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm
 depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp
 am__depfiles_maybe = depfiles
@@ -95,9 +111,14 @@ CCLD = $(CC)
 LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
 	--mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \
 	$(LDFLAGS) -o $@
-SOURCES = $(libslurm_mysql_la_SOURCES) $(libslurm_pgsql_la_SOURCES)
-DIST_SOURCES = $(libslurm_mysql_la_SOURCES) \
-	$(libslurm_pgsql_la_SOURCES)
+SOURCES = $(libslurm_mysql_la_SOURCES) \
+	$(EXTRA_libslurm_mysql_la_SOURCES) \
+	$(libslurm_pgsql_la_SOURCES) \
+	$(EXTRA_libslurm_pgsql_la_SOURCES)
+DIST_SOURCES = $(am__libslurm_mysql_la_SOURCES_DIST) \
+	$(am__EXTRA_libslurm_mysql_la_SOURCES_DIST) \
+	$(am__libslurm_pgsql_la_SOURCES_DIST) \
+	$(am__EXTRA_libslurm_pgsql_la_SOURCES_DIST)
 ETAGS = etags
 CTAGS = ctags
 DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
@@ -111,6 +132,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
@@ -272,18 +297,21 @@ top_builddir = @top_builddir@
 top_srcdir = @top_srcdir@
 AUTOMAKE_OPTIONS = foreign
 INCLUDES = -I$(top_srcdir) -I$(top_srcdir)/src/common
-noinst_LTLIBRARIES = \
-	libslurm_mysql.la \
-	libslurm_pgsql.la
-
-libslurm_mysql_la_SOURCES = mysql_common.c mysql_common.h
-libslurm_pgsql_la_SOURCES = pgsql_common.c pgsql_common.h
-libslurm_mysql_la_LIBADD = $(MYSQL_LIBS) 
-libslurm_pgsql_la_LIBADD = $(PGSQL_LIBS) 
-libslurm_mysql_la_LDFLAGS = $(LIB_LDFLAGS)
-libslurm_pgsql_la_LDFLAGS = $(LIB_LDFLAGS)
-libslurm_mysql_la_CFLAGS = $(MYSQL_CFLAGS)
-libslurm_pgsql_la_CFLAGS = $(PGSQL_CFLAGS)
+@WITH_MYSQL_FALSE@MYSQL_LIB = 
+@WITH_MYSQL_TRUE@MYSQL_LIB = libslurm_mysql.la
+@WITH_MYSQL_TRUE@libslurm_mysql_la_SOURCES = mysql_common.c mysql_common.h
+@WITH_MYSQL_TRUE@libslurm_mysql_la_LIBADD = $(MYSQL_LIBS)
+@WITH_MYSQL_TRUE@libslurm_mysql_la_LDFLAGS = $(LIB_LDFLAGS)
+@WITH_MYSQL_TRUE@libslurm_mysql_la_CFLAGS = $(MYSQL_CFLAGS)
+@WITH_MYSQL_FALSE@EXTRA_libslurm_mysql_la_SOURCES = mysql_common.c mysql_common.h
+@WITH_PGSQL_FALSE@PGSQL_LIB = 
+@WITH_PGSQL_TRUE@PGSQL_LIB = libslurm_pgsql.la
+@WITH_PGSQL_TRUE@libslurm_pgsql_la_SOURCES = pgsql_common.c pgsql_common.h
+@WITH_PGSQL_TRUE@libslurm_pgsql_la_LIBADD = $(PGSQL_LIBS) 
+@WITH_PGSQL_TRUE@libslurm_pgsql_la_LDFLAGS = $(LIB_LDFLAGS)
+@WITH_PGSQL_TRUE@libslurm_pgsql_la_CFLAGS = $(PGSQL_CFLAGS)
+@WITH_PGSQL_FALSE@EXTRA_libslurm_pgsql_la_SOURCES = pgsql_common.c pgsql_common.h
+noinst_LTLIBRARIES = $(MYSQL_LIB) $(PGSQL_LIB)
 all: all-am
 
 .SUFFIXES:
@@ -327,9 +355,9 @@ clean-noinstLTLIBRARIES:
 	  rm -f "$${dir}/so_locations"; \
 	done
 libslurm_mysql.la: $(libslurm_mysql_la_OBJECTS) $(libslurm_mysql_la_DEPENDENCIES) 
-	$(libslurm_mysql_la_LINK)  $(libslurm_mysql_la_OBJECTS) $(libslurm_mysql_la_LIBADD) $(LIBS)
+	$(libslurm_mysql_la_LINK) $(am_libslurm_mysql_la_rpath) $(libslurm_mysql_la_OBJECTS) $(libslurm_mysql_la_LIBADD) $(LIBS)
 libslurm_pgsql.la: $(libslurm_pgsql_la_OBJECTS) $(libslurm_pgsql_la_DEPENDENCIES) 
-	$(libslurm_pgsql_la_LINK)  $(libslurm_pgsql_la_OBJECTS) $(libslurm_pgsql_la_LIBADD) $(LIBS)
+	$(libslurm_pgsql_la_LINK) $(am_libslurm_pgsql_la_rpath) $(libslurm_pgsql_la_OBJECTS) $(libslurm_pgsql_la_LIBADD) $(LIBS)
 
 mostlyclean-compile:
 	-rm -f *.$(OBJEXT)
diff --git a/src/database/mysql_common.c b/src/database/mysql_common.c
index 9e6a3b9f248aaa0b8d1e50a1df93a4e30c10b0f6..28a3402d05956da080a2064e91eb9683150b1949 100644
--- a/src/database/mysql_common.c
+++ b/src/database/mysql_common.c
@@ -7,7 +7,8 @@
  *  Written by Danny Auble <da@llnl.gov>
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -47,34 +48,8 @@
 pthread_mutex_t mysql_lock = PTHREAD_MUTEX_INITIALIZER;
 #endif
 
-#ifdef HAVE_MYSQL
-
 static char *table_defs_table = "table_defs_table";
 
-static int _clear_results(MYSQL *mysql_db)
-{
-	MYSQL_RES *result = NULL;
-	int rc = 0;
-	do {
-		/* did current statement return data? */
-		if((result = mysql_store_result(mysql_db)))
-			mysql_free_result(result);
-		
-		/* more results? -1 = no, >0 = error, 0 = yes (keep looping) */
-		if ((rc = mysql_next_result(mysql_db)) > 0)
-			error("Could not execute statement %d %s\n",
-			      mysql_errno(mysql_db),
-			      mysql_error(mysql_db));
-	} while (rc == 0);
-
-	if(rc > 0) {
-		errno = rc;
-		return SLURM_ERROR;
-	} 
-
-	return SLURM_SUCCESS;
-}
-
 static MYSQL_RES *_get_first_result(MYSQL *mysql_db)
 {
 	MYSQL_RES *result = NULL;
@@ -346,6 +321,9 @@ static int _create_db(char *db_name, mysql_db_info_t *db_info)
 	char create_line[50];
 	MYSQL *mysql_db = NULL;
 	int rc = SLURM_ERROR;
+	
+	MYSQL *db_ptr = NULL;
+	char *db_host = NULL;
 
 	while(rc == SLURM_ERROR) {
 		rc = SLURM_SUCCESS;
@@ -355,10 +333,25 @@ static int _create_db(char *db_name, mysql_db_info_t *db_info)
 		if(!(mysql_db = mysql_init(mysql_db)))
 			fatal("mysql_init failed: %s", mysql_error(mysql_db));
 		
-		if(mysql_real_connect(mysql_db, 
-				      db_info->host, db_info->user,
-				      db_info->pass, NULL, 
-				      db_info->port, NULL, 0)) {
+		db_host = db_info->host;
+		db_ptr = mysql_real_connect(mysql_db,
+					    db_host, db_info->user,
+					    db_info->pass, NULL,
+					    db_info->port, NULL, 0);
+
+		if (!db_ptr && db_info->backup) {
+			info("Connection failed to host = %s "
+			     "user = %s port = %u",
+			     db_host, db_info->user,
+			     db_info->port);  
+			db_host = db_info->backup;
+			db_ptr = mysql_real_connect(mysql_db, db_host,
+						    db_info->user,
+						    db_info->pass, NULL,
+						    db_info->port, NULL, 0);
+		}
+
+		if (db_ptr) {
 			snprintf(create_line, sizeof(create_line),
 				 "create database %s", db_name);
 			if(mysql_query(mysql_db, create_line)) {
@@ -369,9 +362,9 @@ static int _create_db(char *db_name, mysql_db_info_t *db_info)
 			mysql_close_db_connection(&mysql_db);
 		} else {
 			info("Connection failed to host = %s "
-			     "user = %s pass = %s port = %u",
-			     db_info->host, db_info->user,
-			     db_info->pass, db_info->port);
+			     "user = %s port = %u",
+			     db_host, db_info->user,
+			     db_info->port);
 #ifdef MYSQL_NOT_THREAD_SAFE
 			slurm_mutex_unlock(&mysql_lock);
 #endif
@@ -392,6 +385,7 @@ static int _create_db(char *db_name, mysql_db_info_t *db_info)
 extern int *destroy_mysql_db_info(mysql_db_info_t *db_info)
 {
 	if(db_info) {
+		xfree(db_info->backup);
 		xfree(db_info->host);
 		xfree(db_info->user);
 		xfree(db_info->pass);
@@ -406,6 +400,8 @@ extern int mysql_get_db_connection(MYSQL **mysql_db, char *db_name,
 	int rc = SLURM_SUCCESS;
 	bool storage_init = false;
 	
+	char *db_host = db_info->host;
+
 	if(!(*mysql_db = mysql_init(*mysql_db)))
 		fatal("mysql_init failed: %s", mysql_error(*mysql_db));
 	else {
@@ -418,7 +414,7 @@ extern int mysql_get_db_connection(MYSQL **mysql_db, char *db_name,
 		mysql_options(*mysql_db, MYSQL_OPT_CONNECT_TIMEOUT,
 			      (char *)&my_timeout);
 		while(!storage_init) {
-			if(!mysql_real_connect(*mysql_db, db_info->host,
+			if(!mysql_real_connect(*mysql_db, db_host,
 					       db_info->user, db_info->pass,
 					       db_name, db_info->port,
 					       NULL, CLIENT_MULTI_STATEMENTS)) {
@@ -431,6 +427,11 @@ extern int mysql_get_db_connection(MYSQL **mysql_db, char *db_name,
 					      "%d %s",
 					      mysql_errno(*mysql_db),
 					      mysql_error(*mysql_db));
+					if ((db_host == db_info->host)
+					    && db_info->backup) {
+						db_host = db_info->backup;
+						continue;
+					}
 					rc = SLURM_ERROR;
 					break;
 				}
@@ -468,6 +469,30 @@ extern int mysql_cleanup()
 	return SLURM_SUCCESS;
 }
 
+extern int mysql_clear_results(MYSQL *mysql_db)
+{
+	MYSQL_RES *result = NULL;
+	int rc = 0;
+	do {
+		/* did current statement return data? */
+		if((result = mysql_store_result(mysql_db)))
+			mysql_free_result(result);
+		
+		/* more results? -1 = no, >0 = error, 0 = yes (keep looping) */
+		if ((rc = mysql_next_result(mysql_db)) > 0)
+			error("Could not execute statement %d %s\n",
+			      mysql_errno(mysql_db),
+			      mysql_error(mysql_db));
+	} while (rc == 0);
+
+	if(rc > 0) {
+		errno = rc;
+		return SLURM_ERROR;
+	} 
+
+	return SLURM_SUCCESS;
+}
+
 extern int mysql_db_query(MYSQL *mysql_db, char *query)
 {
 	if(!mysql_db)
@@ -476,13 +501,9 @@ extern int mysql_db_query(MYSQL *mysql_db, char *query)
 	slurm_mutex_lock(&mysql_lock);
 #endif
 	/* clear out the old results so we don't get a 2014 error */
-	_clear_results(mysql_db);		
+	mysql_clear_results(mysql_db);		
 //try_again:
 	if(mysql_query(mysql_db, query)) {
-		/* if(mysql_errno(mysql_db) == CR_SERVER_GONE_ERROR) { */
-/* 			/\* FIX ME: this means the connection went away *\/ */
-/* 		} */
-
 		error("mysql_query failed: %d %s\n%s",
 		      mysql_errno(mysql_db),
 		      mysql_error(mysql_db), query);
@@ -490,6 +511,14 @@ extern int mysql_db_query(MYSQL *mysql_db, char *query)
 #ifdef MYSQL_NOT_THREAD_SAFE
 		slurm_mutex_unlock(&mysql_lock);
 #endif
+		/* FIXME: If we get ER_LOCK_WAIT_TIMEOUT here, the
+		 * connection needs to be restarted, but it appears
+		 * that restarting the calling program is the only
+		 * way to handle this.  Until someone finds a better
+		 * approach, the calling program must be restarted
+		 * whenever this error occurs.
+		 */
+
 		return SLURM_ERROR;
 	}
 
@@ -502,7 +531,7 @@ extern int mysql_db_query(MYSQL *mysql_db, char *query)
 extern int mysql_db_ping(MYSQL *mysql_db)
 {
 	/* clear out the old results so we don't get a 2014 error */
-	_clear_results(mysql_db);		
+	mysql_clear_results(mysql_db);		
 	return mysql_ping(mysql_db);
 }
 
@@ -512,7 +541,7 @@ extern int mysql_db_commit(MYSQL *mysql_db)
 	slurm_mutex_lock(&mysql_lock);
 #endif
 	/* clear out the old results so we don't get a 2014 error */
-	_clear_results(mysql_db);		
+	mysql_clear_results(mysql_db);		
 	if(mysql_commit(mysql_db)) {
 		error("mysql_commit failed: %d %s",
 		      mysql_errno(mysql_db),
@@ -535,7 +564,7 @@ extern int mysql_db_rollback(MYSQL *mysql_db)
 	slurm_mutex_lock(&mysql_lock);
 #endif
 	/* clear out the old results so we don't get a 2014 error */
-	_clear_results(mysql_db);		
+	mysql_clear_results(mysql_db);		
 	if(mysql_rollback(mysql_db)) {
 		error("mysql_commit failed: %d %s",
 		      mysql_errno(mysql_db),
@@ -578,7 +607,7 @@ extern int mysql_db_query_check_after(MYSQL *mysql_db, char *query)
 	int rc = SLURM_SUCCESS;
 		
 	if((rc = mysql_db_query(mysql_db, query)) != SLURM_ERROR)  
-		rc = _clear_results(mysql_db);
+		rc = mysql_clear_results(mysql_db);
 	
 	return rc;
 }
@@ -655,7 +684,3 @@ extern int mysql_db_create_table(MYSQL *mysql_db, char *table_name,
 	return _mysql_make_table_current(mysql_db, table_name,
 					 first_field, ending);
 }
-
-
-#endif
-
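
Both _create_db() and mysql_get_db_connection() above gained the same failover step: try db_info->host first, and if that connection fails and db_info->backup is set, retry against the backup host. Factored into a helper, the shared logic might look like this sketch; _connect_with_backup is an illustrative name, not part of mysql_common.c:

    /* Sketch only: try the primary host, then the backup.  Returns the
     * connected handle or NULL; _connect_with_backup is an illustrative
     * name, not part of mysql_common.c. */
    static MYSQL *_connect_with_backup(MYSQL *mysql_db,
                                       mysql_db_info_t *db_info,
                                       char *db_name)
    {
        MYSQL *db_ptr;

        db_ptr = mysql_real_connect(mysql_db, db_info->host,
                                    db_info->user, db_info->pass,
                                    db_name, db_info->port, NULL, 0);
        if (!db_ptr && db_info->backup) {
            info("Connection failed to host = %s user = %s port = %u",
                 db_info->host, db_info->user, db_info->port);
            db_ptr = mysql_real_connect(mysql_db, db_info->backup,
                                        db_info->user, db_info->pass,
                                        db_name, db_info->port, NULL, 0);
        }
        return db_ptr;
    }
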
diff --git a/src/database/mysql_common.h b/src/database/mysql_common.h
index b6aba93e960b5cf174d467f1e5946c5feb1dcc8e..e1b1e111455dd540d20d764b6944c290bc7ceca2 100644
--- a/src/database/mysql_common.h
+++ b/src/database/mysql_common.h
@@ -7,7 +7,8 @@
  *  Written by Danny Auble <da@llnl.gov>
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -55,10 +56,6 @@
 #include <slurm/slurm_errno.h>
 #include "src/common/list.h"
 #include "src/common/xstring.h"
-
-#ifndef HAVE_MYSQL
-typedef void mysql_conn_t;
-#else
 #include <mysql.h>
 #include <mysqld_error.h>
 
@@ -70,6 +67,7 @@ typedef struct {
 } mysql_conn_t;
 
 typedef struct {
+	char *backup;	
 	uint32_t port;	
 	char *host;	
 	char *user;	
@@ -89,6 +87,7 @@ extern int mysql_get_db_connection(MYSQL **mysql_db, char *db_name,
 				   mysql_db_info_t *db_info);
 extern int mysql_close_db_connection(MYSQL **mysql_db);
 extern int mysql_cleanup();
+extern int mysql_clear_results(MYSQL *mysql_db);
 extern int mysql_db_query(MYSQL *mysql_db, char *query);
 extern int mysql_db_ping(MYSQL *mysql_db);
 extern int mysql_db_commit(MYSQL *mysql_db);
@@ -104,5 +103,3 @@ extern int mysql_db_create_table(MYSQL *mysql_db, char *table_name,
 
 
 #endif
-
-#endif
diff --git a/src/database/pgsql_common.c b/src/database/pgsql_common.c
index 819836e83a49b4cd0f7d0074f11f96ff26b4ef0f..af20b8249f5db9dffdaa6e15fa16b4c88e3686a8 100644
--- a/src/database/pgsql_common.c
+++ b/src/database/pgsql_common.c
@@ -7,7 +7,8 @@
  *  Written by Danny Auble <da@llnl.gov>
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -43,8 +44,6 @@
 
 pthread_mutex_t pgsql_lock = PTHREAD_MUTEX_INITIALIZER;
 
-#ifdef HAVE_PGSQL
-
 extern int *destroy_pgsql_db_info(pgsql_db_info_t *db_info)
 {
 	if(db_info) {
@@ -384,7 +383,3 @@ extern int pgsql_db_make_table_current(PGconn *pgsql_db, char *table_name,
 	END_TIMER2("make table current");
 	return SLURM_SUCCESS;
 }
-
-
-#endif
-
diff --git a/src/database/pgsql_common.h b/src/database/pgsql_common.h
index 2762d57b68cb8dfae4acbfe8991dbc2bf36728b9..2f01da72af3661f2e167a180418de27b3b0ee824 100644
--- a/src/database/pgsql_common.h
+++ b/src/database/pgsql_common.h
@@ -7,7 +7,8 @@
  *  Written by Danny Auble <da@llnl.gov>
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -55,10 +56,6 @@
 #include <slurm/slurm_errno.h>
 #include "src/slurmctld/slurmctld.h"
 #include "src/common/xstring.h"
-
-#ifndef HAVE_PGSQL
-typedef void pgsql_conn_t;
-#else
 #include <libpq-fe.h>
 
 typedef struct {
@@ -105,5 +102,3 @@ extern int pgsql_db_create_table(PGconn *pgsql_db,
 extern int pgsql_db_make_table_current(PGconn *pgsql_db, char *table_name,
 				       storage_field_t *fields);
 #endif
-
-#endif
diff --git a/src/plugins/Makefile.am b/src/plugins/Makefile.am
index a8e77ec42c224cf914b3047a598704194cad4f0f..d1db142570292a66e717a6cbc8c7bb6ce613ecc9 100644
--- a/src/plugins/Makefile.am
+++ b/src/plugins/Makefile.am
@@ -1 +1 @@
-SUBDIRS = accounting_storage auth checkpoint crypto jobacct_gather jobcomp mpi proctrack sched select switch task
+SUBDIRS = accounting_storage auth checkpoint crypto jobacct_gather jobcomp mpi priority proctrack sched select switch task topology
diff --git a/src/plugins/Makefile.in b/src/plugins/Makefile.in
index 633a61fdd5f3547634f835b91011d472219ecd9f..56cfa2b0aa172af20c3d75011571ae8f9df93f11 100644
--- a/src/plugins/Makefile.in
+++ b/src/plugins/Makefile.in
@@ -40,14 +40,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -89,6 +93,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
@@ -248,7 +256,7 @@ target_os = @target_os@
 target_vendor = @target_vendor@
 top_builddir = @top_builddir@
 top_srcdir = @top_srcdir@
-SUBDIRS = accounting_storage auth checkpoint crypto jobacct_gather jobcomp mpi proctrack sched select switch task
+SUBDIRS = accounting_storage auth checkpoint crypto jobacct_gather jobcomp mpi priority proctrack sched select switch task topology
 all: all-recursive
 
 .SUFFIXES:
diff --git a/src/plugins/accounting_storage/Makefile.in b/src/plugins/accounting_storage/Makefile.in
index 888063d7b8150fc5c59d7d7596de5ec012831f20..b7f8ec83517b7f75a424cf8edf97b562f4d9fb0d 100644
--- a/src/plugins/accounting_storage/Makefile.in
+++ b/src/plugins/accounting_storage/Makefile.in
@@ -42,14 +42,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -91,6 +95,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
diff --git a/src/plugins/accounting_storage/filetxt/Makefile.in b/src/plugins/accounting_storage/filetxt/Makefile.in
index dc782ffc8f0885f129c688b0ddfa8b9164be7d01..f4b80875b89bf8ee9805b3ae0148ef594fbe7fff 100644
--- a/src/plugins/accounting_storage/filetxt/Makefile.in
+++ b/src/plugins/accounting_storage/filetxt/Makefile.in
@@ -43,14 +43,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -112,6 +116,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
diff --git a/src/plugins/accounting_storage/filetxt/accounting_storage_filetxt.c b/src/plugins/accounting_storage/filetxt/accounting_storage_filetxt.c
index 621f93008e492f7f0cc51b45d0ceaa4836a4d1ed..c08ad1853fb75063680b32436156ec4205f04f5f 100644
--- a/src/plugins/accounting_storage/filetxt/accounting_storage_filetxt.c
+++ b/src/plugins/accounting_storage/filetxt/accounting_storage_filetxt.c
@@ -8,7 +8,8 @@
  *  Written by Danny Auble <da@llnl.gov>
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -298,6 +299,12 @@ extern int acct_storage_p_add_wckeys(void *db_conn, uint32_t uid,
 	return SLURM_SUCCESS;
 }
 
+extern int acct_storage_p_add_reservation(void *db_conn, 
+					  acct_reservation_rec_t *resv)
+{
+	return SLURM_SUCCESS;
+}
+
 extern List acct_storage_p_modify_users(void *db_conn, uint32_t uid,
 				       acct_user_cond_t *user_q,
 				       acct_user_rec_t *user)
@@ -340,6 +347,12 @@ extern List acct_storage_p_modify_wckeys(void *db_conn, uint32_t uid,
 	return SLURM_SUCCESS;
 }
 
+extern int acct_storage_p_modify_reservation(void *db_conn, 
+					     acct_reservation_rec_t *resv)
+{
+	return SLURM_SUCCESS;
+}
+
 extern List acct_storage_p_remove_users(void *db_conn, uint32_t uid,
 				       acct_user_cond_t *user_q)
 {
@@ -383,6 +396,12 @@ extern List acct_storage_p_remove_wckeys(void *db_conn, uint32_t uid,
 	return NULL;
 }
 
+extern int acct_storage_p_remove_reservation(void *db_conn, 
+					     acct_reservation_rec_t *resv)
+{
+	return SLURM_SUCCESS;
+}
+
 extern List acct_storage_p_get_users(void *db_conn, uid_t uid,
 				     acct_user_cond_t *user_q)
 {
@@ -401,6 +420,11 @@ extern List acct_storage_p_get_clusters(void *db_conn, uid_t uid,
 	return NULL;
 }
 
+extern List acct_storage_p_get_config(void *db_conn)
+{
+	return NULL;
+}
+
 extern List acct_storage_p_get_associations(void *db_conn, uid_t uid,
 					    acct_association_cond_t *assoc_q)
 {
@@ -419,6 +443,12 @@ extern List acct_storage_p_get_wckeys(void *db_conn, uid_t uid,
 	return NULL;
 }
 
+extern List acct_storage_p_get_reservations(void *db_conn, uid_t uid,
+					    acct_reservation_cond_t *resv_cond)
+{
+	return NULL;
+}
+
 extern List acct_storage_p_get_txn(void *db_conn, uid_t uid,
 				   acct_txn_cond_t *txn_cond)
 {
@@ -435,7 +465,8 @@ extern int acct_storage_p_get_usage(void *db_conn, uid_t uid,
 }
 
 extern int acct_storage_p_roll_usage(void *db_conn, 
-				     time_t sent_start)
+				     time_t sent_start, time_t sent_end,
+				     uint16_t archive_data)
 {
 	int rc = SLURM_SUCCESS;
 
@@ -466,6 +497,7 @@ extern int clusteracct_storage_p_register_ctld(void *db_conn,
 
 extern int clusteracct_storage_p_cluster_procs(void *db_conn,
 					       char *cluster,
+					       char *cluster_nodes,
 					       uint32_t procs,
 					       time_t event_time)
 {
@@ -489,7 +521,7 @@ extern int jobacct_storage_p_job_start(void *db_conn, char *cluster_name,
 	int	i,
 		rc=SLURM_SUCCESS;
 	char	buf[BUFFER_SIZE], *account, *nodes;
-	char    *jname = NULL, *wckey = NULL;
+	char    *jname = NULL;
 	long	priority;
 	int track_steps = 0;
 
@@ -512,29 +544,11 @@ extern int jobacct_storage_p_job_start(void *db_conn, char *cluster_name,
 		   -1L : (long) job_ptr->priority;
 
 	if (job_ptr->name && job_ptr->name[0]) {
-		char *temp = NULL;
-		/* first set the jname to the job_ptr->name */
 		jname = xstrdup(job_ptr->name);
-		/* then grep for " since that is the delimiter for
-		   the wckey */
-		temp = strchr(jname, '\"');
-		if(temp) {
-			/* if we have a wckey set the " to NULL to
-			 * end the jname */
-			temp[0] = '\0';
-			/* increment and copy the remainder */
-			temp++;
-			wckey = xstrdup(temp);
-		}
-
 		for (i=0; jname[i]; i++) 
 			if (isspace(jname[i]))
 				jname[i]='_';
-	}
-
-	if(!jname || !jname[0]) {
-		/* free jname if something is allocated here */
-		xfree(jname);
+	} else {
 		jname = xstrdup("allocation");
 		track_steps = 1;
 	}
@@ -563,7 +577,6 @@ extern int jobacct_storage_p_job_start(void *db_conn, char *cluster_name,
 	rc = _print_record(job_ptr, job_ptr->start_time, buf);
 	
 	xfree(jname);
-	xfree(wckey);
 	return rc;
 }
 
@@ -729,7 +742,7 @@ extern int jobacct_storage_p_step_complete(void *db_conn,
 
 	if (jobacct == NULL) {
 		/* JobAcctGather=jobacct_gather/none, no data to process */
-		bzero(&dummy_jobacct, sizeof(dummy_jobacct));
+		memset(&dummy_jobacct, 0, sizeof(dummy_jobacct));
 		jobacct = &dummy_jobacct;
 	}
 	
diff --git a/src/plugins/accounting_storage/filetxt/filetxt_jobacct_process.c b/src/plugins/accounting_storage/filetxt/filetxt_jobacct_process.c
index cc4cc6d0d21c2a9064aa0ea5d280cf905b8e730d..f0245c8d5ba28645716179d251772c8f11bde9f0 100644
--- a/src/plugins/accounting_storage/filetxt/filetxt_jobacct_process.c
+++ b/src/plugins/accounting_storage/filetxt/filetxt_jobacct_process.c
@@ -9,7 +9,8 @@
  *  Written by Danny Auble <da@llnl.gov>
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -250,11 +251,15 @@ static jobacct_step_rec_t *_create_jobacct_step_rec(
 {
 	jobacct_step_rec_t *jobacct_step = create_jobacct_step_rec();
 	
-	jobacct_step->jobid = filetxt_step->header.jobnum;
 	jobacct_step->elapsed = filetxt_step->elapsed;
 	jobacct_step->end = filetxt_step->header.timestamp;
 	jobacct_step->exitcode = filetxt_step->exitcode;
 	jobacct_step->ncpus = filetxt_step->ncpus;
+	if(filetxt_step->nodes) {
+		hostlist_t hl = hostlist_create(filetxt_step->nodes);
+		jobacct_step->nnodes = hostlist_count(hl);
+		hostlist_destroy(hl);
+	}
 	jobacct_step->nodes = xstrdup(filetxt_step->nodes);
 	jobacct_step->requid = filetxt_step->requid;
 	memcpy(&jobacct_step->sacct, &filetxt_step->sacct, sizeof(sacct_t));
@@ -315,6 +320,11 @@ no_cond:
 	jobacct_job->partition = xstrdup(filetxt_job->header.partition);
 	jobacct_job->req_cpus = filetxt_job->ncpus;
 	jobacct_job->alloc_cpus = filetxt_job->ncpus;
+	if(filetxt_job->nodes) {
+		hostlist_t hl = hostlist_create(filetxt_job->nodes);
+		jobacct_job->alloc_nodes = hostlist_count(hl);
+		hostlist_destroy(hl);
+	}
 	jobacct_job->nodes = xstrdup(filetxt_job->nodes);
 	jobacct_job->priority = filetxt_job->priority;
 	jobacct_job->requid = filetxt_job->requid;
@@ -328,8 +338,14 @@ no_cond:
 	if(filetxt_job->steps) {
 		itr = list_iterator_create(filetxt_job->steps);
 		while((filetxt_step = list_next(itr))) {
-			list_append(jobacct_job->steps,
-				    _create_jobacct_step_rec(filetxt_step));
+			jobacct_step_rec_t *step =
+				_create_jobacct_step_rec(filetxt_step);
+			if(step) {
+				step->job_ptr = jobacct_job;
+				if(!jobacct_job->first_step_ptr)
+					jobacct_job->first_step_ptr = step;
+				list_append(jobacct_job->steps, step);
+			}
 		}
 		list_iterator_destroy(itr);
 	}
diff --git a/src/plugins/accounting_storage/filetxt/filetxt_jobacct_process.h b/src/plugins/accounting_storage/filetxt/filetxt_jobacct_process.h
index 62306c91f7a84f63923b4a5e53b3d6f959b45918..f7186a3dbf4ddf90601f9a3a96876ecabc376be8 100644
--- a/src/plugins/accounting_storage/filetxt/filetxt_jobacct_process.h
+++ b/src/plugins/accounting_storage/filetxt/filetxt_jobacct_process.h
@@ -9,7 +9,8 @@
  *  Written by Danny Auble <da@llnl.gov>
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/plugins/accounting_storage/mysql/Makefile.am b/src/plugins/accounting_storage/mysql/Makefile.am
index c7414d3eb473beeda7b9e3cb0350d4b1914812b7..78233a8d356e0cb508490f5db75d33bca0525abf 100644
--- a/src/plugins/accounting_storage/mysql/Makefile.am
+++ b/src/plugins/accounting_storage/mysql/Makefile.am
@@ -6,6 +6,7 @@ PLUGIN_FLAGS = -module -avoid-version --export-dynamic
 
 INCLUDES = -I$(top_srcdir) -I$(top_srcdir)/src/common
 
+if WITH_MYSQL
 pkglib_LTLIBRARIES = accounting_storage_mysql.la
 
 # Mysql storage plugin.
@@ -20,3 +21,8 @@ accounting_storage_mysql_la_LIBADD = \
 force:
 $(accounting_storage_mysql_la_LIBADD) : force
 	@cd `dirname $@` && $(MAKE) `basename $@`
+else
+EXTRA_accounting_storage_mysql_la_SOURCES = accounting_storage_mysql.c \
+		mysql_jobacct_process.c mysql_jobacct_process.h \
+		mysql_rollup.c mysql_rollup.h
+endif
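+
+# Editorial note (not part of this patch): keeping the sources in
+# EXTRA_accounting_storage_mysql_la_SOURCES when WITH_MYSQL is false
+# ensures "make dist" still ships them even though the plugin itself
+# is not built.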
diff --git a/src/plugins/accounting_storage/mysql/Makefile.in b/src/plugins/accounting_storage/mysql/Makefile.in
index fed89e5f1cff697d633d8ce619c8adea5b7a5e30..8b291bbf2266ad2e2e1072fb7ffa3146facac477 100644
--- a/src/plugins/accounting_storage/mysql/Makefile.in
+++ b/src/plugins/accounting_storage/mysql/Makefile.in
@@ -43,14 +43,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -77,19 +81,25 @@ am__installdirs = "$(DESTDIR)$(pkglibdir)"
 pkglibLTLIBRARIES_INSTALL = $(INSTALL)
 LTLIBRARIES = $(pkglib_LTLIBRARIES)
 am__DEPENDENCIES_1 =
-accounting_storage_mysql_la_DEPENDENCIES =  \
-	$(top_builddir)/src/database/libslurm_mysql.la \
-	$(am__DEPENDENCIES_1)
-am_accounting_storage_mysql_la_OBJECTS =  \
-	accounting_storage_mysql_la-accounting_storage_mysql.lo \
-	accounting_storage_mysql_la-mysql_jobacct_process.lo \
-	accounting_storage_mysql_la-mysql_rollup.lo
+@WITH_MYSQL_TRUE@accounting_storage_mysql_la_DEPENDENCIES = $(top_builddir)/src/database/libslurm_mysql.la \
+@WITH_MYSQL_TRUE@	$(am__DEPENDENCIES_1)
+am__accounting_storage_mysql_la_SOURCES_DIST =  \
+	accounting_storage_mysql.c mysql_jobacct_process.c \
+	mysql_jobacct_process.h mysql_rollup.c mysql_rollup.h
+@WITH_MYSQL_TRUE@am_accounting_storage_mysql_la_OBJECTS = accounting_storage_mysql_la-accounting_storage_mysql.lo \
+@WITH_MYSQL_TRUE@	accounting_storage_mysql_la-mysql_jobacct_process.lo \
+@WITH_MYSQL_TRUE@	accounting_storage_mysql_la-mysql_rollup.lo
+am__EXTRA_accounting_storage_mysql_la_SOURCES_DIST =  \
+	accounting_storage_mysql.c mysql_jobacct_process.c \
+	mysql_jobacct_process.h mysql_rollup.c mysql_rollup.h
 accounting_storage_mysql_la_OBJECTS =  \
 	$(am_accounting_storage_mysql_la_OBJECTS)
 accounting_storage_mysql_la_LINK = $(LIBTOOL) --tag=CC \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CCLD) \
 	$(accounting_storage_mysql_la_CFLAGS) $(CFLAGS) \
 	$(accounting_storage_mysql_la_LDFLAGS) $(LDFLAGS) -o $@
+@WITH_MYSQL_TRUE@am_accounting_storage_mysql_la_rpath = -rpath \
+@WITH_MYSQL_TRUE@	$(pkglibdir)
 DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm
 depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp
 am__depfiles_maybe = depfiles
@@ -102,8 +112,10 @@ CCLD = $(CC)
 LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
 	--mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \
 	$(LDFLAGS) -o $@
-SOURCES = $(accounting_storage_mysql_la_SOURCES)
-DIST_SOURCES = $(accounting_storage_mysql_la_SOURCES)
+SOURCES = $(accounting_storage_mysql_la_SOURCES) \
+	$(EXTRA_accounting_storage_mysql_la_SOURCES)
+DIST_SOURCES = $(am__accounting_storage_mysql_la_SOURCES_DIST) \
+	$(am__EXTRA_accounting_storage_mysql_la_SOURCES_DIST)
 ETAGS = etags
 CTAGS = ctags
 DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
@@ -117,6 +129,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
@@ -279,17 +295,21 @@ top_srcdir = @top_srcdir@
 AUTOMAKE_OPTIONS = foreign
 PLUGIN_FLAGS = -module -avoid-version --export-dynamic
 INCLUDES = -I$(top_srcdir) -I$(top_srcdir)/src/common
-pkglib_LTLIBRARIES = accounting_storage_mysql.la
+@WITH_MYSQL_TRUE@pkglib_LTLIBRARIES = accounting_storage_mysql.la
 
 # Mysql storage plugin.
-accounting_storage_mysql_la_SOURCES = accounting_storage_mysql.c \
-		mysql_jobacct_process.c mysql_jobacct_process.h \
-		mysql_rollup.c mysql_rollup.h
+@WITH_MYSQL_TRUE@accounting_storage_mysql_la_SOURCES = accounting_storage_mysql.c \
+@WITH_MYSQL_TRUE@		mysql_jobacct_process.c mysql_jobacct_process.h \
+@WITH_MYSQL_TRUE@		mysql_rollup.c mysql_rollup.h
 
-accounting_storage_mysql_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS)
-accounting_storage_mysql_la_CFLAGS = $(MYSQL_CFLAGS)
-accounting_storage_mysql_la_LIBADD = \
-	$(top_builddir)/src/database/libslurm_mysql.la $(MYSQL_LIBS)
+@WITH_MYSQL_TRUE@accounting_storage_mysql_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS)
+@WITH_MYSQL_TRUE@accounting_storage_mysql_la_CFLAGS = $(MYSQL_CFLAGS)
+@WITH_MYSQL_TRUE@accounting_storage_mysql_la_LIBADD = \
+@WITH_MYSQL_TRUE@	$(top_builddir)/src/database/libslurm_mysql.la $(MYSQL_LIBS)
+
+@WITH_MYSQL_FALSE@EXTRA_accounting_storage_mysql_la_SOURCES = accounting_storage_mysql.c \
+@WITH_MYSQL_FALSE@		mysql_jobacct_process.c mysql_jobacct_process.h \
+@WITH_MYSQL_FALSE@		mysql_rollup.c mysql_rollup.h
 
 all: all-am
 
@@ -352,7 +372,7 @@ clean-pkglibLTLIBRARIES:
 	  rm -f "$${dir}/so_locations"; \
 	done
 accounting_storage_mysql.la: $(accounting_storage_mysql_la_OBJECTS) $(accounting_storage_mysql_la_DEPENDENCIES) 
-	$(accounting_storage_mysql_la_LINK) -rpath $(pkglibdir) $(accounting_storage_mysql_la_OBJECTS) $(accounting_storage_mysql_la_LIBADD) $(LIBS)
+	$(accounting_storage_mysql_la_LINK) $(am_accounting_storage_mysql_la_rpath) $(accounting_storage_mysql_la_OBJECTS) $(accounting_storage_mysql_la_LIBADD) $(LIBS)
 
 mostlyclean-compile:
 	-rm -f *.$(OBJEXT)
@@ -592,9 +612,9 @@ uninstall-am: uninstall-pkglibLTLIBRARIES
 	tags uninstall uninstall-am uninstall-pkglibLTLIBRARIES
 
 
-force:
-$(accounting_storage_mysql_la_LIBADD) : force
-	@cd `dirname $@` && $(MAKE) `basename $@`
+@WITH_MYSQL_TRUE@force:
+@WITH_MYSQL_TRUE@$(accounting_storage_mysql_la_LIBADD) : force
+@WITH_MYSQL_TRUE@	@cd `dirname $@` && $(MAKE) `basename $@`
 # Tell versions [3.59,3.63) of GNU make to not export all variables.
 # Otherwise a system limit (for SysV at least) may be exceeded.
 .NOEXPORT:
diff --git a/src/plugins/accounting_storage/mysql/accounting_storage_mysql.c b/src/plugins/accounting_storage/mysql/accounting_storage_mysql.c
index 9820488c4ddd0c9cbc4ed1f43a7312bd8816feca..d3e2ecca8d56093d3471d90e64781b33b0ee20a6 100644
--- a/src/plugins/accounting_storage/mysql/accounting_storage_mysql.c
+++ b/src/plugins/accounting_storage/mysql/accounting_storage_mysql.c
@@ -4,12 +4,13 @@
  *  $Id: accounting_storage_mysql.c 13061 2008-01-22 21:23:56Z da $
  *****************************************************************************
  *  Copyright (C) 2004-2007 The Regents of the University of California.
- *  Copyright (C) 2008 Lawrence Livermore National Security.
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Danny Auble <da@llnl.gov>
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -87,18 +88,13 @@ const char plugin_name[] = "Accounting storage MYSQL plugin";
 const char plugin_type[] = "accounting_storage/mysql";
 const uint32_t plugin_version = 100;
 
-#ifdef HAVE_MYSQL
-
 static mysql_db_info_t *mysql_db_info = NULL;
 static char *mysql_db_name = NULL;
 static time_t global_last_rollup = 0;
 static pthread_mutex_t rollup_lock = PTHREAD_MUTEX_INITIALIZER;
 
-#define DEFAULT_ACCT_DB "slurm_acct_db"
 #define DELETE_SEC_BACK 86400
 
-
-
 char *acct_coord_table = "acct_coord_table";
 char *acct_table = "acct_table";
 char *assoc_day_table = "assoc_day_usage_table";
@@ -113,6 +109,7 @@ char *event_table = "cluster_event_table";
 char *job_table = "job_table";
 char *last_ran_table = "last_ran_table";
 char *qos_table = "qos_table";
+char *resv_table = "resv_table";
 char *step_table = "step_table";
 char *txn_table = "txn_table";
 char *user_table = "user_table";
@@ -252,9 +249,10 @@ static uint32_t _get_wckeyid(mysql_conn_t *mysql_conn, char **name,
 		/* since we are unable to rely on uids here (someone could
 		   not have their uid in the system yet) we must
 		   first get the user name from the associd */
-		if(!(user = _get_user_from_associd(mysql_conn, associd)))
+		if(!(user = _get_user_from_associd(mysql_conn, associd))) {
+			error("No user for associd %u", associd);
 			goto no_wckeyid;
-
+		}
 		/* get the default key */
 		if(!*name) {
 			acct_user_rec_t user_rec;
@@ -262,10 +260,13 @@ static uint32_t _get_wckeyid(mysql_conn_t *mysql_conn, char **name,
 			user_rec.uid = NO_VAL;
 			user_rec.name = user;
 			if(assoc_mgr_fill_in_user(mysql_conn, &user_rec,
-						  1) != SLURM_SUCCESS) {
-				error("No user by name of %s", user);
+						  1, NULL) != SLURM_SUCCESS) {
+				error("No user by name of %s assoc %u",
+				      user, associd);
+				xfree(user);
 				goto no_wckeyid;
 			}
+			
 			if(user_rec.default_wckey)
 				*name = xstrdup_printf("*%s", 
 						       user_rec.default_wckey);
@@ -279,7 +280,8 @@ static uint32_t _get_wckeyid(mysql_conn_t *mysql_conn, char **name,
 		wckey_rec.user = user;
 		wckey_rec.cluster = cluster;
 		if(assoc_mgr_fill_in_wckey(mysql_conn, &wckey_rec,
-					   1, NULL) != SLURM_SUCCESS) {
+					   ACCOUNTING_ENFORCE_WCKEYS,
+					   NULL) != SLURM_SUCCESS) {
 			List wckey_list = NULL;
 			acct_wckey_rec_t *wckey_ptr = NULL;
 						
@@ -290,7 +292,7 @@ static uint32_t _get_wckeyid(mysql_conn_t *mysql_conn, char **name,
 			wckey_ptr->user = xstrdup(user);
 			wckey_ptr->cluster = xstrdup(cluster);
 			list_append(wckey_list, wckey_ptr);
-			/* info("adding wckey '%s' '%s' '%s'",  */
+/* 			info("adding wckey '%s' '%s' '%s'", */
 /* 				     wckey_ptr->name, wckey_ptr->user, */
 /* 				     wckey_ptr->cluster); */
 			/* we have already checked to make
@@ -304,7 +306,8 @@ static uint32_t _get_wckeyid(mysql_conn_t *mysql_conn, char **name,
 				acct_storage_p_commit(mysql_conn, 1);
 			/* If that worked lets get it */
 			assoc_mgr_fill_in_wckey(mysql_conn, &wckey_rec,
-						1, NULL);
+						ACCOUNTING_ENFORCE_WCKEYS,
+						NULL);
 				
 			list_destroy(wckey_list);
 		}
@@ -478,14 +481,15 @@ static int _setup_association_limits(acct_association_rec_t *assoc,
 	if(!assoc)
 		return SLURM_ERROR;
 	
-	if((int)assoc->fairshare >= 0) {
+	if((int)assoc->shares_raw >= 0) {
 		xstrcat(*cols, ", fairshare");
-		xstrfmtcat(*vals, ", %u", assoc->fairshare);
-		xstrfmtcat(*extra, ", fairshare=%u", assoc->fairshare);
-	} else if (((int)assoc->fairshare == INFINITE) || get_fs) {
+		xstrfmtcat(*vals, ", %u", assoc->shares_raw);
+		xstrfmtcat(*extra, ", fairshare=%u", assoc->shares_raw);
+	} else if (((int)assoc->shares_raw == INFINITE) || get_fs) {
 		xstrcat(*cols, ", fairshare");
 		xstrcat(*vals, ", 1");
-		xstrcat(*extra, ", fairshare=1");		
+		xstrcat(*extra, ", fairshare=1");
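+		/* also record the default in the in-memory association so
+		 * it matches the fairshare=1 value just written above */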
+		assoc->shares_raw = 1;
 	} 
 
 	if((int)assoc->grp_cpu_mins >= 0) {
@@ -854,10 +858,77 @@ static int _setup_qos_limits(acct_qos_rec_t *qos,
 			   qos->job_flags);
 	}
 
+	if((int)qos->usage_factor >= 0) {
+		xstrcat(*cols, ", usage_factor");
+		xstrfmtcat(*vals, ", %f", qos->usage_factor);
+		xstrfmtcat(*extra, ", usage_factor=%f", qos->usage_factor);
+	} else if((int)qos->usage_factor == INFINITE) {
+		xstrcat(*cols, ", usage_factor");
+		xstrcat(*vals, ", 1");
+		xstrcat(*extra, ", usage_factor=1");
+	}
+
 	return SLURM_SUCCESS;
 
 }
 
+static int _setup_resv_limits(acct_reservation_rec_t *resv,
+			      char **cols, char **vals,
+			      char **extra)
+{	
+	/* strip off the action item from the flags */
+
+	if(resv->assocs) {
+		xstrcat(*cols, ", assoclist");
+		xstrfmtcat(*vals, ", \"%s\"", resv->assocs);
+		xstrfmtcat(*extra, ", assoclist=\"%s\"", resv->assocs);
+	}
+
+	if(resv->cpus != (uint32_t)NO_VAL) {
+		xstrcat(*cols, ", cpus");
+		xstrfmtcat(*vals, ", %u", resv->cpus);
+		xstrfmtcat(*extra, ", cpus=%u", resv->cpus);		
+	}
+	
+	if(resv->flags != (uint16_t)NO_VAL) {
+		xstrcat(*cols, ", flags");
+		xstrfmtcat(*vals, ", %u", resv->flags);
+		xstrfmtcat(*extra, ", flags=%u", resv->flags);		
+	}
+
+	if(resv->name) {
+		xstrcat(*cols, ", name");
+		xstrfmtcat(*vals, ", \"%s\"", resv->name);
+		xstrfmtcat(*extra, ", name=\"%s\"", resv->name);
+	}
+	
+	if(resv->nodes) {
+		xstrcat(*cols, ", nodelist");
+		xstrfmtcat(*vals, ", \"%s\"", resv->nodes);
+		xstrfmtcat(*extra, ", nodelist=\"%s\"", resv->nodes);
+	}
+	
+	if(resv->node_inx) {
+		xstrcat(*cols, ", node_inx");
+		xstrfmtcat(*vals, ", \"%s\"", resv->node_inx);
+		xstrfmtcat(*extra, ", node_inx=\"%s\"", resv->node_inx);
+	}
+	
+	if(resv->time_end) {
+		xstrcat(*cols, ", end");
+		xstrfmtcat(*vals, ", %u", resv->time_end);
+		xstrfmtcat(*extra, ", end=%u", resv->time_end);		
+	}
+
+	if(resv->time_start) {
+		xstrcat(*cols, ", start");
+		xstrfmtcat(*vals, ", %u", resv->time_start);
+		xstrfmtcat(*extra, ", start=%u", resv->time_start);		
+	}
+
+	return SLURM_SUCCESS;
+}
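+
+/* Illustrative sketch (editorial, not part of this patch): for a
+ * reservation with only name "maint" and cpus=128 set, the strings
+ * built above come out roughly as
+ *	cols:  ", cpus, name"
+ *	vals:  ", 128, \"maint\""
+ *	extra: ", cpus=128, name=\"maint\""
+ * and the caller splices them into an "insert ... on duplicate key
+ * update" statement (see acct_storage_p_add_reservation below). */
+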
 /* When doing a select on this table, every selected field should have
  * the prefix t1. */
 static int _setup_association_cond_limits(acct_association_cond_t *assoc_cond,
@@ -1294,38 +1365,159 @@ static int _setup_wckey_cond_limits(acct_wckey_cond_t *wckey_cond,
 	return set;
 }
 
+static int _setup_resv_cond_limits(acct_reservation_cond_t *resv_cond,
+				   char **extra)
+{
+	int set = 0;
+	ListIterator itr = NULL;
+	char *object = NULL;
+	char *prefix = "t1";
+	time_t now = time(NULL);
+
+	if(!resv_cond)
+		return 0;
+
+	if(resv_cond->cluster_list && list_count(resv_cond->cluster_list)) {
+		set = 0;
+		if(*extra)
+			xstrcat(*extra, " && (");
+		else
+			xstrcat(*extra, " where (");
+		itr = list_iterator_create(resv_cond->cluster_list);
+		while((object = list_next(itr))) {
+			if(set) 
+				xstrcat(*extra, " || ");
+			xstrfmtcat(*extra, "%s.cluster=\"%s\"", prefix, object);
+			set = 1;
+		}
+		list_iterator_destroy(itr);
+		xstrcat(*extra, ")");
+	}
+
+	if(resv_cond->id_list && list_count(resv_cond->id_list)) {
+		set = 0;
+		if(*extra)
+			xstrcat(*extra, " && (");
+		else
+			xstrcat(*extra, " where (");
+		itr = list_iterator_create(resv_cond->id_list);
+		while((object = list_next(itr))) {
+			if(set) 
+				xstrcat(*extra, " || ");
+			xstrfmtcat(*extra, "%s.id=%s", prefix, object);
+			set = 1;
+		}
+		list_iterator_destroy(itr);
+		xstrcat(*extra, ")");
+	}
+	
+	if(resv_cond->name_list && list_count(resv_cond->name_list)) {
+		set = 0;
+		if(*extra)
+			xstrcat(*extra, " && (");
+		else
+			xstrcat(*extra, " where (");
+		itr = list_iterator_create(resv_cond->name_list);
+		while((object = list_next(itr))) {
+			if(set) 
+				xstrcat(*extra, " || ");
+			xstrfmtcat(*extra, "%s.name=\"%s\"", prefix, object);
+			set = 1;
+		}
+		list_iterator_destroy(itr);
+		xstrcat(*extra, ")");
+	}
+
+	if(resv_cond->time_start) {
+		if(!resv_cond->time_end)
+			resv_cond->time_end = now;
+
+		if(*extra)
+			xstrcat(*extra, " && (");
+		else
+			xstrcat(*extra, " where (");
+		xstrfmtcat(*extra, 
+			   "(t1.start < %d "
+			   "&& (t1.end >= %d || t1.end = 0)))",
+			   resv_cond->time_end, resv_cond->time_start);
+	} else if(resv_cond->time_end) {
+		if(*extra)
+			xstrcat(*extra, " && (");
+		else
+			xstrcat(*extra, " where (");
+		xstrfmtcat(*extra, 
+			   "(t1.start < %d))", resv_cond->time_end);
+	}
+
+	return set;
+}
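+
+/* Illustrative note (editorial, not part of this patch): the time
+ * window test above selects every reservation overlapping
+ * [time_start, time_end): a row matches when it starts before the
+ * requested end and either ends at or after the requested start or is
+ * still open (end = 0).  E.g. for time_start=100, time_end=200 a
+ * reservation with start=50, end=150 matches, while one with
+ * start=50, end=90 does not. */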
+
 static uint32_t _get_parent_id(
-       mysql_conn_t *mysql_conn, char *parent, char *cluster)
+	mysql_conn_t *mysql_conn, char *parent, char *cluster)
 {
-       uint32_t parent_id = 0;
-       MYSQL_RES *result = NULL;
-       MYSQL_ROW row;
-       char *query = NULL;
-       
-       xassert(parent); 
-       xassert(cluster);
-
-       query = xstrdup_printf("select id from %s where user='' "
-                              "and deleted = 0 and acct=\"%s\" "
-                              "and cluster=\"%s\";", 
-                              assoc_table, parent, cluster);
-       debug4("%d(%d) query\n%s", mysql_conn->conn, __LINE__, query);
-       
-       if(!(result = mysql_db_query_ret(mysql_conn->db_conn, query, 1))) {
-               xfree(query);
-               return 0;
-       }
-       xfree(query);
-
-       if((row = mysql_fetch_row(result))) {
-               if(row[0])
-                       parent_id = atoi(row[0]);       
-       } else 
-               error("no association for parent %s on cluster %s",
-                     parent, cluster);
-       mysql_free_result(result);
-
-       return parent_id;
+	uint32_t parent_id = 0;
+	MYSQL_RES *result = NULL;
+	MYSQL_ROW row;
+	char *query = NULL;
+	
+	xassert(parent); 
+	xassert(cluster);
+
+	query = xstrdup_printf("select id from %s where user='' "
+			       "and deleted = 0 and acct=\"%s\" "
+			       "and cluster=\"%s\";", 
+			       assoc_table, parent, cluster);
+	debug4("%d(%d) query\n%s", mysql_conn->conn, __LINE__, query);
+	
+	if(!(result = mysql_db_query_ret(mysql_conn->db_conn, query, 1))) {
+		xfree(query);
+		return 0;
+	}
+	xfree(query);
+
+	if((row = mysql_fetch_row(result))) {
+		if(row[0])
+			parent_id = atoi(row[0]);	
+	} else 
+		error("no association for parent %s on cluster %s",
+		      parent, cluster);
+	mysql_free_result(result);
+
+	return parent_id;
+}
+
+static int _set_assoc_lft_rgt(
+	mysql_conn_t *mysql_conn, acct_association_rec_t *assoc)
+{
+	MYSQL_RES *result = NULL;
+	MYSQL_ROW row;
+	char *query = NULL;
+	int rc = SLURM_ERROR;
+
+	xassert(assoc->id);
+
+	query = xstrdup_printf("select lft, rgt from %s where id=%u;", 
+			       assoc_table, assoc->id);
+	debug4("%d(%d) query\n%s", mysql_conn->conn, __LINE__, query);
+	
+	if(!(result = mysql_db_query_ret(mysql_conn->db_conn, query, 1))) {
+		xfree(query);
+		return SLURM_ERROR;
+	}
+	xfree(query);
+
+	if((row = mysql_fetch_row(result))) {
+		if(row[0])
+			assoc->lft = atoi(row[0]);	
+		if(row[1])
+			assoc->rgt = atoi(row[1]);	
+		rc = SLURM_SUCCESS;
+	} else 
+		error("no association (%u)", assoc->id);
+	mysql_free_result(result);
+
+	return rc;
 }
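+
+/* Illustrative note (editorial, not part of this patch): lft/rgt are a
+ * nested-set encoding of the association tree, so a whole subtree can
+ * be selected with one range test, roughly
+ *	select id from assoc_table where lft between <lft> and <rgt>;
+ * using the values _set_assoc_lft_rgt() fetched above (<lft>/<rgt>
+ * are placeholders for the parent row's values). */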
 
 /* This function will take the object given and free it later so it
@@ -1818,7 +2010,7 @@ static int _modify_unset_users(mysql_conn_t *mysql_conn,
 				continue;
 			}
 			/* We do want to send all user accounts though */
-			mod_assoc->fairshare = NO_VAL;
+			mod_assoc->shares_raw = NO_VAL;
 			if(row[ASSOC_PART][0]) { 
 				// see if there is a partition name
 				object = xstrdup_printf(
@@ -2398,10 +2590,9 @@ static int _get_db_index(MYSQL *db_conn,
    user or not.
 */
 static int _get_usage_for_list(mysql_conn_t *mysql_conn,
-			    slurmdbd_msg_type_t type, List object_list, 
-			    time_t start, time_t end)
+			       slurmdbd_msg_type_t type, List object_list, 
+			       time_t start, time_t end)
 {
-#ifdef HAVE_MYSQL
 	int rc = SLURM_SUCCESS;
 	int i=0;
 	MYSQL_RES *result = NULL;
@@ -2513,7 +2704,7 @@ static int _get_usage_for_list(mysql_conn_t *mysql_conn,
 		query = xstrdup_printf(
 			"select %s from %s "
 			"where (period_start < %d && period_start >= %d) "
-			"&& %s order by id, period_start;",
+			"&& (%s) order by id, period_start;",
 			tmp, my_usage_table, end, start, id_str);
 		break;
 	default:
@@ -2607,9 +2798,6 @@ static int _get_usage_for_list(mysql_conn_t *mysql_conn,
 
 
 	return rc;
-#else
-	return SLURM_ERROR;
-#endif
 }
 
 static mysql_db_info_t *_mysql_acct_create_db_info()
@@ -2617,10 +2805,12 @@ static mysql_db_info_t *_mysql_acct_create_db_info()
 	mysql_db_info_t *db_info = xmalloc(sizeof(mysql_db_info_t));
 	db_info->port = slurm_get_accounting_storage_port();
 	if(!db_info->port) {
-		db_info->port = 3306;
+		db_info->port = DEFAULT_MYSQL_PORT;
 		slurm_set_accounting_storage_port(db_info->port);
 	}
 	db_info->host = slurm_get_accounting_storage_host();	
+	db_info->backup = slurm_get_accounting_storage_backup_host();
+	
 	db_info->user = slurm_get_accounting_storage_user();	
 	db_info->pass = slurm_get_accounting_storage_pass();	
 	return db_info;
@@ -2695,8 +2885,9 @@ static int _mysql_acct_check_tables(MYSQL *db_conn)
 		{ "deleted", "tinyint default 0" },
 		{ "name", "tinytext not null" },
 		{ "control_host", "tinytext not null default ''" },
-		{ "control_port", "mediumint not null default 0" },
-		{ "rpc_version", "mediumint not null default 0" },
+		{ "control_port", "int unsigned not null default 0" },
+		{ "rpc_version", "smallint unsigned not null default 0" },
+		{ "classification", "smallint unsigned default 0" },
 		{ NULL, NULL}		
 	};
 
@@ -2709,6 +2900,7 @@ static int _mysql_acct_check_tables(MYSQL *db_conn)
 		{ "cpu_count", "int default 0" },
 		{ "alloc_cpu_secs", "bigint default 0" },
 		{ "down_cpu_secs", "bigint default 0" },
+		{ "pdown_cpu_secs", "bigint default 0" },
 		{ "idle_cpu_secs", "bigint default 0" },
 		{ "resv_cpu_secs", "bigint default 0" },
 		{ "over_cpu_secs", "bigint default 0" },
@@ -2719,19 +2911,21 @@ static int _mysql_acct_check_tables(MYSQL *db_conn)
 		{ "node_name", "tinytext default '' not null" },
 		{ "cluster", "tinytext not null" },
 		{ "cpu_count", "int not null" },
+		{ "state", "smallint default 0 not null" },
 		{ "period_start", "int unsigned not null" },
 		{ "period_end", "int unsigned default 0 not null" },
 		{ "reason", "tinytext not null" },
+		{ "cluster_nodes", "text not null default ''" },
 		{ NULL, NULL}		
 	};
 
 	storage_field_t job_table_fields[] = {
 		{ "id", "int not null auto_increment" },
 		{ "deleted", "tinyint default 0" },
-		{ "jobid", "mediumint unsigned not null" },
-		{ "associd", "mediumint unsigned not null" },
+		{ "jobid", "int unsigned not null" },
+		{ "associd", "int unsigned not null" },
 		{ "wckey", "tinytext not null default ''" },
-		{ "wckeyid", "mediumint unsigned not null" },
+		{ "wckeyid", "int unsigned not null" },
 		{ "uid", "smallint unsigned not null" },
 		{ "gid", "smallint unsigned not null" },
 		{ "cluster", "tinytext not null" },
@@ -2743,16 +2937,20 @@ static int _mysql_acct_check_tables(MYSQL *db_conn)
 		{ "start", "int unsigned default 0 not null" },
 		{ "end", "int unsigned default 0 not null" },
 		{ "suspended", "int unsigned default 0 not null" },
+		{ "timelimit", "int unsigned default 0 not null" },
 		{ "name", "tinytext not null" }, 
 		{ "track_steps", "tinyint not null" },
 		{ "state", "smallint not null" }, 
 		{ "comp_code", "int default 0 not null" },
 		{ "priority", "int not null" },
-		{ "req_cpus", "mediumint unsigned not null" }, 
-		{ "alloc_cpus", "mediumint unsigned not null" }, 
+		{ "req_cpus", "int unsigned not null" }, 
+		{ "alloc_cpus", "int unsigned not null" }, 
+		{ "alloc_nodes", "int unsigned not null" }, 
 		{ "nodelist", "text" },
+		{ "node_inx", "text" },
 		{ "kill_requid", "smallint default -1 not null" },
 		{ "qos", "smallint default 0" },
+		{ "resvid", "int unsigned not null" },
 		{ NULL, NULL}
 	};
 
@@ -2786,6 +2984,22 @@ static int _mysql_acct_check_tables(MYSQL *db_conn)
 		{ "preemptees", "text not null default ''" },
 		{ "preemptors", "text not null default ''" },
 		{ "priority", "int default 0" },
+		{ "usage_factor", "float default 1.0 not null" },
+		{ NULL, NULL}		
+	};
+
+	storage_field_t resv_table_fields[] = {
+		{ "id", "int unsigned default 0 not null" },
+		{ "name", "text not null" },
+		{ "cluster", "text not null" },
+		{ "deleted", "tinyint default 0" },
+		{ "cpus", "int unsigned not null" },
+		{ "assoclist", "text not null default ''" },
+		{ "nodelist", "text not null default ''" },
+		{ "node_inx", "text not null default ''" },
+		{ "start", "int unsigned default 0 not null"},
+		{ "end", "int unsigned default 0 not null" },
+		{ "flags", "smallint default 0 not null" },
 		{ NULL, NULL}		
 	};
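+
+	/* Illustrative sketch (editorial, not part of this patch):
+	 * mysql_db_create_table() turns a storage_field_t array such as
+	 * the one above into a statement of roughly the form
+	 *	create table if not exists resv_table
+	 *	(id int unsigned default 0 not null, name text not null,
+	 *	 ..., primary key (id, start, cluster(20)));
+	 * with the key clause passed in by the caller (see below). */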
 
@@ -2798,36 +3012,40 @@ static int _mysql_acct_check_tables(MYSQL *db_conn)
 		{ "suspended", "int unsigned default 0 not null" },
 		{ "name", "text not null" },
 		{ "nodelist", "text not null" },
+		{ "node_inx", "text" },
 		{ "state", "smallint not null" },
 		{ "kill_requid", "smallint default -1 not null" },
 		{ "comp_code", "int default 0 not null" },
-		{ "cpus", "mediumint unsigned not null" },
+		{ "nodes", "int unsigned not null" },
+		{ "cpus", "int unsigned not null" },
+		{ "tasks", "int unsigned not null" },
+		{ "task_dist", "smallint default 0" },
 		{ "user_sec", "int unsigned default 0 not null" },
 		{ "user_usec", "int unsigned default 0 not null" },
 		{ "sys_sec", "int unsigned default 0 not null" },
 		{ "sys_usec", "int unsigned default 0 not null" },
 		{ "max_vsize", "int unsigned default 0 not null" },
 		{ "max_vsize_task", "smallint unsigned default 0 not null" },
-		{ "max_vsize_node", "mediumint unsigned default 0 not null" },
+		{ "max_vsize_node", "int unsigned default 0 not null" },
 		{ "ave_vsize", "float default 0.0 not null" },
 		{ "max_rss", "int unsigned default 0 not null" },
 		{ "max_rss_task", "smallint unsigned default 0 not null" },
-		{ "max_rss_node", "mediumint unsigned default 0 not null" },
+		{ "max_rss_node", "int unsigned default 0 not null" },
 		{ "ave_rss", "float default 0.0 not null" },
-		{ "max_pages", "mediumint unsigned default 0 not null" },
+		{ "max_pages", "int unsigned default 0 not null" },
 		{ "max_pages_task", "smallint unsigned default 0 not null" },
-		{ "max_pages_node", "mediumint unsigned default 0 not null" },
+		{ "max_pages_node", "int unsigned default 0 not null" },
 		{ "ave_pages", "float default 0.0 not null" },
-		{ "min_cpu", "mediumint unsigned default 0 not null" },
+		{ "min_cpu", "int unsigned default 0 not null" },
 		{ "min_cpu_task", "smallint unsigned default 0 not null" },
-		{ "min_cpu_node", "mediumint unsigned default 0 not null" },
+		{ "min_cpu_node", "int unsigned default 0 not null" },
 		{ "ave_cpu", "float default 0.0 not null" },
 		{ NULL, NULL}
 	};
 
 	storage_field_t suspend_table_fields[] = {
 		{ "id", "int not null" },
-		{ "associd", "mediumint not null" },
+		{ "associd", "int not null" },
 		{ "start", "int unsigned default 0 not null" },
 		{ "end", "int unsigned default 0 not null" },
 		{ NULL, NULL}		
@@ -2872,6 +3090,8 @@ static int _mysql_acct_check_tables(MYSQL *db_conn)
 		{ "id", "int not null" },
 		{ "period_start", "int unsigned not null" },
 		{ "alloc_cpu_secs", "bigint default 0" },
+		{ "resv_cpu_secs", "bigint default 0" },
+		{ "over_cpu_secs", "bigint default 0" },
 		{ NULL, NULL}		
 	};
 
@@ -3076,6 +3296,12 @@ static int _mysql_acct_check_tables(MYSQL *db_conn)
 				 ", primary key (id, stepid))") == SLURM_ERROR)
 		return SLURM_ERROR;
 
+	if(mysql_db_create_table(db_conn, resv_table,
+				 resv_table_fields, 
+				 ", primary key (id, start, cluster(20)))")
+	   == SLURM_ERROR)
+		return SLURM_ERROR;
+
 	if(mysql_db_create_table(db_conn, suspend_table,
 				 suspend_table_fields, 
 				 ")") == SLURM_ERROR)
@@ -3140,7 +3366,6 @@ static int _mysql_acct_check_tables(MYSQL *db_conn)
 
 	return rc;
 }
-#endif
 
 /*
  * init() is called when the plugin is loaded, before any other functions
@@ -3150,14 +3375,8 @@ extern int init ( void )
 {
 	static int first = 1;
 	int rc = SLURM_SUCCESS;
-#ifdef HAVE_MYSQL
 	MYSQL *db_conn = NULL;
 	char *location = NULL;
-#else
-	fatal("No MySQL database was found on the machine. "
-	      "Please check the config.log from the run of configure "
-	      "and run again.");
-#endif
 
 	/* since this can be loaded from many different places
 	   only tell us once. */
@@ -3174,25 +3393,24 @@ extern int init ( void )
 		xfree(cluster_name);
 	}
 
-#ifdef HAVE_MYSQL
 	mysql_db_info = _mysql_acct_create_db_info();
 
 	location = slurm_get_accounting_storage_loc();
 	if(!location)
-		mysql_db_name = xstrdup(DEFAULT_ACCT_DB);
+		mysql_db_name = xstrdup(DEFAULT_ACCOUNTING_DB);
 	else {
 		int i = 0;
 		while(location[i]) {
 			if(location[i] == '.' || location[i] == '/') {
 				debug("%s doesn't look like a database "
 				      "name using %s",
-				      location, DEFAULT_ACCT_DB);
+				      location, DEFAULT_ACCOUNTING_DB);
 				break;
 			}
 			i++;
 		}
 		if(location[i]) {
-			mysql_db_name = xstrdup(DEFAULT_ACCT_DB);
+			mysql_db_name = xstrdup(DEFAULT_ACCOUNTING_DB);
 			xfree(location);
 		} else
 			mysql_db_name = location;
@@ -3208,8 +3426,6 @@ extern int init ( void )
 	rc = _mysql_acct_check_tables(db_conn);
 
 	mysql_close_db_connection(&db_conn);
-	
-#endif		
 
 	if(rc == SLURM_SUCCESS)
 		verbose("%s loaded", plugin_name);
@@ -3221,21 +3437,16 @@ extern int init ( void )
 
 extern int fini ( void )
 {
-#ifdef HAVE_MYSQL
 	destroy_mysql_db_info(mysql_db_info);		
 	xfree(mysql_db_name);
 	xfree(default_qos_str);
 	mysql_cleanup();
 	return SLURM_SUCCESS;
-#else
-	return SLURM_ERROR;
-#endif
 }
 
 extern void *acct_storage_p_get_connection(bool make_agent, int conn_num,
 					   bool rollback)
 {
-#ifdef HAVE_MYSQL
 	mysql_conn_t *mysql_conn = xmalloc(sizeof(mysql_conn_t));
 	
 	if(!mysql_db_info)
@@ -3255,15 +3466,10 @@ extern void *acct_storage_p_get_connection(bool make_agent, int conn_num,
 		errno = SLURM_SUCCESS;
 	}
 	return (void *)mysql_conn;
-#else
-	return NULL;
-#endif
 }
 
 extern int acct_storage_p_close_connection(mysql_conn_t **mysql_conn)
 {
-#ifdef HAVE_MYSQL
-
 	if(!mysql_conn || !(*mysql_conn))
 		return SLURM_SUCCESS;
 
@@ -3273,14 +3479,10 @@ extern int acct_storage_p_close_connection(mysql_conn_t **mysql_conn)
 	xfree((*mysql_conn));
 
 	return SLURM_SUCCESS;
-#else
-	return SLURM_ERROR;
-#endif
 }
 
 extern int acct_storage_p_commit(mysql_conn_t *mysql_conn, bool commit)
 {
-#ifdef HAVE_MYSQL
 	if(_check_connection(mysql_conn) != SLURM_SUCCESS)
 		return SLURM_ERROR;
 
@@ -3380,22 +3582,22 @@ extern int acct_storage_p_commit(mysql_conn_t *mysql_conn, bool commit)
 			case ACCT_REMOVE_USER:
 			case ACCT_ADD_COORD:
 			case ACCT_REMOVE_COORD:
-				rc = assoc_mgr_update_local_users(object);
+				rc = assoc_mgr_update_users(object);
 				break;
 			case ACCT_ADD_ASSOC:
 			case ACCT_MODIFY_ASSOC:
 			case ACCT_REMOVE_ASSOC:
-				rc = assoc_mgr_update_local_assocs(object);
+				rc = assoc_mgr_update_assocs(object);
 				break;
 			case ACCT_ADD_QOS:
 			case ACCT_MODIFY_QOS:
 			case ACCT_REMOVE_QOS:
-				rc = assoc_mgr_update_local_qos(object);
+				rc = assoc_mgr_update_qos(object);
 				break;
 			case ACCT_ADD_WCKEY:
 			case ACCT_MODIFY_WCKEY:
 			case ACCT_REMOVE_WCKEY:
-				rc = assoc_mgr_update_local_wckeys(object);
+				rc = assoc_mgr_update_wckeys(object);
 				break;
 			case ACCT_UPDATE_NOTSET:
 			default:
@@ -3411,15 +3613,11 @@ extern int acct_storage_p_commit(mysql_conn_t *mysql_conn, bool commit)
 	list_flush(mysql_conn->update_list);
 
 	return SLURM_SUCCESS;
-#else
-	return SLURM_ERROR;
-#endif
 }
 
 extern int acct_storage_p_add_users(mysql_conn_t *mysql_conn, uint32_t uid,
 				    List user_list)
 {
-#ifdef HAVE_MYSQL
 	ListIterator itr = NULL;
 	int rc = SLURM_SUCCESS;
 	acct_user_rec_t *object = NULL;
@@ -3551,15 +3749,11 @@ extern int acct_storage_p_add_users(mysql_conn_t *mysql_conn, uint32_t uid,
 	list_destroy(wckey_list);
 
 	return rc;
-#else
-	return SLURM_ERROR;
-#endif
 }
 
 extern int acct_storage_p_add_coord(mysql_conn_t *mysql_conn, uint32_t uid, 
 				    List acct_list, acct_user_cond_t *user_cond)
 {
-#ifdef HAVE_MYSQL
 	char *query = NULL, *user = NULL, *acct = NULL;
 	char *user_name = NULL, *txn_query = NULL;
 	ListIterator itr, itr2;
@@ -3643,15 +3837,11 @@ extern int acct_storage_p_add_coord(mysql_conn_t *mysql_conn, uint32_t uid,
 	}
 	
 	return SLURM_SUCCESS;
-#else
-	return SLURM_ERROR;
-#endif
 }
 
 extern int acct_storage_p_add_accts(mysql_conn_t *mysql_conn, uint32_t uid, 
 				    List acct_list)
 {
-#ifdef HAVE_MYSQL
 	ListIterator itr = NULL;
 	int rc = SLURM_SUCCESS;
 	acct_account_rec_t *object = NULL;
@@ -3762,15 +3952,11 @@ extern int acct_storage_p_add_accts(mysql_conn_t *mysql_conn, uint32_t uid,
 	list_destroy(assoc_list);
 
 	return rc;
-#else
-	return SLURM_ERROR;
-#endif
 }
 
 extern int acct_storage_p_add_clusters(mysql_conn_t *mysql_conn, uint32_t uid, 
 				       List cluster_list)
 {
-#ifdef HAVE_MYSQL
 	ListIterator itr = NULL;
 	int rc = SLURM_SUCCESS;
 	acct_cluster_rec_t *object = NULL;
@@ -3806,12 +3992,13 @@ extern int acct_storage_p_add_clusters(mysql_conn_t *mysql_conn, uint32_t uid,
 						  &vals, &extra,
 						  QOS_LEVEL_SET, 1);
 		xstrfmtcat(query, 
-			   "insert into %s (creation_time, mod_time, name) "
-			   "values (%d, %d, \"%s\") "
+			   "insert into %s (creation_time, mod_time, "
+			   "name, classification) "
+			   "values (%d, %d, \"%s\", %u) "
 			   "on duplicate key update deleted=0, mod_time=%d, "
 			   "control_host='', control_port=0;",
 			   cluster_table, 
-			   now, now, object->name,
+			   now, now, object->name, object->classification,
 			   now);
 		debug3("%d(%d) query\n%s",
 		       mysql_conn->conn, __LINE__, query);
@@ -3917,16 +4104,12 @@ extern int acct_storage_p_add_clusters(mysql_conn_t *mysql_conn, uint32_t uid,
 	}
 
 	return rc;
-#else
-	return SLURM_ERROR;
-#endif
 }
 
 extern int acct_storage_p_add_associations(mysql_conn_t *mysql_conn,
 					   uint32_t uid, 
 					   List association_list)
 {
-#ifdef HAVE_MYSQL
 	ListIterator itr = NULL;
 	int rc = SLURM_SUCCESS;
 	int i=0;
@@ -4198,9 +4381,11 @@ extern int acct_storage_p_add_associations(mysql_conn_t *mysql_conn,
 				   == SLURM_ERROR)
 					continue;
 				moved_parent = 1;
+			} else {
+				object->lft = atoi(row[MASSOC_LFT]);
+				object->rgt = atoi(row[MASSOC_RGT]);
 			}
 
-
 			affect_rows = 2;
 			xstrfmtcat(query,
 				   "update %s set deleted=0, "
@@ -4233,7 +4418,29 @@ extern int acct_storage_p_add_associations(mysql_conn_t *mysql_conn,
 		}
 
 		object->id = assoc_id;
-		
+
+		/* get the parent id only if we haven't moved the
+		 * parent since we get the total list if that has
+		 * happened */
+		if(!moved_parent &&
+		   (!last_parent || !last_cluster
+		    || strcmp(parent, last_parent)
+		    || strcmp(object->cluster, last_cluster))) {
+			uint32_t tmp32 = 0;
+			if((tmp32 = _get_parent_id(mysql_conn, 
+						   parent,
+						   object->cluster))) {
+				my_par_id = tmp32;
+
+				last_parent = parent;
+				last_cluster = object->cluster;
+			}
+		}
+		object->parent_id = my_par_id;
+
+		if(!moved_parent && !object->lft)
+			_set_assoc_lft_rgt(mysql_conn, object);
+
 
                /* get the parent id only if we haven't moved the
                 * parent since we get the total list if that has
@@ -4337,7 +4544,7 @@ end_it:
 			 * since you can't query on mod time here and I don't
 			 * want to rewrite code to make it happen
 			 */
-			//bzero(&assoc_cond, sizeof(acct_association_cond_t));
+			//memset(&assoc_cond, 0, sizeof(acct_association_cond_t));
 			
 			if(!(assoc_list = 
 			     acct_storage_p_get_associations(mysql_conn,
@@ -4370,15 +4577,11 @@ end_it:
 	}
 					
 	return rc;
-#else
-	return SLURM_ERROR;
-#endif
 }
 
 extern int acct_storage_p_add_qos(mysql_conn_t *mysql_conn, uint32_t uid, 
 				  List qos_list)
 {
-#ifdef HAVE_MYSQL
 	ListIterator itr = NULL;
 	int rc = SLURM_SUCCESS;
 	acct_qos_rec_t *object = NULL;
@@ -4479,15 +4682,11 @@ extern int acct_storage_p_add_qos(mysql_conn_t *mysql_conn, uint32_t uid,
 	}
 
 	return rc;
-#else
-	return SLURM_ERROR;
-#endif
 }
 
 extern int acct_storage_p_add_wckeys(mysql_conn_t *mysql_conn, uint32_t uid, 
 				     List wckey_list)
 {
-#ifdef HAVE_MYSQL
 	ListIterator itr = NULL;
 	int rc = SLURM_SUCCESS;
 	acct_wckey_rec_t *object = NULL;
@@ -4591,16 +4790,59 @@ extern int acct_storage_p_add_wckeys(mysql_conn_t *mysql_conn, uint32_t uid,
 	}
 
 	return rc;
-#else
-	return SLURM_ERROR;
-#endif
+}
+
+extern int acct_storage_p_add_reservation(mysql_conn_t *mysql_conn,
+					  acct_reservation_rec_t *resv)
+{
+	int rc = SLURM_SUCCESS;
+	char *cols = NULL, *vals = NULL, *extra = NULL, 
+		*query = NULL;//, *tmp_extra = NULL;
+
+	if(!resv) {
+		error("No reservation was given to add");
+		return SLURM_ERROR;
+	}
+
+	if(!resv->id) {
+		error("We need an id to add a reservation.");
+		return SLURM_ERROR;
+	}
+	if(!resv->time_start) {
+		error("We need a start time to add a reservation.");
+		return SLURM_ERROR;
+	}
+	if(!resv->cluster) {
+		error("We need a cluster name to add a reservation.");
+		return SLURM_ERROR;
+	}
+
+	_setup_resv_limits(resv, &cols, &vals, &extra);
+	
+	xstrfmtcat(query,
+		   "insert into %s (id, cluster%s) values (%u, '%s'%s) "
+		   "on duplicate key update deleted=0%s;",
+		   resv_table, cols, resv->id, resv->cluster,
+		   vals, extra);
+	debug3("%d(%d) query\n%s",
+	       mysql_conn->conn, __LINE__, query);
+	
+	if((rc = mysql_db_query(mysql_conn->db_conn, query))
+	    == SLURM_SUCCESS)
+		rc = mysql_clear_results(mysql_conn->db_conn);
+	
+	xfree(query);
+	xfree(cols);
+	xfree(vals);
+	xfree(extra);
+	
+	return rc;
 }
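+
+/* Illustrative sketch (editorial, not part of this patch): with the
+ * example strings from _setup_resv_limits() above and made-up values
+ * id=42, cluster="tux", the query built here reads roughly
+ *	insert into resv_table (id, cluster, cpus, name)
+ *	values (42, 'tux', 128, "maint")
+ *	on duplicate key update deleted=0, cpus=128, name="maint";
+ * so re-adding an existing (id, start, cluster) row revives and
+ * updates it rather than failing. */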
 
 extern List acct_storage_p_modify_users(mysql_conn_t *mysql_conn, uint32_t uid, 
 					acct_user_cond_t *user_cond,
 					acct_user_rec_t *user)
 {
-#ifdef HAVE_MYSQL
 	ListIterator itr = NULL;
 	List ret_list = NULL;
 	int rc = SLURM_SUCCESS;
@@ -4737,9 +4979,6 @@ extern List acct_storage_p_modify_users(mysql_conn_t *mysql_conn, uint32_t uid,
 	}
 				
 	return ret_list;
-#else
-	return NULL;
-#endif
 }
 
 extern List acct_storage_p_modify_accounts(
@@ -4747,7 +4986,6 @@ extern List acct_storage_p_modify_accounts(
 	acct_account_cond_t *acct_cond,
 	acct_account_rec_t *acct)
 {
-#ifdef HAVE_MYSQL
 	ListIterator itr = NULL;
 	List ret_list = NULL;
 	int rc = SLURM_SUCCESS;
@@ -4875,9 +5113,6 @@ extern List acct_storage_p_modify_accounts(
 	xfree(vals);
 
 	return ret_list;
-#else
-	return NULL;
-#endif
 }
 
 extern List acct_storage_p_modify_clusters(mysql_conn_t *mysql_conn, 
@@ -4885,7 +5120,6 @@ extern List acct_storage_p_modify_clusters(mysql_conn_t *mysql_conn,
 					   acct_cluster_cond_t *cluster_cond,
 					   acct_cluster_rec_t *cluster)
 {
-#ifdef HAVE_MYSQL
 	ListIterator itr = NULL;
 	List ret_list = NULL;
 	int rc = SLURM_SUCCESS;
@@ -4897,6 +5131,7 @@ extern List acct_storage_p_modify_clusters(mysql_conn_t *mysql_conn,
 	int set = 0;
 	MYSQL_RES *result = NULL;
 	MYSQL_ROW row;
+	bool clust_reg = false;
 
 	/* If you need to alter the default values of the cluster use
 	 * modify_associations since this is used only for registering
@@ -4927,20 +5162,33 @@ extern List acct_storage_p_modify_clusters(mysql_conn_t *mysql_conn,
 		xstrcat(extra, ")");
 	}
 
+	if(cluster_cond->classification) {
+		xstrfmtcat(extra, " && (classification & %u)",
+			   cluster_cond->classification);
+	}
+
 	set = 0;
 	if(cluster->control_host) {
 		xstrfmtcat(vals, ", control_host='%s'", cluster->control_host);
 		set++;
+		clust_reg = true;
 	}
 
 	if(cluster->control_port) {
 		xstrfmtcat(vals, ", control_port=%u", cluster->control_port);
 		set++;
+		clust_reg = true;
 	}
 
 	if(cluster->rpc_version) {
 		xstrfmtcat(vals, ", rpc_version=%u", cluster->rpc_version);
 		set++;
+		clust_reg = true;
+	}
+
+	if(cluster->classification) {
+		xstrfmtcat(vals, ", classification=%u", 
+			   cluster->classification);
 	}
 
 	if(!vals) {
@@ -4948,7 +5196,7 @@ extern List acct_storage_p_modify_clusters(mysql_conn_t *mysql_conn,
 		errno = SLURM_NO_CHANGE_IN_DATA;
 		error("Nothing to change");
 		return NULL;
-	} else if(set != 3) {
+	} else if(clust_reg && (set != 3)) {
 		xfree(vals);
 		xfree(extra);
 		errno = EFAULT;
@@ -4982,7 +5230,7 @@ extern List acct_storage_p_modify_clusters(mysql_conn_t *mysql_conn,
 		object = xstrdup(row[0]);
 
 		/* check to see if this is the first time to register */
-		if(row[1][0] == '0')
+		if(clust_reg && (row[1][0] == '0'))
 			set = 0;
 
 		list_append(ret_list, object);
@@ -5040,9 +5288,17 @@ extern List acct_storage_p_modify_clusters(mysql_conn_t *mysql_conn,
 			error("can not open socket back to slurmctld");
 		} else {
 			slurm_msg_t out_msg;
+			accounting_update_msg_t update;
+			/* We have to put this update message here so
+			   we can tell the sender to send the correct
+			   RPC version.
+			*/
+			memset(&update, 0, sizeof(accounting_update_msg_t));
+			update.rpc_version = cluster->rpc_version;
 			slurm_msg_t_init(&out_msg);
 			out_msg.msg_type = ACCOUNTING_FIRST_REG;
 			out_msg.flags = SLURM_GLOBAL_AUTH_KEY;
+			out_msg.data = &update;
 			slurm_send_node_msg(fd, &out_msg);
 			/* We probably need to add matching recv_msg function
 			 * for an arbitray fd or should these be fire
@@ -5059,9 +5315,6 @@ end_it:
 	xfree(send_char);
 
 	return ret_list;
-#else
-	return NULL;
-#endif
 }
 
 extern List acct_storage_p_modify_associations(
@@ -5069,7 +5322,6 @@ extern List acct_storage_p_modify_associations(
 	acct_association_cond_t *assoc_cond,
 	acct_association_rec_t *assoc)
 {
-#ifdef HAVE_MYSQL
 	ListIterator itr = NULL;
 	List ret_list = NULL;
 	int rc = SLURM_SUCCESS;
@@ -5137,7 +5389,7 @@ extern List acct_storage_p_modify_associations(
 		   >= ACCT_ADMIN_OPERATOR) 
 			is_admin = 1;	
 		else {
-			if(assoc_mgr_fill_in_user(mysql_conn, &user, 1)
+			if(assoc_mgr_fill_in_user(mysql_conn, &user, 1, NULL)
 			   != SLURM_SUCCESS) {
 				error("couldn't get information for this user");
 				errno = SLURM_ERROR;
@@ -5334,7 +5586,7 @@ extern List acct_storage_p_modify_associations(
 		init_acct_association_rec(mod_assoc);
 		mod_assoc->id = atoi(row[MASSOC_ID]);
 
-		mod_assoc->fairshare = assoc->fairshare;
+		mod_assoc->shares_raw = assoc->shares_raw;
 
 		mod_assoc->grp_cpus = assoc->grp_cpus;
 		mod_assoc->grp_cpu_mins = assoc->grp_cpu_mins;
@@ -5350,6 +5602,9 @@ extern List acct_storage_p_modify_associations(
 		mod_assoc->max_submit_jobs = assoc->max_submit_jobs;
 		mod_assoc->max_wall_pj = assoc->max_wall_pj;
 
+		/* no need to get the parent id since if we moved
+		 * parent ids we will get it when we send the total list */
+
 		if(!row[MASSOC_USER][0])
 			mod_assoc->parent_acct = xstrdup(assoc->parent_acct);
 		if(assoc->qos_list && list_count(assoc->qos_list)) {
@@ -5484,7 +5739,7 @@ extern List acct_storage_p_modify_associations(
 		 * want to rewrite code to make it happen
 		 */
 
-		//bzero(&local_assoc_cond, sizeof(acct_association_cond_t));
+		//memset(&local_assoc_cond, 0, sizeof(acct_association_cond_t));
 		
 		if(!(local_assoc_list = 
 		     acct_storage_p_get_associations(mysql_conn,
@@ -5514,16 +5769,12 @@ end_it:
 	xfree(vals);
 
 	return ret_list;
-#else
-	return NULL;
-#endif
 }
 
 extern List acct_storage_p_modify_qos(mysql_conn_t *mysql_conn, uint32_t uid, 
 					acct_qos_cond_t *qos_cond,
 					acct_qos_rec_t *qos)
 {
-#ifdef HAVE_MYSQL
 	ListIterator itr = NULL;
 	List ret_list = NULL;
 	int rc = SLURM_SUCCESS;
@@ -5843,9 +6094,6 @@ extern List acct_storage_p_modify_qos(mysql_conn_t *mysql_conn, uint32_t uid,
 	}
 				
 	return ret_list;
-#else
-	return NULL;
-#endif
 }
 
 extern List acct_storage_p_modify_wckeys(mysql_conn_t *mysql_conn,
@@ -5856,37 +6104,238 @@ extern List acct_storage_p_modify_wckeys(mysql_conn_t *mysql_conn,
 	return NULL;
 }
 
-extern List acct_storage_p_remove_users(mysql_conn_t *mysql_conn, uint32_t uid, 
-					acct_user_cond_t *user_cond)
+extern int acct_storage_p_modify_reservation(mysql_conn_t *mysql_conn,
+					     acct_reservation_rec_t *resv)
 {
-#ifdef HAVE_MYSQL
-	ListIterator itr = NULL;
-	List ret_list = NULL;
-	List coord_list = NULL;
-	int rc = SLURM_SUCCESS;
-	char *object = NULL;
-	char *extra = NULL, *query = NULL,
-		*name_char = NULL, *assoc_char = NULL;
-	time_t now = time(NULL);
-	char *user_name = NULL;
-	int set = 0;
 	MYSQL_RES *result = NULL;
 	MYSQL_ROW row;
-	acct_user_cond_t user_coord_cond;
-	acct_association_cond_t assoc_cond;
-	acct_wckey_cond_t wckey_cond;
-	
-	if(!user_cond) {
-		error("we need something to remove");
-		return NULL;
-	}
-
-	if(_check_connection(mysql_conn) != SLURM_SUCCESS)
-		return NULL;
+	int rc = SLURM_SUCCESS;
+	char *cols = NULL, *vals = NULL, *extra = NULL, 
+		*query = NULL;//, *tmp_extra = NULL;
+	time_t start = 0, now = time(NULL);
+	int i;
+	int set = 0;
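+	/* if this changes you will need to edit the corresponding enum */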
+	char *resv_req_inx[] = {
+		"assoclist",
+		"start",
+		"end",
+		"cpus",
+		"name",
+		"nodelist",
+		"node_inx",
+		"flags"
+	};
+	enum {
+		RESV_ASSOCS,
+		RESV_START,
+		RESV_END,
+		RESV_CPU,
+		RESV_NAME,
+		RESV_NODES,
+		RESV_NODE_INX,
+		RESV_FLAGS,
+		RESV_COUNT
+	};
 
-	xstrcat(extra, "where deleted=0");
+	if(!resv) {
+		error("No reservation was given to edit");
+		return SLURM_ERROR;
+	}
 
-	if(user_cond->assoc_cond && user_cond->assoc_cond->user_list
+	if(!resv->id) {
+		error("We need an id to edit a reservation.");
+		return SLURM_ERROR;
+	}
+	if(!resv->time_start) {
+		error("We need a start time to edit a reservation.");
+		return SLURM_ERROR;
+	}
+	if(!resv->cluster) {
+		error("We need a cluster name to edit a reservation.");
+		return SLURM_ERROR;
+	}
+		
+	if(!resv->time_start_prev) {
+		error("We need a time to check for last "
+		      "start of reservation.");
+		return SLURM_ERROR;
+	}
+	
+	for(i=0; i<RESV_COUNT; i++) {
+		if(i) 
+			xstrcat(cols, ", ");
+		xstrcat(cols, resv_req_inx[i]);
+	}
+	
+	/* check for both the last start and the start because most
+	   likely the start time hasn't changed, but something else
+	   may have since the last time we did an update to the
+	   reservation. */
+	query = xstrdup_printf("select %s from %s where id=%u "
+			       "and (start=%d || start=%d) and cluster='%s' "
+			       "and deleted=0 order by start desc "
+			       "limit 1 FOR UPDATE;",
+			       cols, resv_table, resv->id, 
+			       resv->time_start, resv->time_start_prev,
+			       resv->cluster);
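+	/* "FOR UPDATE" locks the selected row so concurrent edits of
+	   the same reservation serialize against this transaction */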
+try_again:
+	debug4("%d(%d) query\n%s",
+	       mysql_conn->conn, __LINE__, query);
+	if(!(result = mysql_db_query_ret(
+		     mysql_conn->db_conn, query, 0))) {
+		rc = SLURM_ERROR;
+		goto end_it;
+	}
+	if(!(row = mysql_fetch_row(result))) {
+		rc = SLURM_ERROR;
+		mysql_free_result(result);
+		error("There is no reservation by id %u, "
+		      "start %d, and cluster '%s'", resv->id,
+		      resv->time_start_prev, resv->cluster);
+		if(!set && resv->time_end) {
+			/* This should never really happen,
+			   but just in case the controller and the
+			   database get out of sync we check
+			   to see if there is a reservation
+			   not deleted that hasn't ended yet. */
+			xfree(query);
+			query = xstrdup_printf(
+				"select %s from %s where id=%u "
+				"and start <= %d and cluster='%s' "
+				"and deleted=0 order by start desc "
+				"limit 1;",
+				cols, resv_table, resv->id, 
+				resv->time_end, resv->cluster);
+			set = 1;
+			goto try_again;
+		}
+		goto end_it;
+	} 
+	
+	start = atoi(row[RESV_START]);
+	
+	xfree(query);
+	xfree(cols);
+	
+	set = 0;
+	
+	/* Check for differences between the incoming record and the
+	   stored row.  "set" is raised only for changes that matter
+	   to accounting (assocs, cpus, flags, nodes); a name or end
+	   time change alone is applied to the existing row. */
+		
+	if(!resv->name 
+	   && row[RESV_NAME] && row[RESV_NAME][0])
+		// if this changes we just update the
+		// record, no need to create a new one since
+		// this doesn't really affect the
+		// reservation accounting-wise
+		resv->name = xstrdup(row[RESV_NAME]);
+
+	if(resv->assocs)
+		set = 1;
+	else if(row[RESV_ASSOCS] && row[RESV_ASSOCS][0])
+		resv->assocs = xstrdup(row[RESV_ASSOCS]);
+
+	if(resv->cpus != (uint32_t)NO_VAL) 
+		set = 1;
+	else 
+		resv->cpus = atoi(row[RESV_CPU]);
+		
+	if(resv->flags != (uint16_t)NO_VAL) 
+		set = 1;
+	else
+		resv->flags = atoi(row[RESV_FLAGS]);
+		
+	if(resv->nodes) 
+		set = 1;
+	else if(row[RESV_NODES] && row[RESV_NODES][0]) {
+		resv->nodes = xstrdup(row[RESV_NODES]);
+		resv->node_inx = xstrdup(row[RESV_NODE_INX]);
+	}
+		
+	if(!resv->time_end)
+		resv->time_end = atoi(row[RESV_END]);
+
+	mysql_free_result(result);
+
+	_setup_resv_limits(resv, &cols, &vals, &extra);
+	/* use start below instead of resv->time_start_prev
+	 * just in case we have a different one from being out
+	 * of sync
+	 */
+	if((start > now) || !set) {
+		/* we haven't started the reservation yet, or
+		   we are only changing the associations or end
+		   time, so we can just update the existing row */
+		query = xstrdup_printf("update %s set deleted=0%s "
+				       "where deleted=0 and id=%u "
+				       "and start=%d and cluster='%s';",
+				       resv_table, extra, resv->id,
+				       start,
+				       resv->cluster);
+	} else {
+		/* time_start was already handled above and we
+		 * changed something that needs a new
+		 * entry. */
+		query = xstrdup_printf("update %s set end=%d "
+				       "where deleted=0 && id=%u "
+				       "&& start=%d and cluster='%s';",
+				       resv_table, resv->time_start-1,
+				       resv->id, start,
+				       resv->cluster);
+		xstrfmtcat(query,
+			   "insert into %s (id, cluster%s) "
+			   "values (%u, '%s'%s) "
+			   "on duplicate key update deleted=0%s;",
+			   resv_table, cols, resv->id, resv->cluster,
+			   vals, extra);
+	}
+
+	debug3("%d(%d) query\n%s",
+	       mysql_conn->conn, __LINE__, query);
+	
+	if((rc = mysql_db_query(mysql_conn->db_conn, query)
+	    == SLURM_SUCCESS))
+		rc = mysql_clear_results(mysql_conn->db_conn);
+	
+end_it:
+	
+	xfree(query);
+	xfree(cols);
+	xfree(vals);
+	xfree(extra);
+	
+	return rc;
+}
+
+extern List acct_storage_p_remove_users(mysql_conn_t *mysql_conn, uint32_t uid, 
+					acct_user_cond_t *user_cond)
+{
+	ListIterator itr = NULL;
+	List ret_list = NULL;
+	List coord_list = NULL;
+	int rc = SLURM_SUCCESS;
+	char *object = NULL;
+	char *extra = NULL, *query = NULL,
+		*name_char = NULL, *assoc_char = NULL;
+	time_t now = time(NULL);
+	char *user_name = NULL;
+	int set = 0;
+	MYSQL_RES *result = NULL;
+	MYSQL_ROW row;
+	acct_user_cond_t user_coord_cond;
+	acct_association_cond_t assoc_cond;
+	acct_wckey_cond_t wckey_cond;
+	
+	if(!user_cond) {
+		error("we need something to remove");
+		return NULL;
+	}
+
+	if(_check_connection(mysql_conn) != SLURM_SUCCESS)
+		return NULL;
+
+	xstrcat(extra, "where deleted=0");
+
+	if(user_cond->assoc_cond && user_cond->assoc_cond->user_list
 	   && list_count(user_cond->assoc_cond->user_list)) {
 		set = 0;
 		xstrcat(extra, " && (");
@@ -6029,17 +6478,12 @@ extern List acct_storage_p_remove_users(mysql_conn_t *mysql_conn, uint32_t uid,
 	}		
 
 	return ret_list;
-
-#else
-	return NULL;
-#endif
 }
 
 extern List acct_storage_p_remove_coord(mysql_conn_t *mysql_conn, uint32_t uid, 
 					List acct_list,
 					acct_user_cond_t *user_cond)
 {
-#ifdef HAVE_MYSQL
 	char *query = NULL, *object = NULL, *extra = NULL, *last_user = NULL;
 	char *user_name = NULL;
 	time_t now = time(NULL);
@@ -6080,7 +6524,7 @@ extern List acct_storage_p_remove_coord(mysql_conn_t *mysql_conn, uint32_t uid,
 		   >= ACCT_ADMIN_OPERATOR) 
 			is_admin = 1;	
 		else {
-			if(assoc_mgr_fill_in_user(mysql_conn, &user, 1)
+			if(assoc_mgr_fill_in_user(mysql_conn, &user, 1, NULL)
 			   != SLURM_SUCCESS) {
 				error("couldn't get information for this user");
 				errno = SLURM_ERROR;
@@ -6225,15 +6669,11 @@ extern List acct_storage_p_remove_coord(mysql_conn_t *mysql_conn, uint32_t uid,
 	list_destroy(user_list);
 
 	return ret_list;
-#else
-	return NULL;
-#endif
 }
 
 extern List acct_storage_p_remove_accts(mysql_conn_t *mysql_conn, uint32_t uid, 
 					acct_account_cond_t *acct_cond)
 {
-#ifdef HAVE_MYSQL
 	ListIterator itr = NULL;
 	List ret_list = NULL;
 	List coord_list = NULL;
@@ -6357,16 +6797,12 @@ extern List acct_storage_p_remove_accts(mysql_conn_t *mysql_conn, uint32_t uid,
 	}
 
 	return ret_list;
-#else
-	return NULL;
-#endif
 }
 
 extern List acct_storage_p_remove_clusters(mysql_conn_t *mysql_conn,
 					   uint32_t uid, 
 					   acct_cluster_cond_t *cluster_cond)
 {
-#ifdef HAVE_MYSQL
 	ListIterator itr = NULL;
 	List ret_list = NULL;
 	List tmp_list = NULL;
@@ -6492,16 +6928,12 @@ extern List acct_storage_p_remove_clusters(mysql_conn_t *mysql_conn,
 	}
 
 	return ret_list;
-#else
-	return NULL;
-#endif
 }
 
 extern List acct_storage_p_remove_associations(
 	mysql_conn_t *mysql_conn, uint32_t uid, 
 	acct_association_cond_t *assoc_cond)
 {
-#ifdef HAVE_MYSQL
 	ListIterator itr = NULL;
 	List ret_list = NULL;
 	int rc = SLURM_SUCCESS;
@@ -6563,7 +6995,7 @@ extern List acct_storage_p_remove_associations(
 		   >= ACCT_ADMIN_OPERATOR) 
 			is_admin = 1;	
 		else {
-			if(assoc_mgr_fill_in_user(mysql_conn, &user, 1)
+			if(assoc_mgr_fill_in_user(mysql_conn, &user, 1, NULL)
 			   != SLURM_SUCCESS) {
 				error("couldn't get information for this user");
 				errno = SLURM_ERROR;
@@ -6738,15 +7170,11 @@ end_it:
 	mysql_free_result(result);
 
 	return NULL;
-#else
-	return NULL;
-#endif
 }
 
 extern List acct_storage_p_remove_qos(mysql_conn_t *mysql_conn, uint32_t uid, 
 				      acct_qos_cond_t *qos_cond)
 {
-#ifdef HAVE_MYSQL
 	ListIterator itr = NULL;
 	List ret_list = NULL;
 	int rc = SLURM_SUCCESS;
@@ -6870,16 +7298,12 @@ extern List acct_storage_p_remove_qos(mysql_conn_t *mysql_conn, uint32_t uid,
 	}
 
 	return ret_list;
-#else
-	return NULL;
-#endif
 }
 
 extern List acct_storage_p_remove_wckeys(mysql_conn_t *mysql_conn,
 					 uint32_t uid, 
 					 acct_wckey_cond_t *wckey_cond)
 {
-#ifdef HAVE_MYSQL
 	List ret_list = NULL;
 	int rc = SLURM_SUCCESS;
 	char *extra = NULL, *query = NULL,
@@ -6954,15 +7378,58 @@ empty:
 	}
 
 	return ret_list;
-#else
-	return NULL;
-#endif
+}
+
+extern int acct_storage_p_remove_reservation(mysql_conn_t *mysql_conn,
+					     acct_reservation_rec_t *resv)
+{
+	int rc = SLURM_SUCCESS;
+	char *query = NULL;//, *tmp_extra = NULL;
+
+	if(!resv) {
+		error("No reservation was given to remove");
+		return SLURM_ERROR;
+	}
+
+	if(!resv->id || !resv->time_start || !resv->cluster) {
+		error("We need an id, start time, and cluster "
+		      "name to remove a reservation.");
+		return SLURM_ERROR;
+	}
+
+
+	/* first delete the resv that hasn't happened yet. */
+	query = xstrdup_printf("delete from %s where start > %d "
+			       "and id=%u and start=%d "
+			       "and cluster='%s';", 
+			       resv_table, resv->time_start_prev,
+			       resv->id, 
+			       resv->time_start, resv->cluster);
+	/* then update the remaining ones with a deleted flag and end
+	 * time of the time_start_prev which is set to when the
+	 * command was issued */
+	xstrfmtcat(query,
+		   "update %s set end=%d, deleted=1 where deleted=0 and "
+		   "id=%u and start=%d and cluster='%s;'",
+		   resv_table, resv->time_start_prev,
+		   resv->id, resv->time_start,
+		   resv->cluster);
+	
+	debug3("%d(%d) query\n%s",
+	       mysql_conn->conn, __LINE__, query);
+	
+	if((rc = mysql_db_query(mysql_conn->db_conn, query)
+	    == SLURM_SUCCESS))
+		rc = mysql_clear_results(mysql_conn->db_conn);
+	
+	xfree(query);
+	
+	return rc;
 }
 
 extern List acct_storage_p_get_users(mysql_conn_t *mysql_conn, uid_t uid, 
 				     acct_user_cond_t *user_cond)
 {
-#ifdef HAVE_MYSQL
 	char *query = NULL;	
 	char *extra = NULL;	
 	char *tmp = NULL;	
@@ -7016,7 +7483,8 @@ extern List acct_storage_p_get_users(mysql_conn_t *mysql_conn, uid_t uid,
 			   >= ACCT_ADMIN_OPERATOR) 
 				is_admin = 1;	
 			else {
-				assoc_mgr_fill_in_user(mysql_conn, &user, 1);
+				assoc_mgr_fill_in_user(mysql_conn, &user, 1,
+						       NULL);
 			}
 		}
 	}
@@ -7227,15 +7695,11 @@ get_wckeys:
 	}
 
 	return user_list;
-#else
-	return NULL;
-#endif
 }
 
 extern List acct_storage_p_get_accts(mysql_conn_t *mysql_conn, uid_t uid,
 				     acct_account_cond_t *acct_cond)
 {
-#ifdef HAVE_MYSQL
 	char *query = NULL;	
 	char *extra = NULL;	
 	char *tmp = NULL;	
@@ -7288,7 +7752,8 @@ extern List acct_storage_p_get_accts(mysql_conn_t *mysql_conn, uid_t uid,
 			   >= ACCT_ADMIN_OPERATOR) 
 				is_admin = 1;	
 			else {
-				assoc_mgr_fill_in_user(mysql_conn, &user, 1);
+				assoc_mgr_fill_in_user(mysql_conn, &user, 1,
+						       NULL);
 			}
 
 			if(!is_admin && (!user.coord_accts 
@@ -7471,15 +7936,11 @@ empty:
 	}
 
 	return acct_list;
-#else
-	return NULL;
-#endif
 }
 
 extern List acct_storage_p_get_clusters(mysql_conn_t *mysql_conn, uid_t uid, 
 					acct_cluster_cond_t *cluster_cond)
 {
-#ifdef HAVE_MYSQL
 	char *query = NULL;	
 	char *extra = NULL;	
 	char *tmp = NULL;	
@@ -7499,12 +7960,14 @@ extern List acct_storage_p_get_clusters(mysql_conn_t *mysql_conn, uid_t uid,
 	/* if this changes you will need to edit the corresponding enum */
 	char *cluster_req_inx[] = {
 		"name",
+		"classification",
 		"control_host",
 		"control_port",
 		"rpc_version",
 	};
 	enum {
 		CLUSTER_REQ_NAME,
+		CLUSTER_REQ_CLASS,
 		CLUSTER_REQ_CH,
 		CLUSTER_REQ_CP,
 		CLUSTER_REQ_VERSION,
@@ -7575,6 +8038,9 @@ empty:
 	assoc_cond.cluster_list = list_create(NULL);
 
 	while((row = mysql_fetch_row(result))) {
+		MYSQL_RES *result2 = NULL;
+		MYSQL_ROW row2;
+
 		cluster = xmalloc(sizeof(acct_cluster_rec_t));
 		list_append(cluster_list, cluster);
 
@@ -7591,9 +8057,28 @@ empty:
 				cluster_cond->usage_end);
 		}
 
+		cluster->classification = atoi(row[CLUSTER_REQ_CLASS]);
 		cluster->control_host = xstrdup(row[CLUSTER_REQ_CH]);
 		cluster->control_port = atoi(row[CLUSTER_REQ_CP]);
 		cluster->rpc_version = atoi(row[CLUSTER_REQ_VERSION]);
+		query = xstrdup_printf(
+			"select cpu_count, cluster_nodes from "
+			"%s where cluster=\"%s\" "
+			"and period_end=0 and node_name='' limit 1",
+			event_table, cluster->name);
+		debug4("%d(%d) query\n%s", mysql_conn->conn, __LINE__, query);
+		if(!(result2 = mysql_db_query_ret(
+			     mysql_conn->db_conn, query, 0))) {
+			xfree(query);
+			continue;
+		}
+		xfree(query);
+		if((row2 = mysql_fetch_row(result2))) {
+			cluster->cpu_count = atoi(row2[0]);
+			if(row2[1] && row2[1][0])
+				cluster->nodes = xstrdup(row2[1]);
+		}
+		mysql_free_result(result2);	
 	}
 	mysql_free_result(result);
 
@@ -7642,16 +8127,12 @@ empty:
 	list_destroy(assoc_list);
 
 	return cluster_list;
-#else
-	return NULL;
-#endif
 }
 
 extern List acct_storage_p_get_associations(mysql_conn_t *mysql_conn,
 					    uid_t uid, 
 					    acct_association_cond_t *assoc_cond)
 {
-#ifdef HAVE_MYSQL
 	//DEF_TIMERS;
 	char *query = NULL;	
 	char *extra = NULL;	
@@ -7779,7 +8260,8 @@ extern List acct_storage_p_get_associations(mysql_conn_t *mysql_conn,
 			   >= ACCT_ADMIN_OPERATOR) 
 				is_admin = 1;	
 			else {
-				assoc_mgr_fill_in_user(mysql_conn, &user, 1);
+				assoc_mgr_fill_in_user(mysql_conn, &user, 1,
+						       NULL);
 			}
 		}
 	}
@@ -7917,9 +8399,9 @@ empty:
 		if(row[ASSOC_REQ_PART][0])
 			assoc->partition = xstrdup(row[ASSOC_REQ_PART]);
 		if(row[ASSOC_REQ_FS])
-			assoc->fairshare = atoi(row[ASSOC_REQ_FS]);
+			assoc->shares_raw = atoi(row[ASSOC_REQ_FS]);
 		else
-			assoc->fairshare = 1;
+			assoc->shares_raw = 1;
 
 		if(!without_parent_info && parent_acct &&
 		   (!last_acct || !last_cluster 
@@ -8140,15 +8622,16 @@ empty:
 	xfree(parent_qos);
 	//END_TIMER2("get_associations");
 	return assoc_list;
-#else
+}
+
+extern List acct_storage_p_get_config(void *db_conn)
+{
 	return NULL;
-#endif
 }
 
 extern List acct_storage_p_get_qos(mysql_conn_t *mysql_conn, uid_t uid,
 				   acct_qos_cond_t *qos_cond)
 {
-#ifdef HAVE_MYSQL
 	char *query = NULL;	
 	char *extra = NULL;	
 	char *tmp = NULL;	
@@ -8181,6 +8664,7 @@ extern List acct_storage_p_get_qos(mysql_conn_t *mysql_conn, uid_t uid,
 		"preemptees",
 		"preemptors",
 		"priority",
+		"usage_factor",
 	};
 	enum {
 		QOS_REQ_NAME,
@@ -8202,6 +8686,7 @@ extern List acct_storage_p_get_qos(mysql_conn_t *mysql_conn, uid_t uid,
 		QOS_REQ_PREE,
 		QOS_REQ_PREO,
 		QOS_REQ_PRIO,
+		QOS_REQ_UF,
 		QOS_REQ_COUNT
 	};
 
@@ -8367,19 +8852,18 @@ empty:
 
 		if(row[QOS_REQ_PRIO])
 			qos->priority = atoi(row[QOS_REQ_PRIO]);
+
+		if(row[QOS_REQ_UF])
+			qos->usage_factor = atof(row[QOS_REQ_UF]);
 	}
 	mysql_free_result(result);
 
 	return qos_list;
-#else
-	return NULL;
-#endif
 }
 
 extern List acct_storage_p_get_wckeys(mysql_conn_t *mysql_conn, uid_t uid,
 				      acct_wckey_cond_t *wckey_cond)
 {
-#ifdef HAVE_MYSQL
 	//DEF_TIMERS;
 	char *query = NULL;	
 	char *extra = NULL;	
@@ -8441,7 +8925,8 @@ extern List acct_storage_p_get_wckeys(mysql_conn_t *mysql_conn, uid_t uid,
 			   >= ACCT_ADMIN_OPERATOR) 
 				is_admin = 1;	
 			else {
-				assoc_mgr_fill_in_user(mysql_conn, &user, 1);
+				assoc_mgr_fill_in_user(mysql_conn, &user,
+						       1, NULL);
 			}
 		}
 	}
@@ -8505,15 +8990,228 @@ empty:
 
 	//END_TIMER2("get_wckeys");
 	return wckey_list;
-#else
-	return NULL;
-#endif
+}
+
+extern List acct_storage_p_get_reservations(mysql_conn_t *mysql_conn, uid_t uid,
+					    acct_reservation_cond_t *resv_cond)
+{
+	//DEF_TIMERS;
+	char *query = NULL;	
+	char *extra = NULL;	
+	char *tmp = NULL;	
+	List resv_list = NULL;
+	int set = 0;
+	int i=0, is_admin=1;
+	MYSQL_RES *result = NULL;
+	MYSQL_ROW row;
+	uint16_t private_data = 0;
+	acct_job_cond_t job_cond;
+	void *curr_cluster = NULL;
+	List local_cluster_list = NULL;
+
+	/* needed if we don't have a resv_cond */
+	uint16_t with_usage = 0;
+
+	/* if this changes you will need to edit the corresponding enum */
+	char *resv_req_inx[] = {
+		"id",
+		"name",
+		"cluster",
+		"cpus",
+		"assoclist",
+		"nodelist",
+		"node_inx",
+		"start",
+		"end",
+		"flags",
+	};
+
+	enum {
+		RESV_REQ_ID,
+		RESV_REQ_NAME,
+		RESV_REQ_CLUSTER,
+		RESV_REQ_CPUS,
+		RESV_REQ_ASSOCS,
+		RESV_REQ_NODES,
+		RESV_REQ_NODE_INX,
+		RESV_REQ_START,
+		RESV_REQ_END,
+		RESV_REQ_FLAGS,
+		RESV_REQ_COUNT
+	};
+
+	if(!resv_cond) {
+		xstrcat(extra, " where deleted=0");
+		goto empty;
+	}
+
+	if(_check_connection(mysql_conn) != SLURM_SUCCESS)
+		return NULL;
+
+	private_data = slurm_get_private_data();
+	if (private_data & PRIVATE_DATA_RESERVATIONS) {
+		/* This only works when running through the slurmdbd.
+		 * THERE IS NO AUTHENTICATION WHEN RUNNING OUTSIDE OF THE
+		 * SLURMDBD!
+		 */
+		if(slurmdbd_conf) {
+			is_admin = 0;
+			/* we have to check the authentication here in the
+			 * plugin since we don't know what accounts are being
+			 * referenced until after the query.  Here we will
+			 * set if they are an operator or greater and then
+			 * check it below after the query.
+			 */
+			if((uid == slurmdbd_conf->slurm_user_id || uid == 0)
+			   || assoc_mgr_get_admin_level(mysql_conn, uid) 
+			   >= ACCT_ADMIN_OPERATOR) 
+				is_admin = 1;	
+			else {
+				error("Only admins can look at "
+				      "reservation usage");
+				return NULL;
+			}
+		}
+	}
+
+	/* grab with_usage up front since it is consulted when
+	   building job_cond below */
+	with_usage = resv_cond->with_usage;
+
+	memset(&job_cond, 0, sizeof(acct_job_cond_t));
+	if(resv_cond->nodes) {
+		job_cond.usage_start = resv_cond->time_start;
+		job_cond.usage_end = resv_cond->time_end;
+		job_cond.used_nodes = resv_cond->nodes;
+		job_cond.cluster_list = resv_cond->cluster_list;
+		local_cluster_list = setup_cluster_list_with_inx(
+			mysql_conn, &job_cond, (void **)&curr_cluster);
+	} else if(with_usage) {
+		job_cond.usage_start = resv_cond->time_start;
+		job_cond.usage_end = resv_cond->time_end;
+	}
+
+	set = _setup_resv_cond_limits(resv_cond, &extra);
+
+empty:
+	xfree(tmp);
+	xstrfmtcat(tmp, "t1.%s", resv_req_inx[i]);
+	for(i=1; i<RESV_REQ_COUNT; i++) {
+		xstrfmtcat(tmp, ", t1.%s", resv_req_inx[i]);
+	}
+	
+	//START_TIMER;
+	query = xstrdup_printf("select distinct %s from %s as t1%s "
+			       "order by cluster, name;", 
+			       tmp, resv_table, extra);
+	xfree(tmp);
+	xfree(extra);
+	debug3("%d(%d) query\n%s", mysql_conn->conn, __LINE__, query);
+	if(!(result = mysql_db_query_ret(mysql_conn->db_conn, query, 0))) {
+		xfree(query);
+		if(local_cluster_list)
+			list_destroy(local_cluster_list);
+		return NULL;
+	}
+	xfree(query);
+	
+	resv_list = list_create(destroy_acct_reservation_rec);
+	
+	while((row = mysql_fetch_row(result))) {
+		acct_reservation_rec_t *resv =
+			xmalloc(sizeof(acct_reservation_rec_t));
+		int start = atoi(row[RESV_REQ_START]);
+		list_append(resv_list, resv);
+
+		if(!good_nodes_from_inx(local_cluster_list, &curr_cluster,
+					row[RESV_REQ_NODE_INX], start))
+			continue;
+		
+		resv->id = atoi(row[RESV_REQ_ID]);
+		if(with_usage) {
+			if(!job_cond.resvid_list) 
+				job_cond.resvid_list = list_create(NULL);
+			list_append(job_cond.resvid_list, row[RESV_REQ_ID]);
+		}
+		resv->name = xstrdup(row[RESV_REQ_NAME]);
+		resv->cluster = xstrdup(row[RESV_REQ_CLUSTER]);
+		resv->cpus = atoi(row[RESV_REQ_CPUS]);
+		resv->assocs = xstrdup(row[RESV_REQ_ASSOCS]);
+		resv->nodes = xstrdup(row[RESV_REQ_NODES]);
+		resv->time_start = start;
+		resv->time_end = atoi(row[RESV_REQ_END]);
+		resv->flags = atoi(row[RESV_REQ_FLAGS]);
+	}
+
+	if(local_cluster_list)
+		list_destroy(local_cluster_list);
+
+	if(with_usage && resv_list && list_count(resv_list)) {
+		List job_list = mysql_jobacct_process_get_jobs(
+			mysql_conn, uid, &job_cond);	
+		ListIterator itr = NULL, itr2 = NULL;
+		jobacct_job_rec_t *job = NULL;
+		acct_reservation_rec_t *resv = NULL;
+
+		if(!job_list || !list_count(job_list))
+			goto no_jobs;
+		
+		itr = list_iterator_create(job_list);
+		itr2 = list_iterator_create(resv_list);
+		while((job = list_next(itr))) {
+			int start = job->start;
+			int end = job->end;
+			int set = 0;
+			while((resv = list_next(itr2))) {
+				int elapsed = 0;
+				/* since a reservation could have
+				   changed while a job was running we
+				   have to make sure we get the time
+				   in the correct record.
+				*/
+				if(resv->id != job->resvid) 
+					continue;
+				set = 1;
+
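+				/* clamp the job's run window to this
+				   record's lifetime before charging
+				   its cpu seconds to the reservation */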
+				if(start < resv->time_start)
+					start = resv->time_start;
+				if(!end || end > resv->time_end) 
+					end = resv->time_end;
+				
+				if((elapsed = (end - start)) < 1)
+					continue;
+
+				if(job->alloc_cpus)
+					resv->alloc_secs +=
+						elapsed * job->alloc_cpus;
+			}
+			list_iterator_reset(itr2);
+			if(!set) {
+				error("we got a job %u with no reservation "
+				      "associatied with it?", job->jobid);
+			}
+		}		
+		
+		list_iterator_destroy(itr2);
+		list_iterator_destroy(itr);
+	no_jobs:
+		if(job_list)
+			list_destroy(job_list);
+	}	
+
+	if(job_cond.resvid_list) {
+		list_destroy(job_cond.resvid_list);
+		job_cond.resvid_list = NULL;
+	}
+
+	/* free result after we use the list with resv id's in it. */
+	mysql_free_result(result);
+
+	//END_TIMER2("get_resvs");
+	return resv_list;
 }
 
 extern List acct_storage_p_get_txn(mysql_conn_t *mysql_conn, uid_t uid,
 				   acct_txn_cond_t *txn_cond)
 {
-#ifdef HAVE_MYSQL
 	char *query = NULL;	
 	char *assoc_extra = NULL;	
 	char *name_extra = NULL;	
@@ -8893,16 +9591,12 @@ empty:
 	mysql_free_result(result);
 
 	return txn_list;
-#else
-	return NULL;
-#endif
 }
 
 extern int acct_storage_p_get_usage(mysql_conn_t *mysql_conn, uid_t uid,
 				    void *in, slurmdbd_msg_type_t type,
 				    time_t start, time_t end)
 {
-#ifdef HAVE_MYSQL
 	int rc = SLURM_SUCCESS;
 	int i=0, is_admin=1;
 	MYSQL_RES *result = NULL;
@@ -8994,7 +9688,8 @@ extern int acct_storage_p_get_usage(mysql_conn_t *mysql_conn, uid_t uid,
 			   >= ACCT_ADMIN_OPERATOR) 
 				is_admin = 1;	
 			else {
-				assoc_mgr_fill_in_user(mysql_conn, &user, 1);
+				assoc_mgr_fill_in_user(mysql_conn, &user, 1,
+						       NULL);
 			}
 			
 			if(!is_admin) {
@@ -9096,18 +9791,15 @@ is_user:
 	mysql_free_result(result);
 	
 	return rc;
-#else
-	return SLURM_ERROR;
-#endif
 }
 
 extern int acct_storage_p_roll_usage(mysql_conn_t *mysql_conn, 
-				     time_t sent_start)
+				     time_t sent_start, time_t sent_end,
+				     uint16_t archive_data)
 {
-#ifdef HAVE_MYSQL
 	int rc = SLURM_SUCCESS;
 	int i = 0;
-	time_t my_time = time(NULL);
+	time_t my_time = sent_end;
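+	/* a sent_end of 0 means "roll usage up to now"; my_time is
+	   defaulted to time(NULL) below */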
 	struct tm start_tm;
 	struct tm end_tm;
 	MYSQL_RES *result = NULL;
@@ -9198,6 +9890,9 @@ extern int acct_storage_p_roll_usage(mysql_conn_t *mysql_conn,
 		}
 	}
 	
+	if(!my_time)
+		my_time = time(NULL);
+
 	/* test month gap */
 /* 	last_hour = 1212299999; */
 /* 	last_day = 1212217200; */
@@ -9253,8 +9948,10 @@ extern int acct_storage_p_roll_usage(mysql_conn_t *mysql_conn,
 		   != SLURM_SUCCESS)
 			return rc;
 		END_TIMER2("hourly_rollup");
-		query = xstrdup_printf("update %s set hourly_rollup=%d",
-				       last_ran_table, end_time);
+		/* If we have a sent_end do not update the last_ran_table */
+		if(!sent_end)
+			query = xstrdup_printf("update %s set hourly_rollup=%d",
+					       last_ran_table, end_time);
 	} else {
 		debug2("no need to run this hour %d <= %d", 
 		       end_time, start_time);
@@ -9279,13 +9976,14 @@ extern int acct_storage_p_roll_usage(mysql_conn_t *mysql_conn,
 
 	if(end_time-start_time > 0) {
 		START_TIMER;
-		if((rc = mysql_daily_rollup(mysql_conn, start_time, end_time)) 
+		if((rc = mysql_daily_rollup(mysql_conn, start_time, end_time,
+					    archive_data)) 
 		   != SLURM_SUCCESS)
 			return rc;
 		END_TIMER2("daily_rollup");
-		if(query) 
+		if(query && !sent_end) 
 			xstrfmtcat(query, ", daily_rollup=%d", end_time);
-		else 
+		else if(!sent_end)
 			query = xstrdup_printf("update %s set daily_rollup=%d",
 					       last_ran_table, end_time);
 	} else {
@@ -9320,13 +10018,14 @@ extern int acct_storage_p_roll_usage(mysql_conn_t *mysql_conn,
 	if(end_time-start_time > 0) {
 		START_TIMER;
 		if((rc = mysql_monthly_rollup(
-			    mysql_conn, start_time, end_time)) != SLURM_SUCCESS)
+			    mysql_conn, start_time, end_time, archive_data))
+		   != SLURM_SUCCESS)
 			return rc;
 		END_TIMER2("monthly_rollup");
 
-		if(query) 
+		if(query && !sent_end) 
 			xstrfmtcat(query, ", monthly_rollup=%d", end_time);
-		else 
+		else if(!sent_end)
 			query = xstrdup_printf(
 				"update %s set monthly_rollup=%d",
 				last_ran_table, end_time);
@@ -9341,9 +10040,6 @@ extern int acct_storage_p_roll_usage(mysql_conn_t *mysql_conn,
 		xfree(query);
 	}
 	return rc;
-#else
-	return SLURM_ERROR;
-#endif
 }
 
 extern int clusteracct_storage_p_node_down(mysql_conn_t *mysql_conn, 
@@ -9351,7 +10047,6 @@ extern int clusteracct_storage_p_node_down(mysql_conn_t *mysql_conn,
 					   struct node_record *node_ptr,
 					   time_t event_time, char *reason)
 {
-#ifdef HAVE_MYSQL
 	uint16_t cpus;
 	int rc = SLURM_SUCCESS;
 	char *query = NULL;
@@ -9391,26 +10086,24 @@ extern int clusteracct_storage_p_node_down(mysql_conn_t *mysql_conn,
 	 */
 	xstrfmtcat(query,
 		   "insert into %s "
-		   "(node_name, cluster, cpu_count, period_start, reason) "
-		   "values (\"%s\", \"%s\", %u, %d, \"%s\") on duplicate key "
+		   "(node_name, state, cluster, cpu_count, "
+		   "period_start, reason) "
+		   "values (\"%s\", %u, \"%s\", %u, %d, \"%s\") "
+		   "on duplicate key "
 		   "update period_end=0;",
-		   event_table, node_ptr->name, cluster, 
+		   event_table, node_ptr->name, node_ptr->node_state, cluster,
 		   cpus, event_time, my_reason);
 	debug4("%d(%d) query\n%s", mysql_conn->conn, __LINE__, query);
 	rc = mysql_db_query(mysql_conn->db_conn, query);
 	xfree(query);
 
 	return rc;
-#else
-	return SLURM_ERROR;
-#endif
 }
 extern int clusteracct_storage_p_node_up(mysql_conn_t *mysql_conn, 
 					 char *cluster,
 					 struct node_record *node_ptr,
 					 time_t event_time)
 {
-#ifdef HAVE_MYSQL
 	char* query;
 	int rc = SLURM_SUCCESS;
 
@@ -9425,9 +10118,6 @@ extern int clusteracct_storage_p_node_up(mysql_conn_t *mysql_conn,
 	rc = mysql_db_query(mysql_conn->db_conn, query);
 	xfree(query);
 	return rc;
-#else
-	return SLURM_ERROR;
-#endif
 }
 
 /* This is only called when not running from the slurmdbd so we can
@@ -9437,7 +10127,6 @@ extern int clusteracct_storage_p_register_ctld(mysql_conn_t *mysql_conn,
 					       char *cluster,
 					       uint16_t port)
 {
-#ifdef HAVE_MYSQL
 	char *query = NULL;
 	char *address = NULL;
 	char hostname[255];
@@ -9463,9 +10152,11 @@ extern int clusteracct_storage_p_register_ctld(mysql_conn_t *mysql_conn,
 
 	query = xstrdup_printf(
 		"update %s set deleted=0, mod_time=%d, "
-		"control_host='%s', control_port=%u, rpc_version=%d "
+		"control_host='%s', control_port=%u, rpc_version=%d, "
 		"where name='%s';",
-		cluster_table, now, address, port, SLURMDBD_VERSION, cluster);
+		cluster_table, now, address, port,
+		SLURMDBD_VERSION,
+		cluster);
 	xstrfmtcat(query, 	
 		   "insert into %s "
 		   "(timestamp, action, name, actor, info) "
@@ -9477,18 +10168,14 @@ extern int clusteracct_storage_p_register_ctld(mysql_conn_t *mysql_conn,
 	debug3("%d(%d) query\n%s", mysql_conn->conn, __LINE__, query);
 	
 	return mysql_db_query(mysql_conn->db_conn, query);
-	
-#else
-	return SLURM_ERROR;
-#endif
 }
 
 extern int clusteracct_storage_p_cluster_procs(mysql_conn_t *mysql_conn, 
 					       char *cluster,
+					       char *cluster_nodes,
 					       uint32_t procs,
 					       time_t event_time)
 {
-#ifdef HAVE_MYSQL
 	char* query;
 	int rc = SLURM_SUCCESS;
 	int first = 0;
@@ -9500,7 +10187,7 @@ extern int clusteracct_storage_p_cluster_procs(mysql_conn_t *mysql_conn,
 
 	/* Record the processor count */
 	query = xstrdup_printf(
-		"select cpu_count from %s where cluster=\"%s\" "
+		"select cpu_count, cluster_nodes from %s where cluster=\"%s\" "
 		"and period_end=0 and node_name='' limit 1",
 		event_table, cluster);
 	if(!(result = mysql_db_query_ret(
@@ -9535,9 +10222,30 @@ extern int clusteracct_storage_p_cluster_procs(mysql_conn_t *mysql_conn,
 	if(atoi(row[0]) == procs) {
 		debug3("we have the same procs as before no need to "
 		       "update the database.");
-		goto end_it;
-	}
-	debug("%s has changed from %s cpus to %u", cluster, row[0], procs);   
+		if(cluster_nodes) {
+			if(!row[1][0]) {
+				debug("Adding cluster nodes '%s' to "
+				      "last instance of cluster '%s'.",
+				      cluster_nodes, cluster);
+				query = xstrdup_printf(
+					"update %s set cluster_nodes=\"%s\" "
+					"where cluster=\"%s\" "
+					"and period_end=0 and node_name=''",
+					event_table, cluster_nodes, cluster);
+				rc = mysql_db_query(mysql_conn->db_conn, query);
+				xfree(query);
+				goto end_it;
+			} else if(!strcmp(cluster_nodes, row[1])) {
+				debug3("we have the same nodes in the cluster "
+				       "as before no need to "
+				       "update the database.");
+				goto end_it;
+			}
+		} else 
+			goto end_it;
+	} else
+		debug("%s has changed from %s cpus to %u",
+		      cluster, row[0], procs);   
 
 	/* reset all the entries for this cluster since the procs
 	   changed some of the downed nodes may have gone away.
@@ -9553,9 +10261,10 @@ extern int clusteracct_storage_p_cluster_procs(mysql_conn_t *mysql_conn,
 		goto end_it;
 add_it:
 	query = xstrdup_printf(
-		"insert into %s (cluster, cpu_count, period_start, reason) "
-		"values (\"%s\", %u, %d, 'Cluster processor count')",
-		event_table, cluster, procs, event_time);
+		"insert into %s (cluster, cluster_nodes, cpu_count, "
+		"period_start, reason) "
+		"values (\"%s\", \"%s\", %u, %d, 'Cluster processor count')",
+		event_table, cluster, cluster_nodes, procs, event_time);
 	rc = mysql_db_query(mysql_conn->db_conn, query);
 	xfree(query);
 end_it:
@@ -9564,9 +10273,6 @@ end_it:
 		rc = ACCOUNTING_FIRST_REG;
 
 	return rc;
-#else
-	return SLURM_ERROR;
-#endif
 }
 
 extern int clusteracct_storage_p_get_usage(
@@ -9574,7 +10280,6 @@ extern int clusteracct_storage_p_get_usage(
 	acct_cluster_rec_t *cluster_rec, slurmdbd_msg_type_t type,
 	time_t start, time_t end)
 {
-#ifdef HAVE_MYSQL
 	int rc = SLURM_SUCCESS;
 	int i=0;
 	MYSQL_RES *result = NULL;
@@ -9585,6 +10290,7 @@ extern int clusteracct_storage_p_get_usage(
 	char *cluster_req_inx[] = {
 		"alloc_cpu_secs",
 		"down_cpu_secs",
+		"pdown_cpu_secs",
 		"idle_cpu_secs",
 		"resv_cpu_secs",
 		"over_cpu_secs",
@@ -9595,6 +10301,7 @@ extern int clusteracct_storage_p_get_usage(
 	enum {
 		CLUSTER_ACPU,
 		CLUSTER_DCPU,
+		CLUSTER_PDCPU,
 		CLUSTER_ICPU,
 		CLUSTER_RCPU,
 		CLUSTER_OCPU,
@@ -9643,6 +10350,7 @@ extern int clusteracct_storage_p_get_usage(
 			xmalloc(sizeof(cluster_accounting_rec_t));
 		accounting_rec->alloc_secs = atoll(row[CLUSTER_ACPU]);
 		accounting_rec->down_secs = atoll(row[CLUSTER_DCPU]);
+		accounting_rec->pdown_secs = atoll(row[CLUSTER_PDCPU]);
 		accounting_rec->idle_secs = atoll(row[CLUSTER_ICPU]);
 		accounting_rec->over_secs = atoll(row[CLUSTER_OCPU]);
 		accounting_rec->resv_secs = atoll(row[CLUSTER_RCPU]);
@@ -9653,9 +10361,6 @@ extern int clusteracct_storage_p_get_usage(
 	mysql_free_result(result);
 
 	return rc;
-#else
-	return SLURM_ERROR;
-#endif
 }
 
 /* 
@@ -9665,18 +10370,16 @@ extern int jobacct_storage_p_job_start(mysql_conn_t *mysql_conn,
 				       char *cluster_name,
 				       struct job_record *job_ptr)
 {
-#ifdef HAVE_MYSQL
 	int	rc=SLURM_SUCCESS;
-	char	*jname = NULL, *nodes = NULL;
-	long	priority;
+	char	*nodes = NULL, *jname = NULL, *node_inx = NULL;
 	int track_steps = 0;
 	char *block_id = NULL;
 	char *query = NULL;
-	char *wckey = NULL;
 	int reinit = 0;
 	time_t check_time = job_ptr->start_time;
 	uint32_t wckeyid = 0;
 	int no_cluster = 0;
+	int node_cnt = 0;
 
 	if (!job_ptr->details || !job_ptr->details->submit_time) {
 		error("jobacct_storage_p_job_start: "
@@ -9688,11 +10391,69 @@ extern int jobacct_storage_p_job_start(mysql_conn_t *mysql_conn,
 		return SLURM_ERROR;
 	
 	debug2("mysql_jobacct_job_start() called");
-	if(!check_time)
-		check_time = job_ptr->details->submit_time;
- 
+
+	/* See what we are hearing about here if no start time. If
+	 * this job's latest time is before the last rollup we will
+	 * need to reset it to look at this job. */
+	if(!check_time) {
+		check_time = job_ptr->details->begin_time;
+ 		
+		if(!check_time)
+			check_time = job_ptr->details->submit_time;
+	}
+
 	slurm_mutex_lock(&rollup_lock);
 	if(check_time < global_last_rollup) {
+		MYSQL_RES *result = NULL;
+		MYSQL_ROW row;
+
+		/* check to see if we are hearing about this time for the
+		 * first time.
+		 */
+		query = xstrdup_printf("select id from %s where jobid=%u and "
+				       "submit=%d and eligible=%d "
+				       "and start=%d;",
+				       job_table, job_ptr->job_id,
+				       job_ptr->details->submit_time, 
+				       job_ptr->details->begin_time, 
+				       job_ptr->start_time);
+		debug3("%d(%d) query\n%s", mysql_conn->conn, __LINE__, query);
+		if(!(result =
+		     mysql_db_query_ret(mysql_conn->db_conn, query, 0))) {
+			xfree(query);
+			slurm_mutex_unlock(&rollup_lock);
+			return SLURM_ERROR;
+		}
+		xfree(query);
+		if((row = mysql_fetch_row(result))) {
+			mysql_free_result(result);
+			debug4("revieved an update for a "
+			       "job (%u) already known about",
+			       job_ptr->job_id);
+			slurm_mutex_unlock(&rollup_lock);
+			goto no_rollup_change;
+		}
+		mysql_free_result(result);
+
+		if(job_ptr->start_time)
+			debug("Need to reroll usage from %sJob %u "
+			      "from %s started then and we are just "
+			      "now hearing about it.",
+			      ctime(&check_time),
+			      job_ptr->job_id, cluster_name);
+		else if(job_ptr->details->begin_time) 
+			debug("Need to reroll usage from %sJob %u "
+			      "from %s became eligible then and we are just "
+			      "now hearing about it.",
+			      ctime(&check_time), 
+			      job_ptr->job_id, cluster_name);
+		else
+			debug("Need to reroll usage from %sJob %u "
+			      "from %s was submitted then and we are just "
+			      "now hearing about it.",
+			      ctime(&check_time),
+			      job_ptr->job_id, cluster_name);
+			
 		global_last_rollup = check_time;
 		slurm_mutex_unlock(&rollup_lock);
 		
@@ -9705,7 +10466,9 @@ extern int jobacct_storage_p_job_start(mysql_conn_t *mysql_conn,
 		xfree(query);
 	} else
 		slurm_mutex_unlock(&rollup_lock);
-	
+
+no_rollup_change:
+
 	if(!cluster_name && job_ptr->assoc_id) {
 		no_cluster = 1;
 		cluster_name = _get_cluster_from_associd(mysql_conn,
@@ -9713,41 +10476,14 @@ extern int jobacct_storage_p_job_start(mysql_conn_t *mysql_conn,
 	}
 
 
-	priority = (job_ptr->priority == NO_VAL) ?
-		-1L : (long) job_ptr->priority;
-
-	if (job_ptr->name && job_ptr->name[0]) {
-		char *temp = NULL;
-		/* first set the jname to the job_ptr->name */
-		jname = xstrdup(job_ptr->name);
-		/* then grep for " since that is the delimiter for
-		   the wckey */
-		if((temp = strchr(jname, '\"'))) {
-			if(strrchr(jname, '\"') != temp) {
-				error("job %u has quotes in it's name '%s', "
-				      "no way to get correct wckey", 
-				      job_ptr->job_id, jname);
-				xfree(jname);
-				jname = _fix_double_quotes(job_ptr->name);
-			} else {
-				/* if we have a wckey set the " to NULL to
-				 * end the jname */
-				temp[0] = '\0';
-				/* increment and copy the remainder */
-				temp++;
-				wckey = xstrdup(temp);
-			}
-		}
-	}
-
-	if(!jname || !jname[0]) {
-		/* free jname if something is allocated here */
-		xfree(jname);
-		jname = xstrdup("allocation");
+	if (job_ptr->name && job_ptr->name[0]) 
+		jname = job_ptr->name;
+	else {
+		jname = "allocation";
 		track_steps = 1;
 	}
 	
-	if (job_ptr->nodes && job_ptr->nodes[0])
+	if (job_ptr->nodes && job_ptr->nodes[0]) 
 		nodes = job_ptr->nodes;
 	else
 		nodes = "None assigned";
@@ -9757,18 +10493,32 @@ extern int jobacct_storage_p_job_start(mysql_conn_t *mysql_conn,
 
 	if(slurmdbd_conf) {
 		block_id = xstrdup(job_ptr->comment);
+		node_cnt = job_ptr->node_cnt;
+		node_inx = job_ptr->network;
 	} else {
+		char temp_bit[BUF_SIZE];
+
+		if(job_ptr->node_bitmap) 
+			node_inx = bit_fmt(temp_bit, sizeof(temp_bit), 
+					   job_ptr->node_bitmap);
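+		/* bit_fmt renders the allocated node bitmap as a
+		   printable index list (e.g. "0-15,32") */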
+#ifdef HAVE_BG
 		select_g_get_jobinfo(job_ptr->select_jobinfo, 
 				     SELECT_DATA_BLOCK_ID, 
 				     &block_id);
+		select_g_get_jobinfo(job_ptr->select_jobinfo, 
+				     SELECT_DATA_NODE_CNT, 
+				     &node_cnt);
+#else
+		node_cnt = job_ptr->node_cnt;
+#endif 		
 	}
 
 	job_ptr->requid = -1; /* force to -1 for sacct to know this
 			       * hasn't been set yet */
 	
 	/* if there is a start_time get the wckeyid */
-	if(job_ptr->start_time) 
-		wckeyid = _get_wckeyid(mysql_conn, &wckey,
+	if(job_ptr->start_time && job_ptr->assoc_id) 
+		wckeyid = _get_wckeyid(mysql_conn, &job_ptr->wckey,
 				       job_ptr->user_id, cluster_name,
 				       job_ptr->assoc_id);
 	
@@ -9785,7 +10535,8 @@ extern int jobacct_storage_p_job_start(mysql_conn_t *mysql_conn,
 				job_ptr->details->submit_time;
 		query = xstrdup_printf(
 			"insert into %s "
-			"(jobid, associd, wckeyid, uid, gid, nodelist, ",
+			"(jobid, associd, wckeyid, uid, "
+			"gid, nodelist, resvid, timelimit, ",
 			job_table);
 
 		if(cluster_name) 
@@ -9796,15 +10547,19 @@ extern int jobacct_storage_p_job_start(mysql_conn_t *mysql_conn,
 			xstrcat(query, "partition, ");
 		if(block_id) 
 			xstrcat(query, "blockid, ");
-		if(wckey) 
+		if(job_ptr->wckey) 
 			xstrcat(query, "wckey, ");
+		if(node_inx) 
+			xstrcat(query, "node_inx, ");
 		
 		xstrfmtcat(query, 
 			   "eligible, submit, start, name, track_steps, "
-			   "state, priority, req_cpus, alloc_cpus) "
-			   "values (%u, %u, %u, %u, %u, \"%s\", ",
+			   "state, priority, req_cpus, "
+			   "alloc_cpus, alloc_nodes) "
+			   "values (%u, %u, %u, %u, %u, \"%s\", %u, %u, ",
 			   job_ptr->job_id, job_ptr->assoc_id, wckeyid,
-			   job_ptr->user_id, job_ptr->group_id, nodes);
+			   job_ptr->user_id, job_ptr->group_id, nodes, 
+			   job_ptr->resv_id, job_ptr->time_limit);
 		
 		if(cluster_name) 
 			xstrfmtcat(query, "\"%s\", ", cluster_name);
@@ -9814,23 +10569,26 @@ extern int jobacct_storage_p_job_start(mysql_conn_t *mysql_conn,
 			xstrfmtcat(query, "\"%s\", ", job_ptr->partition);
 		if(block_id) 
 			xstrfmtcat(query, "\"%s\", ", block_id);
-		if(wckey) 
-			xstrfmtcat(query, "\"%s\", ", wckey);
+		if(job_ptr->wckey) 
+			xstrfmtcat(query, "\"%s\", ", job_ptr->wckey);
+		if(node_inx) 
+			xstrfmtcat(query, "\"%s\", ", node_inx);
 
 		xstrfmtcat(query, 
-			   "%d, %d, %d, \"%s\", %u, %u, %u, %u, %u) "
+			   "%d, %d, %d, \"%s\", %u, %u, %u, %u, %u, %u) "
 			   "on duplicate key update "
 			   "id=LAST_INSERT_ID(id), state=%u, "
-			   "associd=%u, wckeyid=%u",
+			   "associd=%u, wckeyid=%u, resvid=%u, timelimit=%u",
 			   (int)job_ptr->details->begin_time,
 			   (int)job_ptr->details->submit_time,
 			   (int)job_ptr->start_time,
 			   jname, track_steps,
 			   job_ptr->job_state & (~JOB_COMPLETING),
-			   priority, job_ptr->num_procs,
-			   job_ptr->total_procs, 
+			   job_ptr->priority, job_ptr->num_procs,
+			   job_ptr->total_procs, node_cnt,
 			   job_ptr->job_state & (~JOB_COMPLETING),
-			   job_ptr->assoc_id, wckeyid);
+			   job_ptr->assoc_id, wckeyid, job_ptr->resv_id,
+			   job_ptr->time_limit);
 
 		if(job_ptr->account) 
 			xstrfmtcat(query, ", account=\"%s\"", job_ptr->account);
@@ -9839,8 +10597,10 @@ extern int jobacct_storage_p_job_start(mysql_conn_t *mysql_conn,
 				   job_ptr->partition);
 		if(block_id)
 			xstrfmtcat(query, ", blockid=\"%s\"", block_id);
-		if(wckey) 
-			xstrfmtcat(query, ", wckey=\"%s\"", wckey);
+		if(job_ptr->wckey) 
+			xstrfmtcat(query, ", wckey=\"%s\"", job_ptr->wckey);
+		if(node_inx) 
+			xstrfmtcat(query, ", node_inx=\"%s\"", node_inx);
 		
 		debug3("%d(%d) query\n%s", mysql_conn->conn, __LINE__, query);
 	try_again:
@@ -9870,31 +10630,30 @@ extern int jobacct_storage_p_job_start(mysql_conn_t *mysql_conn,
 				   job_ptr->partition);
 		if(block_id)
 			xstrfmtcat(query, "blockid=\"%s\", ", block_id);
-
-		if(wckey) 
-			xstrfmtcat(query, "wckey=\"%s\", ", wckey);
+		if(job_ptr->wckey) 
+			xstrfmtcat(query, "wckey=\"%s\", ", job_ptr->wckey);
+		if(node_inx) 
+			xstrfmtcat(query, "node_inx=\"%s\", ", node_inx);
 
 		xstrfmtcat(query, "start=%d, name=\"%s\", state=%u, "
-			   "alloc_cpus=%u, associd=%u, wckeyid=%u where id=%d",
+			   "alloc_cpus=%u, alloc_nodes=%u, "
+			   "associd=%u, wckeyid=%u, resvid=%u, timelimit=%u "
+			   "where id=%d",
 			   (int)job_ptr->start_time,
 			   jname, job_ptr->job_state & (~JOB_COMPLETING),
-			   job_ptr->total_procs, job_ptr->assoc_id, wckeyid,
+			   job_ptr->total_procs, node_cnt, 
+			   job_ptr->assoc_id, wckeyid,
+			   job_ptr->resv_id, job_ptr->time_limit, 
 			   job_ptr->db_index);
 		debug3("%d(%d) query\n%s", mysql_conn->conn, __LINE__, query);
 		rc = mysql_db_query(mysql_conn->db_conn, query);
 	}
 
 	xfree(block_id);
-	xfree(jname);
-	xfree(wckey);
-
 	xfree(query);
 	if(no_cluster)
 		xfree(cluster_name);
 	return rc;
-#else
-	return SLURM_ERROR;
-#endif
 }
 
 /* 
@@ -9903,7 +10662,6 @@ extern int jobacct_storage_p_job_start(mysql_conn_t *mysql_conn,
 extern int jobacct_storage_p_job_complete(mysql_conn_t *mysql_conn, 
 					  struct job_record *job_ptr)
 {
-#ifdef HAVE_MYSQL
 	char *query = NULL, *nodes = NULL;
 	int rc=SLURM_SUCCESS;
 	time_t start_time = job_ptr->start_time;
@@ -9979,9 +10737,6 @@ extern int jobacct_storage_p_job_complete(mysql_conn_t *mysql_conn,
 	xfree(query);
 	
 	return rc;
-#else
-	return SLURM_ERROR;
-#endif
 }
 
 /* 
@@ -9990,15 +10745,14 @@ extern int jobacct_storage_p_job_complete(mysql_conn_t *mysql_conn,
 extern int jobacct_storage_p_step_start(mysql_conn_t *mysql_conn, 
 					struct step_record *step_ptr)
 {
-#ifdef HAVE_MYSQL
-	int cpus = 0;
+	int cpus = 0, tasks = 0, nodes = 0, task_dist = 0;
 	int rc=SLURM_SUCCESS;
 	char node_list[BUFFER_SIZE];
+	char *node_inx = NULL;
 #ifdef HAVE_BG
 	char *ionodes = NULL;
 #endif
 	char *query = NULL;
-	char *sname = NULL;
 
 	if (!step_ptr->job_ptr->db_index 
 	    && (!step_ptr->job_ptr->details
@@ -10011,12 +10765,21 @@ extern int jobacct_storage_p_step_start(mysql_conn_t *mysql_conn,
 	if(_check_connection(mysql_conn) != SLURM_SUCCESS)
 		return SLURM_ERROR;
 	if(slurmdbd_conf) {
-		cpus = step_ptr->job_ptr->total_procs;
+		tasks = step_ptr->job_ptr->details->num_tasks;
+		cpus = step_ptr->cpu_count;
 		snprintf(node_list, BUFFER_SIZE, "%s",
 			 step_ptr->job_ptr->nodes);
+		nodes = step_ptr->step_layout->node_cnt;
+		task_dist = step_ptr->step_layout->task_dist;
+		node_inx = step_ptr->network;
 	} else {
+		char temp_bit[BUF_SIZE];
+
+		if(step_ptr->step_node_bitmap) 
+			node_inx = bit_fmt(temp_bit, sizeof(temp_bit), 
+					   step_ptr->step_node_bitmap);
 #ifdef HAVE_BG
-		cpus = step_ptr->job_ptr->num_procs;
+		tasks = cpus = step_ptr->job_ptr->num_procs;
 		select_g_get_jobinfo(step_ptr->job_ptr->select_jobinfo, 
 				     SELECT_DATA_IONODES, 
 				     &ionodes);
@@ -10027,20 +10790,26 @@ extern int jobacct_storage_p_step_start(mysql_conn_t *mysql_conn,
 		} else
 			snprintf(node_list, BUFFER_SIZE, "%s",
 				 step_ptr->job_ptr->nodes);
-		
+		select_g_get_jobinfo(step_ptr->job_ptr->select_jobinfo, 
+				     SELECT_DATA_NODE_CNT, 
+				     &nodes);
 #else
 		if(!step_ptr->step_layout || !step_ptr->step_layout->task_cnt) {
-			cpus = step_ptr->job_ptr->total_procs;
+			tasks = cpus = step_ptr->job_ptr->total_procs;
 			snprintf(node_list, BUFFER_SIZE, "%s",
 				 step_ptr->job_ptr->nodes);
+			nodes = step_ptr->job_ptr->node_cnt;
 		} else {
-			cpus = step_ptr->step_layout->task_cnt;
+			cpus = step_ptr->cpu_count; 
+			tasks = step_ptr->step_layout->task_cnt;
+			nodes = step_ptr->step_layout->node_cnt;
+			task_dist = step_ptr->step_layout->task_dist;
 			snprintf(node_list, BUFFER_SIZE, "%s", 
 				 step_ptr->step_layout->node_list);
 		}
 #endif
 	}
-	
+
 	step_ptr->job_ptr->requid = -1; /* force to -1 for sacct to know this
 					 * hasn't been set yet  */
 
@@ -10063,38 +10832,25 @@ extern int jobacct_storage_p_step_start(mysql_conn_t *mysql_conn,
 		}
 	}
 
-	if (step_ptr->name && step_ptr->name[0]) {
-		char *temp = NULL;
-		/* first set the jname to the job_ptr->name */
-		sname = xstrdup(step_ptr->name);
-		/* then grep for " since that is the delimiter for
-		   the wckey */
-		if((temp = strchr(sname, '\"'))) {
-			/* if we have a wckey set the " to NULL to
-			 * end the jname */
-			temp[0] = '\0';
-		}
-	}
-
 	/* we want to print a -1 for the requid so leave it a
 	   %d */
 	query = xstrdup_printf(
 		"insert into %s (id, stepid, start, name, state, "
-		"cpus, nodelist) "
-		"values (%d, %d, %d, \"%s\", %d, %d, \"%s\") "
-		"on duplicate key update cpus=%d, end=0, state=%d",
+		"cpus, nodes, node_inx, tasks, nodelist, task_dist) "
+		"values (%d, %d, %d, \"%s\", %d, %d, %d, %d, "
+		"\"%s\", \"%s\", %d) "
+		"on duplicate key update cpus=%d, nodes=%d, "
+		"tasks=%d, end=0, state=%d, node_inx=\"%s\", task_dist=%d",
 		step_table, step_ptr->job_ptr->db_index,
 		step_ptr->step_id, 
-		(int)step_ptr->start_time, sname,
-		JOB_RUNNING, cpus, node_list, cpus, JOB_RUNNING);
+		(int)step_ptr->start_time, step_ptr->name,
+		JOB_RUNNING, cpus, nodes, tasks, node_list, node_inx, task_dist,
+		cpus, nodes, tasks, JOB_RUNNING, node_inx, task_dist);
 	debug3("%d(%d) query\n%s", mysql_conn->conn, __LINE__, query);
 	rc = mysql_db_query(mysql_conn->db_conn, query);
 	xfree(query);
-	xfree(sname);
+
 	return rc;
-#else
-	return SLURM_ERROR;
-#endif
 }
 
 /* 
@@ -10103,11 +10859,10 @@ extern int jobacct_storage_p_step_start(mysql_conn_t *mysql_conn,
 extern int jobacct_storage_p_step_complete(mysql_conn_t *mysql_conn, 
 					   struct step_record *step_ptr)
 {
-#ifdef HAVE_MYSQL
 	time_t now;
 	int elapsed;
 	int comp_status;
-	int cpus = 0;
+	int cpus = 0, tasks = 0;
 	struct jobacctinfo *jobacct = (struct jobacctinfo *)step_ptr->jobacct;
 	struct jobacctinfo dummy_jobacct;
 	float ave_vsize = 0, ave_rss = 0, ave_pages = 0;
@@ -10126,7 +10881,7 @@ extern int jobacct_storage_p_step_complete(mysql_conn_t *mysql_conn,
 
 	if (jobacct == NULL) {
 		/* JobAcctGather=jobacct_gather/none, no data to process */
-		bzero(&dummy_jobacct, sizeof(dummy_jobacct));
+		memset(&dummy_jobacct, 0, sizeof(dummy_jobacct));
 		jobacct = &dummy_jobacct;
 	}
 
@@ -10135,18 +10890,20 @@ extern int jobacct_storage_p_step_complete(mysql_conn_t *mysql_conn,
 
 	if(slurmdbd_conf) {
 		now = step_ptr->job_ptr->end_time;
-		cpus = step_ptr->job_ptr->total_procs;
-
+		tasks = step_ptr->job_ptr->details->num_tasks;
+		cpus = step_ptr->cpu_count;
 	} else {
 		now = time(NULL);
 #ifdef HAVE_BG
-		cpus = step_ptr->job_ptr->num_procs;
+		tasks = cpus = step_ptr->job_ptr->num_procs;
 		
 #else
 		if(!step_ptr->step_layout || !step_ptr->step_layout->task_cnt)
-			cpus = step_ptr->job_ptr->total_procs;
-		else 
-			cpus = step_ptr->step_layout->task_cnt;
+			tasks = cpus = step_ptr->job_ptr->total_procs;
+		else {
+			cpus = step_ptr->cpu_count; 
+			tasks = step_ptr->step_layout->task_cnt;
+		}
 #endif
 	}
 	
@@ -10248,9 +11005,6 @@ extern int jobacct_storage_p_step_complete(mysql_conn_t *mysql_conn,
 	xfree(query);
 	 
 	return rc;
-#else
-	return SLURM_ERROR;
-#endif
 }
 
 /* 
@@ -10259,7 +11013,6 @@ extern int jobacct_storage_p_step_complete(mysql_conn_t *mysql_conn,
 extern int jobacct_storage_p_suspend(mysql_conn_t *mysql_conn, 
 				     struct job_record *job_ptr)
 {
-#ifdef HAVE_MYSQL
 	char *query = NULL;
 	int rc = SLURM_SUCCESS;
 	bool suspended = false;
@@ -10320,9 +11073,6 @@ extern int jobacct_storage_p_suspend(mysql_conn_t *mysql_conn,
 	}
 	
 	return rc;
-#else
-	return SLURM_ERROR;
-#endif
 }
 
 /* 
@@ -10335,11 +11085,11 @@ extern List jobacct_storage_p_get_jobs_cond(mysql_conn_t *mysql_conn,
 					    acct_job_cond_t *job_cond)
 {
 	List job_list = NULL;
-#ifdef HAVE_MYSQL
+
 	if(_check_connection(mysql_conn) != SLURM_SUCCESS)
 		return NULL;
 	job_list = mysql_jobacct_process_get_jobs(mysql_conn, uid, job_cond);	
-#endif
+
 	return job_list;
 }
 
@@ -10349,13 +11099,10 @@ extern List jobacct_storage_p_get_jobs_cond(mysql_conn_t *mysql_conn,
 extern int jobacct_storage_p_archive(mysql_conn_t *mysql_conn, 
 				     acct_archive_cond_t *arch_cond)
 {
-#ifdef HAVE_MYSQL
 	if(_check_connection(mysql_conn) != SLURM_SUCCESS)
 		return SLURM_ERROR;
 	
 	return mysql_jobacct_process_archive(mysql_conn, arch_cond);
-#endif
-	return SLURM_ERROR;
 }
 
 /* 
@@ -10364,20 +11111,16 @@ extern int jobacct_storage_p_archive(mysql_conn_t *mysql_conn,
 extern int jobacct_storage_p_archive_load(mysql_conn_t *mysql_conn, 
 					  acct_archive_rec_t *arch_rec)
 {
-#ifdef HAVE_MYSQL
 	if(_check_connection(mysql_conn) != SLURM_SUCCESS)
 		return SLURM_ERROR;
 
 	return mysql_jobacct_process_archive_load(mysql_conn, arch_rec);
-#endif
-	return SLURM_ERROR;
 }
 
 extern int acct_storage_p_update_shares_used(mysql_conn_t *mysql_conn, 
 					     List shares_used)
 {
-	/* This definitely needs to be fleshed out.
-	 * Go through the list of shares_used_object_t objects and store them */
+	/* No plans to have the database hold the used shares */
 	return SLURM_SUCCESS;
 }
 
@@ -10385,7 +11128,6 @@ extern int acct_storage_p_flush_jobs_on_cluster(
 	mysql_conn_t *mysql_conn, char *cluster, time_t event_time)
 {
 	int rc = SLURM_SUCCESS;
-#ifdef HAVE_MYSQL
 	/* put end times for a clean start */
 	MYSQL_RES *result = NULL;
 	MYSQL_ROW row;
@@ -10462,7 +11204,6 @@ extern int acct_storage_p_flush_jobs_on_cluster(
 		rc = mysql_db_query(mysql_conn->db_conn, query);
 		xfree(query);
 	}
-#endif
 
 	return rc;
 }
diff --git a/src/plugins/accounting_storage/mysql/mysql_jobacct_process.c b/src/plugins/accounting_storage/mysql/mysql_jobacct_process.c
index 34e4b04a755ed4552e211b3c05bc3c78ef16905e..938c163ee2220490d4661c3fc7b8c1deddcf3132 100644
--- a/src/plugins/accounting_storage/mysql/mysql_jobacct_process.c
+++ b/src/plugins/accounting_storage/mysql/mysql_jobacct_process.c
@@ -4,12 +4,14 @@
  *                               storage.
  *****************************************************************************
  *
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
  *  Copyright (C) 2004-2007 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Danny Auble <da@llnl.gov>
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -46,9 +48,26 @@
 #include "mysql_jobacct_process.h"
 #include <fcntl.h>
 
-#ifdef HAVE_MYSQL
 static pthread_mutex_t local_file_lock = PTHREAD_MUTEX_INITIALIZER;
 
+typedef struct {
+	hostlist_t hl;
+	time_t start;
+	time_t end;
+	bitstr_t *asked_bitmap;
+} local_cluster_t;
+
+static void _destroy_local_cluster(void *object)
+{
+	local_cluster_t *local_cluster = (local_cluster_t *)object;
+	if(local_cluster) {
+		if(local_cluster->hl)
+			hostlist_destroy(local_cluster->hl);
+		FREE_NULL_BITMAP(local_cluster->asked_bitmap);
+		xfree(local_cluster);
+	}
+}
+
 static int _write_to_file(int fd, char *data)
 {
 	int pos = 0, nwrite = strlen(data), amount;
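
local_cluster_t above is released through _destroy_local_cluster(), the destructor callback handed to list_create() so the container can free its own elements. A standalone sketch of the idiom, using libc in place of SLURM's list and xmalloc (names are illustrative):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct node {
	struct node *next;
	void *data;
} node_t;

typedef void (*destroy_f)(void *);

/* free every element through the element-specific destructor,
 * mirroring list_create(_destroy_local_cluster) above */
static void list_destroy_all(node_t *head, destroy_f del)
{
	while (head) {
		node_t *next = head->next;
		if (del && head->data)
			del(head->data);
		free(head);
		head = next;
	}
}

int main(void)
{
	node_t *n = malloc(sizeof(*n));
	n->next = NULL;
	n->data = strdup("cluster-a");
	list_destroy_all(n, free);	/* free() is the destructor here */
	return 0;
}
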
@@ -67,6 +86,128 @@ static int _write_to_file(int fd, char *data)
 	return rc;
 }
 
+static int _write_archive_file(MYSQL_RES *result, int start_col, int col_count,
+			       time_t curr_end, char *arch_dir,
+			       char *arch_type, char *insert,
+			       bool with_deleted)
+{
+	time_t period_start = 0;
+	int fd = 0;
+	int rc = SLURM_SUCCESS;
+	MYSQL_ROW row;
+	struct tm time_tm;
+	char *old_file = NULL, *new_file = NULL, *reg_file = NULL;
+	char *values = NULL;
+	char start_char[32];
+	char end_char[32];
+	int i=0;
+
+	xassert(result);
+
+	//START_TIMER;
+	slurm_mutex_lock(&local_file_lock);
+	while((row = mysql_fetch_row(result))) {
+		if(period_start) {
+			xstrcat(values, ",\n(");
+		} else {
+			period_start = atoi(row[start_col]);
+			localtime_r((time_t *)&period_start, &time_tm);
+			time_tm.tm_sec = 0;
+			time_tm.tm_min = 0;
+			time_tm.tm_hour = 0;
+			time_tm.tm_mday = 1;
+			snprintf(start_char, sizeof(start_char),
+				 "%4.4u-%2.2u-%2.2u"
+				 "T%2.2u:%2.2u:%2.2u",
+				 (time_tm.tm_year + 1900),
+				 (time_tm.tm_mon+1), 
+				 time_tm.tm_mday,
+				 time_tm.tm_hour,
+				 time_tm.tm_min, 
+				 time_tm.tm_sec);
+
+			localtime_r((time_t *)&curr_end, &time_tm);
+			snprintf(end_char, sizeof(end_char),
+				 "%4.4u-%2.2u-%2.2u"
+				 "T%2.2u:%2.2u:%2.2u",
+				 (time_tm.tm_year + 1900),
+				 (time_tm.tm_mon+1), 
+				 time_tm.tm_mday,
+				 time_tm.tm_hour,
+				 time_tm.tm_min, 
+				 time_tm.tm_sec);
+
+			/* write the buffer to file */
+			reg_file = xstrdup_printf(
+				"%s/%s_archive_%s_%s.sql",
+				arch_dir, arch_type,
+				start_char, end_char);
+			debug("Storing %s archive at %s", arch_type, reg_file);
+			old_file = xstrdup_printf("%s.old", reg_file);
+			new_file = xstrdup_printf("%s.new", reg_file);
+					
+			fd = creat(new_file, 0600);
+			if (fd < 0) {
+				error("Can't save archive, "
+				      "create file %s error %m",
+				      new_file);
+				rc = errno;
+				break;
+			} 
+			values = xstrdup_printf("%s\nvalues\n(", insert);
+		}
+	
+		xstrfmtcat(values, "'%s'", row[0]);
+		for(i=1; i<col_count; i++) {
+			xstrfmtcat(values, ", '%s'", row[i]);
+		}
+
+		if(with_deleted)
+			xstrcat(values, ", '1')");
+		else
+			xstrcat(values, ")");
+				
+		if((fd < 0)
+		   || ((rc = _write_to_file(fd, values)) != SLURM_SUCCESS)) {
+			xfree(values);
+			break;
+		}
+		xfree(values);
+	}
+
+	if(with_deleted)
+	      	rc = _write_to_file(fd,
+			    " on duplicate key update "
+			    "deleted=1;");
+	else
+	      	rc = _write_to_file(fd,
+			    " on duplicate key update "
+			    "period_end=VALUES(period_end);");
+//			END_TIMER2("write file");
+//			info("write file took %s", TIME_STR);
+
+	fsync(fd);
+	close(fd);
+			
+	if (rc)
+		(void) unlink(new_file);
+	else {			/* file shuffle */
+		int ign;	/* avoid warning */
+		(void) unlink(old_file);
+		ign =  link(reg_file, old_file);
+		(void) unlink(reg_file);
+		ign =  link(new_file, reg_file);
+		(void) unlink(new_file);
+	}
+	xfree(old_file);
+	xfree(reg_file);
+	xfree(new_file);
+	slurm_mutex_unlock(&local_file_lock);
+
+	return rc;
+}
+
 static int _archive_script(acct_archive_cond_t *arch_cond, time_t last_submit)
 {
 	char * args[] = {arch_cond->archive_script, NULL};
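
_write_archive_file() batches every fetched row into one multi-row INSERT ... ON DUPLICATE KEY UPDATE dump, then swaps the dump into place with the link()/unlink() "file shuffle" so a readable archive exists at every step. A minimal standalone sketch of that shuffle (paths and the write helper are illustrative):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <unistd.h>

static int write_dump(const char *reg_file, const char *data)
{
	char old_file[256], new_file[256];
	int fd, rc = 0;

	snprintf(old_file, sizeof(old_file), "%s.old", reg_file);
	snprintf(new_file, sizeof(new_file), "%s.new", reg_file);

	/* write the whole dump to NAME.new first */
	fd = creat(new_file, 0600);
	if (fd < 0)			/* creat() returns -1 on error */
		return -1;
	if (write(fd, data, strlen(data)) != (ssize_t)strlen(data))
		rc = -1;
	fsync(fd);
	close(fd);

	if (rc)
		unlink(new_file);	/* keep the previous archive */
	else {				/* the "file shuffle" */
		unlink(old_file);
		link(reg_file, old_file);	/* NAME     -> NAME.old */
		unlink(reg_file);
		link(new_file, reg_file);	/* NAME.new -> NAME     */
		unlink(new_file);
	}
	return rc;
}

int main(void)
{
	return write_dump("job_archive.sql", "insert into job ...;\n");
}
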
@@ -105,30 +246,31 @@ static int _archive_script(acct_archive_cond_t *arch_cond, time_t last_submit)
 
 	env = env_array_create();
 
-	if(arch_cond->step_purge) {
+	if(arch_cond->purge_event) {
 		/* use localtime to avoid any daylight savings issues */
 		if(!localtime_r(&last_submit, &time_tm)) {
-			error("Couldn't get localtime from first step start %d",
+			error("Couldn't get localtime from "
+			      "first event start %d",
 			      last_submit);
 			return SLURM_ERROR;
 		}
-		time_tm.tm_mon -= arch_cond->step_purge;
+		time_tm.tm_mon -= arch_cond->purge_event;
 		time_tm.tm_isdst = -1;
 		curr_end = mktime(&time_tm);
-		env_array_append_fmt(&env, "SLURM_ARCHIVE_STEPS", "%u",
-				     arch_cond->archive_steps);
-		env_array_append_fmt(&env, "SLURM_ARCHIVE_LAST_STEP", "%d",
+		env_array_append_fmt(&env, "SLURM_ARCHIVE_EVENTS", "%u",
+				     arch_cond->archive_events);
+		env_array_append_fmt(&env, "SLURM_ARCHIVE_LAST_EVENT", "%d",
 				     curr_end);
 	}
 
-	if(arch_cond->job_purge) {
+	if(arch_cond->purge_job) {
 		/* use localtime to avoid any daylight savings issues */
 		if(!localtime_r(&last_submit, &time_tm)) {
 			error("Couldn't get localtime from first start %d",
 			      last_submit);
 			return SLURM_ERROR;
 		}
-		time_tm.tm_mon -= arch_cond->job_purge;
+		time_tm.tm_mon -= arch_cond->purge_job;
 		time_tm.tm_isdst = -1;
 		curr_end = mktime(&time_tm);
 		
@@ -138,6 +280,39 @@ static int _archive_script(acct_archive_cond_t *arch_cond, time_t last_submit)
 				      curr_end);
 	}
 
+	if(arch_cond->purge_step) {
+		/* use localtime to avoid any daylight savings issues */
+		if(!localtime_r(&last_submit, &time_tm)) {
+			error("Couldn't get localtime from first step start %d",
+			      last_submit);
+			return SLURM_ERROR;
+		}
+		time_tm.tm_mon -= arch_cond->purge_step;
+		time_tm.tm_isdst = -1;
+		curr_end = mktime(&time_tm);
+		env_array_append_fmt(&env, "SLURM_ARCHIVE_STEPS", "%u",
+				     arch_cond->archive_steps);
+		env_array_append_fmt(&env, "SLURM_ARCHIVE_LAST_STEP", "%d",
+				     curr_end);
+	}
+
+	if(arch_cond->purge_suspend) {
+		/* use localtime to avoid any daylight savings issues */
+		if(!localtime_r(&last_submit, &time_tm)) {
+			error("Couldn't get localtime from first "
+			      "suspend start %d",
+			      last_submit);
+			return SLURM_ERROR;
+		}
+		time_tm.tm_mon -= arch_cond->purge_suspend;
+		time_tm.tm_isdst = -1;
+		curr_end = mktime(&time_tm);
+		env_array_append_fmt(&env, "SLURM_ARCHIVE_SUSPEND", "%u",
+				     arch_cond->archive_suspend);
+		env_array_append_fmt(&env, "SLURM_ARCHIVE_LAST_SUSPEND", "%d",
+				     curr_end);
+	}
+
 #ifdef _PATH_STDPATH
 	env_array_append (&env, "PATH", _PATH_STDPATH);
 #else
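
Each purge block above computes its cutoff the same way: subtract N months in broken-down time, set tm_isdst to -1, and let mktime() renormalize, then hand the boundary to the archive script through environment variables. A standalone sketch of that computation (the helper name is illustrative; the env var is one of those set above):

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

/* cutoff computation mirroring _archive_script(): go N months back in
 * broken-down time and let mktime() renormalize the date */
static time_t months_ago(time_t from, int months)
{
	struct tm tm;

	if (!localtime_r(&from, &tm))
		return (time_t)-1;
	tm.tm_mon -= months;	/* may go negative; mktime() fixes it */
	tm.tm_isdst = -1;	/* let mktime() decide DST */
	return mktime(&tm);
}

int main(void)
{
	char buf[32];
	time_t cut = months_ago(time(NULL), 6);

	/* hand the boundary to a child script, like the env vars above */
	snprintf(buf, sizeof(buf), "%ld", (long)cut);
	setenv("SLURM_ARCHIVE_LAST_STEP", buf, 1);
	printf("purge everything that ended before %s", ctime(&cut));
	return 0;
}
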
@@ -150,15 +325,151 @@ static int _archive_script(acct_archive_cond_t *arch_cond, time_t last_submit)
 	return SLURM_SUCCESS;
 }
 
+extern List setup_cluster_list_with_inx(mysql_conn_t *mysql_conn,
+					acct_job_cond_t *job_cond,
+					void **curr_cluster)
+{
+	List local_cluster_list = NULL;
+	time_t now = time(NULL);
+	MYSQL_RES *result = NULL;
+	MYSQL_ROW row;
+	hostlist_t temp_hl = NULL;
+	hostlist_iterator_t h_itr = NULL;
+	char *object = NULL;
+	char *query = NULL;
+	
+	if(!job_cond || !job_cond->used_nodes) 
+		return NULL;
+
+	if(!job_cond->cluster_list || list_count(job_cond->cluster_list) != 1) {
+		error("When doing a query against nodes "
+		      "you must request exactly "
+		      "one cluster.");
+		return NULL;
+	}
+	
+	temp_hl = hostlist_create(job_cond->used_nodes);
+	if(!hostlist_count(temp_hl)) {
+		error("Didn't get any real hosts to look for.");
+		goto no_hosts;
+	}
+	h_itr = hostlist_iterator_create(temp_hl);
+
+	query = xstrdup_printf("select cluster_nodes, period_start, "
+			       "period_end from %s where node_name='' "
+			       "&& cluster_nodes !=''",
+			       event_table);
+
+	if((object = list_peek(job_cond->cluster_list)))
+		xstrfmtcat(query, " && cluster='%s'", object);
 
-extern int setup_job_cond_limits(acct_job_cond_t *job_cond, char **extra)
+	if(job_cond->usage_start) {
+		if(!job_cond->usage_end)
+			job_cond->usage_end = now;
+
+		xstrfmtcat(query, 
+			   " && ((period_start < %d) "
+			   "&& (period_end >= %d || period_end = 0))",
+			   job_cond->usage_end, job_cond->usage_start);
+	}
+		
+	debug3("%d(%d) query\n%s", mysql_conn->conn, __LINE__, query);
+	if(!(result = mysql_db_query_ret(mysql_conn->db_conn, query, 0))) {
+		xfree(query);
+		hostlist_destroy(temp_hl);
+		return NULL;
+	}
+	xfree(query);
+
+	local_cluster_list = list_create(_destroy_local_cluster);
+	while((row = mysql_fetch_row(result))) {
+		char *host = NULL;
+		int loc = 0;
+		local_cluster_t *local_cluster =
+			xmalloc(sizeof(local_cluster_t));
+		local_cluster->hl = hostlist_create(row[0]);
+		local_cluster->start = atoi(row[1]);
+		local_cluster->end   = atoi(row[2]);
+		local_cluster->asked_bitmap = 
+			bit_alloc(hostlist_count(local_cluster->hl));
+		while((host = hostlist_next(h_itr))) {
+			if((loc = hostlist_find(
+				    local_cluster->hl, host)) != -1) 
+				bit_set(local_cluster->asked_bitmap, loc);
+			free(host);
+		}
+		hostlist_iterator_reset(h_itr);
+		if(bit_ffs(local_cluster->asked_bitmap) != -1) {
+			list_append(local_cluster_list, local_cluster);
+			if(local_cluster->end == 0) {
+				local_cluster->end = now;
+				(*curr_cluster) = local_cluster;
+			}
+		} else 
+			_destroy_local_cluster(local_cluster);
+	}
+	mysql_free_result(result);
+	hostlist_iterator_destroy(h_itr);
+	if(!list_count(local_cluster_list)) {
+		hostlist_destroy(temp_hl);
+		list_destroy(local_cluster_list);
+		return NULL;
+	}
+no_hosts:
+
+	hostlist_destroy(temp_hl);
+
+	return local_cluster_list;
+}
+
+extern int good_nodes_from_inx(List local_cluster_list, 
+			       void **object, char *node_inx,
+			       int submit)
+{
+	local_cluster_t **curr_cluster = (local_cluster_t **)object;
+
+	/* check the bitmap to see if this is one of the jobs
+	   we are looking for */
+	if(*curr_cluster) {
+		bitstr_t *job_bitmap = NULL;
+		if(!node_inx || !node_inx[0])
+			return 0;
+		if((submit < (*curr_cluster)->start)
+		   || (submit > (*curr_cluster)->end)) {
+			local_cluster_t *local_cluster = NULL;
+			
+			ListIterator itr =
+				list_iterator_create(local_cluster_list);
+			while((local_cluster = list_next(itr))) {
+				if((submit >= local_cluster->start)
+				   && (submit <= local_cluster->end)) {
+					*curr_cluster = local_cluster;
+					break;
+				}
+			}
+			list_iterator_destroy(itr);
+			if(!local_cluster)
+				return 0;
+		}
+		job_bitmap = bit_alloc(hostlist_count((*curr_cluster)->hl));
+		bit_unfmt(job_bitmap, node_inx);
+		if(!bit_overlap((*curr_cluster)->asked_bitmap, job_bitmap)) {
+			FREE_NULL_BITMAP(job_bitmap);
+			return 0;
+		}
+		FREE_NULL_BITMAP(job_bitmap);
+	}
+	return 1;
+}
+
+extern int setup_job_cond_limits(mysql_conn_t *mysql_conn,
+				 acct_job_cond_t *job_cond, char **extra)
 {
 	int set = 0;
 	ListIterator itr = NULL;
 	char *object = NULL;
 	char *table_level = "t2";
 	jobacct_selected_step_t *selected_step = NULL;
-	time_t now = time(NULL);
 
 	if(!job_cond)
 		return 0;
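
setup_cluster_list_with_inx() and good_nodes_from_inx() above filter jobs by intersecting the node bitmap stored with each job (node_inx) against a bitmap of the nodes the caller asked about. A simplified standalone sketch of that overlap test, with a 64-bit mask standing in for SLURM's bitstr_t:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t asked = 0, job = 0;
	int asked_nodes[] = { 2, 3 };		/* nodes the user asked about */
	int job_nodes[]   = { 3, 4, 5 };	/* nodes the job ran on */
	size_t i;

	/* set one bit per node position, like bit_set() on asked_bitmap */
	for (i = 0; i < sizeof(asked_nodes) / sizeof(*asked_nodes); i++)
		asked |= UINT64_C(1) << asked_nodes[i];
	for (i = 0; i < sizeof(job_nodes) / sizeof(*job_nodes); i++)
		job |= UINT64_C(1) << job_nodes[i];

	/* the analogue of bit_overlap() deciding good_nodes_from_inx() */
	printf("job %s\n", (asked & job) ? "matches" : "is filtered out");
	return 0;
}
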
@@ -254,6 +565,73 @@ extern int setup_job_cond_limits(acct_job_cond_t *job_cond, char **extra)
 		xstrcat(*extra, ")");
 	}
 
+	/* this must be done before resvid_list since we set
+	   resvid_list up here */
+	if(job_cond->resv_list && list_count(job_cond->resv_list)) {
+		char *query = xstrdup_printf(
+			"select distinct id from %s where (", resv_table);
+		int my_set = 0;
+		MYSQL_RES *result = NULL;
+		MYSQL_ROW row;
+
+		if(job_cond->cluster_list
+		   && list_count(job_cond->cluster_list)) {
+			
+			itr = list_iterator_create(job_cond->cluster_list);
+			while((object = list_next(itr))) {
+				if(my_set) 
+					xstrcat(query, " || ");
+				xstrfmtcat(query, "cluster='%s'", object);
+				my_set = 1;
+			}
+			list_iterator_destroy(itr);
+		} 
+
+		if(my_set)
+			xstrcat(query, ") && (");
+		
+		itr = list_iterator_create(job_cond->resv_list);
+		while((object = list_next(itr))) {
+			if(my_set)
+				xstrcat(query, " || ");
+			xstrfmtcat(query, "name='%s'", object);
+			my_set = 1;
+		}
+		list_iterator_destroy(itr);
+		xstrcat(query, ")");
+		if(!(result = mysql_db_query_ret(
+			     mysql_conn->db_conn, query, 0))) {
+			xfree(query);
+			error("couldn't query the database");
+			goto no_resv;
+		}
+		xfree(query);
+		if(!job_cond->resvid_list) 
+			job_cond->resvid_list = list_create(slurm_destroy_char);
+		while((row = mysql_fetch_row(result))) {
+			list_append(job_cond->resvid_list, xstrdup(row[0]));
+		}
+		mysql_free_result(result);
+	}
+no_resv:
+
+	if(job_cond->resvid_list && list_count(job_cond->resvid_list)) {
+		set = 0;
+		if(*extra)
+			xstrcat(*extra, " && (");
+		else
+			xstrcat(*extra, " where (");
+		itr = list_iterator_create(job_cond->resvid_list);
+		while((object = list_next(itr))) {
+			if(set)
+				xstrcat(*extra, " || ");
+			xstrfmtcat(*extra, "t1.resvid='%s'", object);
+			set = 1;
+		}
+		list_iterator_destroy(itr);
+		xstrcat(*extra, ")");
+	}
+
 	if(job_cond->step_list && list_count(job_cond->step_list)) {
 		set = 0;
 		if(*extra)
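
The reservation handling above first resolves reservation names to ids with a pre-query whose WHERE clause is an OR'd group built term by term from a list, the same xstrfmtcat idiom used throughout this file. A standalone sketch of the clause building, with snprintf in place of xstrfmtcat (values are illustrative):

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *names[] = { "maint", "weekly" };
	char clause[256] = "(";
	size_t i, n = sizeof(names) / sizeof(*names);

	for (i = 0; i < n; i++) {
		if (i)	/* separator between terms, as with my_set above */
			strncat(clause, " || ",
				sizeof(clause) - strlen(clause) - 1);
		snprintf(clause + strlen(clause),
			 sizeof(clause) - strlen(clause),
			 "name='%s'", names[i]);
	}
	strncat(clause, ")", sizeof(clause) - strlen(clause) - 1);
	puts(clause);	/* prints: (name='maint' || name='weekly') */
	return 0;
}
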
@@ -272,17 +650,20 @@ extern int setup_job_cond_limits(acct_job_cond_t *job_cond, char **extra)
 	}
 
 	if(job_cond->usage_start) {
-		if(!job_cond->usage_end)
-			job_cond->usage_end = now;
-
 		if(*extra)
 			xstrcat(*extra, " && (");
 		else
 			xstrcat(*extra, " where (");
-		xstrfmtcat(*extra, 
-			   "(t1.eligible < %d "
-			   "&& (t1.end >= %d || t1.end = 0)))",
-			   job_cond->usage_end, job_cond->usage_start);
+
+		if(!job_cond->usage_end)
+			xstrfmtcat(*extra, 
+				   "t1.end >= %d || t1.end = 0)",
+				   job_cond->usage_start);
+		else
+			xstrfmtcat(*extra, 
+				   "(t1.eligible < %d "
+				   "&& (t1.end >= %d || t1.end = 0)))",
+				   job_cond->usage_end, job_cond->usage_start);
 	} else if(job_cond->usage_end) {
 		if(*extra)
 			xstrcat(*extra, " && (");
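
The usage_start/usage_end rewrite above encodes a half-open interval overlap test in SQL: a job with times [eligible, end), where end = 0 means still running, is selected when it overlaps the query window. A standalone sketch of the same predicate in C:

#include <stdio.h>
#include <time.h>

/* a record with times [begin, end), end == 0 meaning "still running",
 * overlaps the query window [ws, we) */
static int overlaps(time_t begin, time_t end, time_t ws, time_t we)
{
	return (begin < we) && (end >= ws || end == 0);
}

int main(void)
{
	printf("%d\n", overlaps(100, 0, 150, 200));	/* running: 1 */
	printf("%d\n", overlaps(100, 120, 150, 200));	/* ended early: 0 */
	return 0;
}
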
@@ -371,7 +752,9 @@ extern List mysql_jobacct_process_get_jobs(mysql_conn_t *mysql_conn, uid_t uid,
 	List job_list = list_create(destroy_jobacct_job_rec);
 	uint16_t private_data = 0;
 	acct_user_rec_t user;
-		
+	local_cluster_t *curr_cluster = NULL;
+	List local_cluster_list = NULL;
+
 	/* if this changes you will need to edit the corresponding 
 	 * enum below also t1 is job_table */
 	char *job_req_inx[] = {
@@ -382,6 +765,7 @@ extern List mysql_jobacct_process_get_jobs(mysql_conn_t *mysql_conn, uid_t uid,
 		"t1.wckeyid",
 		"t1.uid",
 		"t1.gid",
+		"t1.resvid",
 		"t1.partition",
 		"t1.blockid",
 		"t1.cluster",
@@ -398,7 +782,9 @@ extern List mysql_jobacct_process_get_jobs(mysql_conn_t *mysql_conn, uid_t uid,
 		"t1.priority",
 		"t1.req_cpus",
 		"t1.alloc_cpus",
+		"t1.alloc_nodes",
 		"t1.nodelist",
+		"t1.node_inx",
 		"t1.kill_requid",
 		"t1.qos",
 		"t2.user",
@@ -407,6 +793,43 @@ extern List mysql_jobacct_process_get_jobs(mysql_conn_t *mysql_conn, uid_t uid,
 		"t2.lft"
 	};
 
+	enum {
+		JOB_REQ_ID,
+		JOB_REQ_JOBID,
+		JOB_REQ_ASSOCID,
+		JOB_REQ_WCKEY,
+		JOB_REQ_WCKEYID,
+		JOB_REQ_UID,
+		JOB_REQ_GID,
+		JOB_REQ_RESVID,
+		JOB_REQ_PARTITION,
+		JOB_REQ_BLOCKID,
+		JOB_REQ_CLUSTER1,
+		JOB_REQ_ACCOUNT1,
+		JOB_REQ_ELIGIBLE,
+		JOB_REQ_SUBMIT,
+		JOB_REQ_START,
+		JOB_REQ_END,
+		JOB_REQ_SUSPENDED,
+		JOB_REQ_NAME,
+		JOB_REQ_TRACKSTEPS,
+		JOB_REQ_STATE,
+		JOB_REQ_COMP_CODE,
+		JOB_REQ_PRIORITY,
+		JOB_REQ_REQ_CPUS,
+		JOB_REQ_ALLOC_CPUS,
+		JOB_REQ_ALLOC_NODES,
+		JOB_REQ_NODELIST,
+		JOB_REQ_NODE_INX,
+		JOB_REQ_KILL_REQUID,
+		JOB_REQ_QOS,
+		JOB_REQ_USER_NAME,
+		JOB_REQ_CLUSTER,
+		JOB_REQ_ACCOUNT,
+		JOB_REQ_LFT,
+		JOB_REQ_COUNT		
+	};
+
 	/* if this changes you will need to edit the corresponding 
 	 * enum below also t1 is step_table */
 	char *step_req_inx[] = {
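
The column arrays and enums above must be edited in lock step, as the comments warn. A hypothetical alternative (not how SLURM does it) is an X-macro, where one master list generates both the enum and the column-name array so they cannot drift apart:

#include <stdio.h>

/* one master list drives both the enum and the column-name array */
#define JOB_COLUMNS(X)         \
	X(ID,     "t1.id")     \
	X(JOBID,  "t1.jobid")  \
	X(RESVID, "t1.resvid")

#define AS_ENUM(sym, str) JOB_REQ_##sym,
#define AS_NAME(sym, str) str,

enum { JOB_COLUMNS(AS_ENUM) JOB_REQ_COUNT };
static const char *job_req_inx[] = { JOB_COLUMNS(AS_NAME) };

int main(void)
{
	int i;

	for (i = 0; i < JOB_REQ_COUNT; i++)
		printf("%d -> %s\n", i, job_req_inx[i]);
	return 0;
}
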
@@ -416,10 +839,14 @@ extern List mysql_jobacct_process_get_jobs(mysql_conn_t *mysql_conn, uid_t uid,
 		"t1.suspended",
 		"t1.name",
 		"t1.nodelist",
+		"t1.node_inx",
 		"t1.state",
 		"t1.kill_requid",
 		"t1.comp_code",
+		"t1.nodes",
 		"t1.cpus",
+		"t1.tasks",
+		"t1.task_dist",
 		"t1.user_sec",
 		"t1.user_usec",
 		"t1.sys_sec",
@@ -442,39 +869,6 @@ extern List mysql_jobacct_process_get_jobs(mysql_conn_t *mysql_conn, uid_t uid,
 		"t1.ave_cpu"
 	};
 
-	enum {
-		JOB_REQ_ID,
-		JOB_REQ_JOBID,
-		JOB_REQ_ASSOCID,
-		JOB_REQ_WCKEY,
-		JOB_REQ_WCKEYID,
-		JOB_REQ_UID,
-		JOB_REQ_GID,
-		JOB_REQ_PARTITION,
-		JOB_REQ_BLOCKID,
-		JOB_REQ_CLUSTER1,
-		JOB_REQ_ACCOUNT1,
-		JOB_REQ_ELIGIBLE,
-		JOB_REQ_SUBMIT,
-		JOB_REQ_START,
-		JOB_REQ_END,
-		JOB_REQ_SUSPENDED,
-		JOB_REQ_NAME,
-		JOB_REQ_TRACKSTEPS,
-		JOB_REQ_STATE,
-		JOB_REQ_COMP_CODE,
-		JOB_REQ_PRIORITY,
-		JOB_REQ_REQ_CPUS,
-		JOB_REQ_ALLOC_CPUS,
-		JOB_REQ_NODELIST,
-		JOB_REQ_KILL_REQUID,
-		JOB_REQ_QOS,
-		JOB_REQ_USER_NAME,
-		JOB_REQ_CLUSTER,
-		JOB_REQ_ACCOUNT,
-		JOB_REQ_LFT,
-		JOB_REQ_COUNT		
-	};
 	enum {
 		STEP_REQ_STEPID,
 		STEP_REQ_START,
@@ -482,10 +876,14 @@ extern List mysql_jobacct_process_get_jobs(mysql_conn_t *mysql_conn, uid_t uid,
 		STEP_REQ_SUSPENDED,
 		STEP_REQ_NAME,
 		STEP_REQ_NODELIST,
+		STEP_REQ_NODE_INX,
 		STEP_REQ_STATE,
 		STEP_REQ_KILL_REQUID,
 		STEP_REQ_COMP_CODE,
+		STEP_REQ_NODES,
 		STEP_REQ_CPUS,
+		STEP_REQ_TASKS,
+		STEP_REQ_TASKDIST,
 		STEP_REQ_USER_SEC,
 		STEP_REQ_USER_USEC,
 		STEP_REQ_SYS_SEC,
@@ -531,12 +929,29 @@ extern List mysql_jobacct_process_get_jobs(mysql_conn_t *mysql_conn, uid_t uid,
 			   >= ACCT_ADMIN_OPERATOR) 
 				is_admin = 1;	
 			else {
-				assoc_mgr_fill_in_user(mysql_conn, &user, 1);
+				assoc_mgr_fill_in_user(mysql_conn, &user, 1,
+						       NULL);
 			}
 		}
 	}
 
-	setup_job_cond_limits(job_cond, &extra);
+
+	/* Here we set up the environment to check the nodes used by
+	   jobs.  Since we store the bitmap of the entire cluster we
+	   can use that to build a hostlist and the matching bitmap.
+	   This must go before the setup of the conditions since we
+	   could update the start/end times.
+	*/
+	if(job_cond && job_cond->used_nodes) {
+		local_cluster_list = setup_cluster_list_with_inx(
+			mysql_conn, job_cond, (void **)&curr_cluster);
+		if(!local_cluster_list) {
+			list_destroy(job_list);
+			return NULL;
+		}
+	}
+
+	setup_job_cond_limits(mysql_conn, job_cond, &extra);
 
 	xfree(tmp);
 	xstrfmtcat(tmp, "%s", job_req_inx[0]);
@@ -565,6 +980,9 @@ extern List mysql_jobacct_process_get_jobs(mysql_conn_t *mysql_conn, uid_t uid,
 			     mysql_conn->db_conn, query, 0))) {
 			xfree(extra);
 			xfree(query);
+			list_destroy(job_list);
+			if(local_cluster_list)
+				list_destroy(local_cluster_list);
 			return NULL;
 		}
 		xfree(query);
@@ -603,6 +1021,7 @@ extern List mysql_jobacct_process_get_jobs(mysql_conn_t *mysql_conn, uid_t uid,
 		xstrcat(query, extra);
 		xfree(extra);
 	}
+	
 	/* Here we want to order them this way in such a way so it is
 	   easy to look for duplicates 
 	*/
@@ -610,13 +1029,13 @@ extern List mysql_jobacct_process_get_jobs(mysql_conn_t *mysql_conn, uid_t uid,
 		xstrcat(query, " order by t1.cluster, jobid, submit desc");
 	else
 		xstrcat(query, " order by t1.cluster, submit desc");
-		
 
 	debug3("%d(%d) query\n%s", mysql_conn->conn, __LINE__, query);
-	if(!(result = mysql_db_query_ret(
-		     mysql_conn->db_conn, query, 0))) {
+	if(!(result = mysql_db_query_ret(mysql_conn->db_conn, query, 0))) {
 		xfree(query);
 		list_destroy(job_list);
+		if(local_cluster_list)
+			list_destroy(local_cluster_list);
 		return NULL;
 	}
 	xfree(query);
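
The ORDER BY above sorts rows so that duplicate records of the same job sit next to each other; the fetch loop then skips any row whose jobid matches the previous one (the last_id/curr_id test below). A standalone sketch of that pass:

#include <stdio.h>

int main(void)
{
	/* rows already sorted so duplicates of a job are adjacent */
	int jobids[] = { 17, 17, 23, 42, 42, 42 };
	int i, last_id = -1;

	for (i = 0; i < 6; i++) {
		if (jobids[i] == last_id)
			continue;	/* older duplicate of same job */
		last_id = jobids[i];
		printf("keep job %d\n", last_id);
	}
	return 0;
}
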
@@ -624,6 +1043,7 @@ extern List mysql_jobacct_process_get_jobs(mysql_conn_t *mysql_conn, uid_t uid,
 	while((row = mysql_fetch_row(result))) {
 		char *id = row[JOB_REQ_ID];
 		bool job_ended = 0;
+		int submit = atoi(row[JOB_REQ_SUBMIT]);
 
 		curr_id = atoi(row[JOB_REQ_JOBID]);
 
@@ -632,13 +1052,26 @@ extern List mysql_jobacct_process_get_jobs(mysql_conn_t *mysql_conn, uid_t uid,
 		
 		last_id = curr_id;
 
+		/* check the bitmap to see if this is one of the jobs
+		   we are looking for */
+		if(!good_nodes_from_inx(local_cluster_list,
+					(void **)&curr_cluster,
+					row[JOB_REQ_NODE_INX], submit))
+			continue;
+		
 		job = create_jobacct_job_rec();
+		list_append(job_list, job);
 
 		job->alloc_cpus = atoi(row[JOB_REQ_ALLOC_CPUS]);
+		job->alloc_nodes = atoi(row[JOB_REQ_ALLOC_NODES]);
 		job->associd = atoi(row[JOB_REQ_ASSOCID]);
+		job->resvid = atoi(row[JOB_REQ_RESVID]);
 
-		if(row[JOB_REQ_WCKEY] && row[JOB_REQ_WCKEY][0])
+		/* we want a blank wckey if the name is null */
+		if(row[JOB_REQ_WCKEY])
 			job->wckey = xstrdup(row[JOB_REQ_WCKEY]);
+		else
+			job->wckey = xstrdup("");
 		job->wckeyid = atoi(row[JOB_REQ_WCKEYID]);
 
 		if(row[JOB_REQ_CLUSTER] && row[JOB_REQ_CLUSTER][0])
@@ -663,23 +1096,28 @@ extern List mysql_jobacct_process_get_jobs(mysql_conn_t *mysql_conn, uid_t uid,
 			job->blockid = xstrdup(row[JOB_REQ_BLOCKID]);
 
 		job->eligible = atoi(row[JOB_REQ_ELIGIBLE]);
-		job->submit = atoi(row[JOB_REQ_SUBMIT]);
+		job->submit = submit;
 		job->start = atoi(row[JOB_REQ_START]);
 		job->end = atoi(row[JOB_REQ_END]);
+
 		/* since the job->end could be set later end it here */
-		if(job->end)
+		if(job->end) {
 			job_ended = 1;
+			if(!job->start || (job->start > job->end))
+				job->start = job->end;
+		}
 
-		if(job_cond && job_cond->usage_start) {
+		if(job_cond && !job_cond->without_usage_truncation
+		   && job_cond->usage_start) {
 			if(job->start && (job->start < job_cond->usage_start))
 				job->start = job_cond->usage_start;
 
-			if(!job->start && job->end)
-				job->start = job->end;
-
 			if(!job->end || job->end > job_cond->usage_end) 
 				job->end = job_cond->usage_end;
 
+			if(!job->start)
+				job->start = job->end;
+			
 			job->elapsed = job->end - job->start;
 
 			if(row[JOB_REQ_SUSPENDED]) {
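
When usage truncation applies, the code above clamps each job's start and end into the requested window before computing elapsed time, so a job only contributes the portion that falls inside the window. A standalone sketch of the clamp (times are illustrative):

#include <stdio.h>
#include <time.h>

static time_t clamped_elapsed(time_t start, time_t end,
			      time_t ws, time_t we)
{
	if (start && start < ws)
		start = ws;		/* began before the window */
	if (!end || end > we)
		end = we;		/* still running or ended after it */
	if (!start)
		start = end;		/* never started: contributes 0 */
	return end - start;
}

int main(void)
{
	/* job ran 100-500, window is 200-400 -> 200 seconds counted */
	printf("%ld\n", (long)clamped_elapsed(100, 500, 200, 400));
	return 0;
}
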
@@ -772,8 +1210,6 @@ extern List mysql_jobacct_process_get_jobs(mysql_conn_t *mysql_conn, uid_t uid,
 		job->qos = atoi(row[JOB_REQ_QOS]);
 		job->show_full = 1;
 					
-		list_append(job_list, job);
-
 		if(job_cond && job_cond->step_list
 		   && list_count(job_cond->step_list)) {
 			set = 0;
@@ -820,12 +1256,29 @@ extern List mysql_jobacct_process_get_jobs(mysql_conn_t *mysql_conn, uid_t uid,
 			     mysql_conn->db_conn, query, 0))) {
 			xfree(query);
 			list_destroy(job_list);
+			if(local_cluster_list)
+				list_destroy(local_cluster_list);
 			return NULL;
 		}
 		xfree(query);
+		
+		/* Querying the steps in this fashion was faster than
+		   doing only one query and then matching the steps up
+		   later with the job.
+		*/
 		while ((step_row = mysql_fetch_row(step_result))) {
+			/* check the bitmap to see if this is one of the steps
+			   we are looking for */
+			if(!good_nodes_from_inx(local_cluster_list,
+						(void **)&curr_cluster,
+						step_row[STEP_REQ_NODE_INX],
+						submit))
+				continue;
+		
 			step = create_jobacct_step_rec();
-			step->jobid = job->jobid;
+			step->job_ptr = job;
+			if(!job->first_step_ptr)
+				job->first_step_ptr = step;
 			list_append(job->steps, step);
 			step->stepid = atoi(step_row[STEP_REQ_STEPID]);
 			/* info("got step %u.%u", */
@@ -833,6 +1286,13 @@ extern List mysql_jobacct_process_get_jobs(mysql_conn_t *mysql_conn, uid_t uid,
 			step->state = atoi(step_row[STEP_REQ_STATE]);
 			step->exitcode = atoi(step_row[STEP_REQ_COMP_CODE]);
 			step->ncpus = atoi(step_row[STEP_REQ_CPUS]);
+			step->nnodes = atoi(step_row[STEP_REQ_NODES]);
+
+			step->ntasks = atoi(step_row[STEP_REQ_TASKS]);
+			step->task_dist = atoi(step_row[STEP_REQ_TASKDIST]);
+			if(!step->ntasks)
+				step->ntasks = step->ncpus;
+
 			step->start = atoi(step_row[STEP_REQ_START]);
 			
 			step->end = atoi(step_row[STEP_REQ_END]);
@@ -842,7 +1302,8 @@ extern List mysql_jobacct_process_get_jobs(mysql_conn_t *mysql_conn, uid_t uid,
 				step->state = job->state;
 			}
 
-			if(job_cond && job_cond->usage_start) {
+			if(job_cond && !job_cond->without_usage_truncation
+			   && job_cond->usage_start) {
 				if(step->start 
 				   && (step->start < job_cond->usage_start))
 					step->start = job_cond->usage_start;
@@ -855,7 +1316,6 @@ extern List mysql_jobacct_process_get_jobs(mysql_conn_t *mysql_conn, uid_t uid,
 					step->end = job_cond->usage_end;
 			}
 
-			step->elapsed = step->end - step->start;
 			/* figure this out by start stop */
 			step->suspended = atoi(step_row[STEP_REQ_SUSPENDED]);
 			if(!step->end) {
@@ -928,14 +1388,16 @@ extern List mysql_jobacct_process_get_jobs(mysql_conn_t *mysql_conn, uid_t uid,
 			if(list_count(job->steps) > 1) 
 				job->track_steps = 1;
 			else if(step && step->stepname && job->jobname) {
-				if(strcmp(step->stepname, job->jobname))
+				if(strcmp(step->stepname, job->jobname)) 
 					job->track_steps = 1;
 			}
-               }
+		}
 		/* need to reset here to make the above test valid */
 		step = NULL;
 	}
 	mysql_free_result(result);
+	if(local_cluster_list)
+		list_destroy(local_cluster_list);
 
 	return job_list;
 }
@@ -943,18 +1405,29 @@ extern List mysql_jobacct_process_get_jobs(mysql_conn_t *mysql_conn, uid_t uid,
 extern int mysql_jobacct_process_archive(mysql_conn_t *mysql_conn,
 					 acct_archive_cond_t *arch_cond)
 {
-	int rc = SLURM_SUCCESS, fd = 0;
+	int rc = SLURM_SUCCESS;
 	char *query = NULL;
 	time_t last_submit = time(NULL);
 	time_t curr_end;
 	char *tmp = NULL;
 	int i=0;
-	char *old_file = NULL, *new_file = NULL, *reg_file = NULL;
 	struct tm time_tm;
-	char start_char[32];
-	char end_char[32];
+
 //	DEF_TIMERS;
 
+	/* if this changes you will need to edit the corresponding 
+	 * enum below */
+	char *event_req_inx[] = {
+		"node_name",
+		"cluster",
+		"cpu_count",
+		"state",
+		"period_start",
+		"period_end",
+		"reason",
+		"cluster_nodes",
+	};
+
 	/* if this changes you will need to edit the corresponding 
 	 * enum below */
 	char *job_req_inx[] = {
@@ -965,6 +1438,7 @@ extern int mysql_jobacct_process_archive(mysql_conn_t *mysql_conn,
 		"wckeyid",
 		"uid",
 		"gid",
+		"resvid",
 		"partition",
 		"blockid",
 		"cluster",
@@ -981,7 +1455,9 @@ extern int mysql_jobacct_process_archive(mysql_conn_t *mysql_conn,
 		"priority",
 		"req_cpus",
 		"alloc_cpus",
+		"alloc_nodes",
 		"nodelist",
+		"node_inx",
 		"kill_requid",
 		"qos"
 	};
@@ -996,10 +1472,14 @@ extern int mysql_jobacct_process_archive(mysql_conn_t *mysql_conn,
 		"suspended",
 		"name",
 		"nodelist",
+		"node_inx",
 		"state",
 		"kill_requid",
 		"comp_code",
+		"nodes",
 		"cpus",
+		"tasks",
+		"task_dist",
 		"user_sec",
 		"user_usec",
 		"sys_sec",
@@ -1022,6 +1502,28 @@ extern int mysql_jobacct_process_archive(mysql_conn_t *mysql_conn,
 		"ave_cpu"
 	};
 
+
+	/* if this changes you will need to edit the corresponding 
+	 * enum below */
+	char *suspend_req_inx[] = {
+		"id",
+		"associd",
+		"start",
+		"end",
+	};
+
+	enum {
+		EVENT_REQ_NODE,
+		EVENT_REQ_CLUSTER,
+		EVENT_REQ_CPUS,
+		EVENT_REQ_STATE,
+		EVENT_REQ_START,
+		EVENT_REQ_END,
+		EVENT_REQ_REASON,
+		EVENT_REQ_NODES,
+		EVENT_REQ_COUNT
+	};
+
 	enum {
 		JOB_REQ_ID,
 		JOB_REQ_JOBID,
@@ -1030,6 +1532,7 @@ extern int mysql_jobacct_process_archive(mysql_conn_t *mysql_conn,
 		JOB_REQ_WCKEYID,
 		JOB_REQ_UID,
 		JOB_REQ_GID,
+		JOB_REQ_RESVID,
 		JOB_REQ_PARTITION,
 		JOB_REQ_BLOCKID,
 		JOB_REQ_CLUSTER,
@@ -1046,11 +1549,14 @@ extern int mysql_jobacct_process_archive(mysql_conn_t *mysql_conn,
 		JOB_REQ_PRIORITY,
 		JOB_REQ_REQ_CPUS,
 		JOB_REQ_ALLOC_CPUS,
+		JOB_REQ_ALLOC_NODES,
 		JOB_REQ_NODELIST,
+		JOB_REQ_NODE_INX,
 		JOB_REQ_KILL_REQUID,
 		JOB_REQ_QOS,
 		JOB_REQ_COUNT		
 	};
+
 	enum {
 		STEP_REQ_ID,
 		STEP_REQ_STEPID,
@@ -1059,10 +1565,14 @@ extern int mysql_jobacct_process_archive(mysql_conn_t *mysql_conn,
 		STEP_REQ_SUSPENDED,
 		STEP_REQ_NAME,
 		STEP_REQ_NODELIST,
+		STEP_REQ_NODE_INX,
 		STEP_REQ_STATE,
 		STEP_REQ_KILL_REQUID,
 		STEP_REQ_COMP_CODE,
+		STEP_REQ_NODES,
 		STEP_REQ_CPUS,
+		STEP_REQ_TASKS,
+		STEP_REQ_TASKDIST,
 		STEP_REQ_USER_SEC,
 		STEP_REQ_USER_USEC,
 		STEP_REQ_SYS_SEC,
@@ -1086,6 +1596,14 @@ extern int mysql_jobacct_process_archive(mysql_conn_t *mysql_conn,
 		STEP_REQ_COUNT
 	};
 
+	enum {
+		SUSPEND_REQ_ID,
+		SUSPEND_REQ_ASSOCID,
+		SUSPEND_REQ_START,
+		SUSPEND_REQ_END,
+		SUSPEND_REQ_COUNT
+	};
+
 	if(!arch_cond) {
 		error("No arch_cond was given to archive from.  returning");
 		return SLURM_ERROR;
@@ -1103,7 +1621,7 @@ extern int mysql_jobacct_process_archive(mysql_conn_t *mysql_conn,
 	time_tm.tm_isdst = -1;
 	last_submit = mktime(&time_tm);
 	last_submit--;
-	debug("adjusted last submit is (%d)", last_submit);
+	debug("archive: adjusted last submit is (%d)", last_submit);
 	
 	if(arch_cond->archive_script)
 		return _archive_script(arch_cond, last_submit);
@@ -1112,9 +1630,176 @@ extern int mysql_jobacct_process_archive(mysql_conn_t *mysql_conn,
 		return SLURM_ERROR;
 	}
 
-	if(arch_cond->step_purge) {
+	if(arch_cond->purge_event) {
+		/* remove all data from event table that was older than
+		 * period_start * arch_cond->purge_event. 
+		 */
+		/* use localtime to avoid any daylight savings issues */
+		if(!localtime_r(&last_submit, &time_tm)) {
+			error("Couldn't get localtime from first submit %d",
+			      last_submit);
+			return SLURM_ERROR;
+		}
+		time_tm.tm_mday = 1;
+		time_tm.tm_mon -= arch_cond->purge_event;
+		time_tm.tm_isdst = -1;
+		curr_end = mktime(&time_tm);
+
+		debug4("from %d - %d months purging events from before %d", 
+		       last_submit, arch_cond->purge_event, curr_end);
+		
+		if(arch_cond->archive_events) {
+			char *insert = NULL;
+			MYSQL_RES *result = NULL;
+			
+			xfree(tmp);
+			xstrfmtcat(tmp, "%s", event_req_inx[0]);
+			for(i=1; i<EVENT_REQ_COUNT; i++) {
+				xstrfmtcat(tmp, ", %s", event_req_inx[i]);
+			}
+
+			/* get all the events started before this time
+			   listed */
+			query = xstrdup_printf("select %s from %s where "
+					       "period_start <= %d "
+					       "&& period_end != 0 "
+					       "order by period_start asc",
+					       tmp, event_table, curr_end);
+
+			insert = xstrdup_printf("insert into %s (%s) ",
+						event_table, tmp);
+			xfree(tmp);
+			
+//			START_TIMER;
+			debug3("%d(%d) query\n%s", mysql_conn->conn,
+			       __LINE__, query);
+			if(!(result = mysql_db_query_ret(
+				     mysql_conn->db_conn, query, 0))) {
+				xfree(insert);
+				xfree(query);
+				return SLURM_ERROR;
+			}
+			xfree(query);
+//			END_TIMER2("step query");
+//			info("event query took %s", TIME_STR);
+
+			if(!mysql_num_rows(result)) {
+				xfree(insert);
+				mysql_free_result(result);
+				goto exit_events;
+			}
+
+			rc = _write_archive_file(
+				result, EVENT_REQ_START, EVENT_REQ_COUNT,
+				curr_end, arch_cond->archive_dir, 
+				"event", insert, false);
+			
+			xfree(insert);
+			mysql_free_result(result);
+
+			if(rc != SLURM_SUCCESS)
+				return rc;
+		}
+		query = xstrdup_printf("delete from %s where "
+				       "period_start <= %d && period_end != 0",
+				       event_table, curr_end);
+		debug3("%d(%d) query\n%s", mysql_conn->conn, __LINE__, query);
+		rc = mysql_db_query(mysql_conn->db_conn, query);
+		xfree(query);
+		if(rc != SLURM_SUCCESS) {
+			error("Couldn't remove old event data");
+			return SLURM_ERROR;
+		}
+	}
+
+exit_events:
+
+	if(arch_cond->purge_suspend) {
 		/* remove all data from step table that was older than
-		 * start * arch_cond->step_purge. 
+		 * period_start * arch_cond->purge_suspend. 
+		 */
+		/* use localtime to avoid any daylight savings issues */
+		if(!localtime_r(&last_submit, &time_tm)) {
+			error("Couldn't get localtime from first submit %d",
+			      last_submit);
+			return SLURM_ERROR;
+		}
+		time_tm.tm_mday = 1;
+		time_tm.tm_mon -= arch_cond->purge_suspend;
+		time_tm.tm_isdst = -1;
+		curr_end = mktime(&time_tm);
+
+		debug4("from %d - %d months purging suspend from before %d", 
+		       last_submit, arch_cond->purge_suspend, curr_end);
+		
+		if(arch_cond->archive_suspend) {
+			char *insert = NULL;
+			MYSQL_RES *result = NULL;
+			
+			xfree(tmp);
+			xstrfmtcat(tmp, "%s", suspend_req_inx[0]);
+			for(i=1; i<SUSPEND_REQ_COUNT; i++) {
+				xstrfmtcat(tmp, ", %s", suspend_req_inx[i]);
+			}
+
+			/* get all the suspend started before this time
+			   listed */
+			query = xstrdup_printf("select %s from %s where "
+					       "start <= %d && end != 0 "
+					       "order by start asc",
+					       tmp, suspend_table, curr_end);
+
+			insert = xstrdup_printf("insert into %s (%s) ",
+						suspend_table, tmp);
+			xfree(tmp);
+			
+//			START_TIMER;
+			debug3("%d(%d) query\n%s", mysql_conn->conn,
+			       __LINE__, query);
+			if(!(result = mysql_db_query_ret(
+				     mysql_conn->db_conn, query, 0))) {
+				xfree(insert);
+				xfree(query);
+				return SLURM_ERROR;
+			}
+			xfree(query);
+//			END_TIMER2("step query");
+//			info("suspend query took %s", TIME_STR);
+
+			if(!mysql_num_rows(result)) {
+				xfree(insert);
+				mysql_free_result(result);
+				goto exit_suspend;
+			}
+
+			rc = _write_archive_file(
+				result, SUSPEND_REQ_START, SUSPEND_REQ_COUNT,
+				curr_end, arch_cond->archive_dir, 
+				"suspend", insert, false);
+			
+			xfree(insert);
+			mysql_free_result(result);
+
+			if(rc != SLURM_SUCCESS)
+				return rc;
+		}
+		query = xstrdup_printf("delete from %s where start <= %d "
+				       "&& end != 0",
+				       suspend_table, curr_end);
+		debug3("%d(%d) query\n%s", mysql_conn->conn, __LINE__, query);
+		rc = mysql_db_query(mysql_conn->db_conn, query);
+		xfree(query);
+		if(rc != SLURM_SUCCESS) {
+			error("Couldn't remove old suspend data");
+			return SLURM_ERROR;
+		}
+	}
+
+exit_suspend:
+
+	if(arch_cond->purge_step) {
+		/* remove all data from step table that was older than
+		 * start * arch_cond->purge_step. 
 		 */
 		/* use localtime to avoid any daylight savings issues */
 		if(!localtime_r(&last_submit, &time_tm)) {
@@ -1122,19 +1807,16 @@ extern int mysql_jobacct_process_archive(mysql_conn_t *mysql_conn,
 			      last_submit);
 			return SLURM_ERROR;
 		}
-		time_tm.tm_mon -= arch_cond->step_purge;
+		time_tm.tm_mon -= arch_cond->purge_step;
 		time_tm.tm_isdst = -1;
 		curr_end = mktime(&time_tm);
 
 		debug4("from %d - %d months purging steps from before %d", 
-		       last_submit, arch_cond->step_purge, curr_end);
+		       last_submit, arch_cond->purge_step, curr_end);
 		
 		if(arch_cond->archive_steps) {
 			char *insert = NULL;
-			char *values = NULL;
-			int period_start = 0;
 			MYSQL_RES *result = NULL;
-			MYSQL_ROW row;
 
 			xfree(tmp);
 			xstrfmtcat(tmp, "%s", step_req_inx[0]);
@@ -1173,116 +1855,19 @@ extern int mysql_jobacct_process_archive(mysql_conn_t *mysql_conn,
 				mysql_free_result(result);
 				goto exit_steps;
 			}
-
-//			START_TIMER;
-			slurm_mutex_lock(&local_file_lock);
-			while((row = mysql_fetch_row(result))) {
-				if(period_start) {
-					xstrcat(values, ",\n(");
-				} else {
-					period_start = 
-						atoi(row[STEP_REQ_START]);
-					localtime_r((time_t *)&period_start,
-						    &time_tm);
-					time_tm.tm_sec = 0;
-					time_tm.tm_min = 0;
-					time_tm.tm_hour = 0;
-					time_tm.tm_mday = 1;
-					time_tm.tm_isdst = -1;
-					period_start = mktime(&time_tm);
-					localtime_r((time_t *)&period_start,
-						    &time_tm);
-					snprintf(start_char, sizeof(start_char),
-						 "%4.4u-%2.2u-%2.2u"
-						 "T%2.2u:%2.2u:%2.2u",
-						 (time_tm.tm_year + 1900),
-						 (time_tm.tm_mon+1), 
-						 time_tm.tm_mday,
-						 time_tm.tm_hour,
-						 time_tm.tm_min, 
-						 time_tm.tm_sec);
-
-					localtime_r((time_t *)&curr_end,
-						    &time_tm);
-					snprintf(end_char, sizeof(end_char),
-						 "%4.4u-%2.2u-%2.2u"
-						 "T%2.2u:%2.2u:%2.2u",
-						 (time_tm.tm_year + 1900),
-						 (time_tm.tm_mon+1), 
-						 time_tm.tm_mday,
-						 time_tm.tm_hour,
-						 time_tm.tm_min, 
-						 time_tm.tm_sec);
-
-					/* write the buffer to file */
-					reg_file = xstrdup_printf(
-						"%s/step_archive_%s_%s.sql",
-						arch_cond->archive_dir,
-						start_char, end_char);
-					debug("Storing step archive at %s",
-					      reg_file);
-					old_file = xstrdup_printf(
-						"%s.old", reg_file);
-					new_file = xstrdup_printf(
-						"%s.new", reg_file);
-					
-					fd = creat(new_file, 0600);
-					if (fd == 0) {
-						error("Can't save archive, "
-						      "create file %s error %m",
-						      new_file);
-						rc = errno;
-						xfree(insert);
-						break;
-					} 
-					values = xstrdup_printf("%s\nvalues\n(",
-								insert);
-					xfree(insert);
-				}
-	
-				xstrfmtcat(values, "'%s'", row[0]);
-				for(i=1; i<STEP_REQ_COUNT; i++) {
-					xstrfmtcat(values, ", '%s'", row[i]);
-				}
-				xstrcat(values, ", '1')");
-				
-				if(!fd || ((rc = _write_to_file(fd, values))
-					   != SLURM_SUCCESS)) {
-					xfree(values);
-					break;
-				}
-				xfree(values);
-			}
-			mysql_free_result(result);
-			rc = _write_to_file(
-				fd, " on duplicate key update deleted=1;");
-//			END_TIMER2("write file");
-//			info("write file took %s", TIME_STR);
-
-			fsync(fd);
-			close(fd);
 			
-			if (rc)
-				(void) unlink(new_file);
-			else {			/* file shuffle */
-				int ign;	/* avoid warning */
-				(void) unlink(old_file);
-				ign =  link(reg_file, old_file);
-				(void) unlink(reg_file);
-				ign =   link(new_file, reg_file);
-				(void) unlink(new_file);
-			}
-			xfree(old_file);
-			xfree(reg_file);
-			xfree(new_file);
-			slurm_mutex_unlock(&local_file_lock);
+			rc = _write_archive_file(
+				result, STEP_REQ_START, STEP_REQ_COUNT,
+				curr_end, arch_cond->archive_dir, 
+				"step", insert, true);
+			
+			xfree(insert);
+			mysql_free_result(result);
 
-			period_start = 0;
+			if(rc != SLURM_SUCCESS)
+				return rc;
 		}
 
-		if(rc != SLURM_SUCCESS) 
-			return rc;
-
 		query = xstrdup_printf("delete from %s where start <= %d "
 				       "&& end != 0",
 				       step_table, curr_end);
@@ -1296,9 +1881,9 @@ extern int mysql_jobacct_process_archive(mysql_conn_t *mysql_conn,
 	}
 exit_steps:
 	
-	if(arch_cond->job_purge) {
+	if(arch_cond->purge_job) {
 		/* remove all data from step table that was older than
-		 * last_submit * arch_cond->job_purge. 
+		 * last_submit * arch_cond->purge_job. 
 		 */
 		/* use localtime to avoid any daylight savings issues */
 		if(!localtime_r(&last_submit, &time_tm)) {
@@ -1307,20 +1892,17 @@ exit_steps:
 			return SLURM_ERROR;
 		}
 		time_tm.tm_mday = 1;
-		time_tm.tm_mon -= arch_cond->job_purge;
+		time_tm.tm_mon -= arch_cond->purge_job;
 		time_tm.tm_isdst = -1;
 		curr_end = mktime(&time_tm);
 
 		debug4("from %d - %d months purging jobs from before %d", 
-		       last_submit, arch_cond->job_purge, curr_end);
+		       last_submit, arch_cond->purge_job, curr_end);
 
 		if(arch_cond->archive_jobs) {
 			char *insert = NULL;
-			char *values = NULL;
-			int period_start = 0;
 			MYSQL_RES *result = NULL;
-			MYSQL_ROW row;
-			
+						
 			xfree(tmp);
 			xstrfmtcat(tmp, "%s", job_req_inx[0]);
 			for(i=1; i<JOB_REQ_COUNT; i++) {
@@ -1358,114 +1940,18 @@ exit_steps:
 				goto exit_jobs;
 			}
 			
-//			START_TIMER;
-			slurm_mutex_lock(&local_file_lock);
-			while((row = mysql_fetch_row(result))) {
-				if(period_start) {
-					xstrcat(values, ",\n(");
-				} else {
-					period_start = 
-						atoi(row[JOB_REQ_SUBMIT]);
-					localtime_r((time_t *)&period_start,
-						    &time_tm);
-					time_tm.tm_sec = 0;
-					time_tm.tm_min = 0;
-					time_tm.tm_hour = 0;
-					time_tm.tm_mday = 1;
-					time_tm.tm_isdst = -1;
-					period_start = mktime(&time_tm);
-					localtime_r((time_t *)&period_start,
-						    &time_tm);
-					snprintf(start_char, sizeof(start_char),
-						 "%4.4u-%2.2u-%2.2u"
-						 "T%2.2u:%2.2u:%2.2u",
-						 (time_tm.tm_year + 1900),
-						 (time_tm.tm_mon+1), 
-						 time_tm.tm_mday,
-						 time_tm.tm_hour,
-						 time_tm.tm_min, 
-						 time_tm.tm_sec);
-
-					localtime_r((time_t *)&curr_end,
-						    &time_tm);
-
-					snprintf(end_char, sizeof(end_char),
-						 "%4.4u-%2.2u-%2.2u"
-						 "T%2.2u:%2.2u:%2.2u",
-						 (time_tm.tm_year + 1900),
-						 (time_tm.tm_mon+1), 
-						 time_tm.tm_mday,
-						 time_tm.tm_hour,
-						 time_tm.tm_min, 
-						 time_tm.tm_sec);
-
-					/* write the buffer to file */
-					reg_file = xstrdup_printf(
-						"%s/job_archive_%s_%s.sql",
-						arch_cond->archive_dir,
-						start_char, end_char);
-					debug("Storing job archive at %s",
-					      reg_file);
-					old_file = xstrdup_printf(
-						"%s.old", reg_file);
-					new_file = xstrdup_printf(
-						"%s.new", reg_file);
-					
-					fd = creat(new_file, 0600);
-					if (fd == 0) {
-						error("Can't save archive, "
-						      "create file %s error %m",
-						      new_file);
-						rc = errno;
-						xfree(insert);
-						break;
-					} 
-					values = xstrdup_printf("%s\nvalues\n(",
-								insert);
-					xfree(insert);
-				}
-				
-				xstrfmtcat(values, "'%s'", row[0]);
-				for(i=1; i<JOB_REQ_COUNT; i++) {
-					xstrfmtcat(values, ", '%s'", row[i]);
-				}
-				xstrcat(values, ", '1')");
-				
-				if(!fd || ((rc = _write_to_file(fd, values))
-					   != SLURM_SUCCESS)) {
-					xfree(values);
-					break;
-				}
-				xfree(values);
-			}
-			mysql_free_result(result);
-
-			rc = _write_to_file(
-				fd, " on duplicate key update deleted=1;");
-//			END_TIMER2("write file");
-//			info("write file took %s", TIME_STR);
-			
+			rc = _write_archive_file(
+				result, JOB_REQ_SUBMIT, JOB_REQ_COUNT,
+				curr_end, arch_cond->archive_dir, 
+				"job", insert, true);
 			
-			fsync(fd);
-			close(fd);
-
-			if (rc)
-				(void) unlink(new_file);
-			else {			/* file shuffle */
-				int ign;	/* avoid warning */
-				(void) unlink(old_file);
-				ign =  link(reg_file, old_file);
-				(void) unlink(reg_file);
-				ign =  link(new_file, reg_file);
-				(void) unlink(new_file);
-			}
-			xfree(old_file);
-			xfree(reg_file);
-			xfree(new_file);
-			slurm_mutex_unlock(&local_file_lock);
+			xfree(insert);
+			mysql_free_result(result);
 
-			period_start = 0;
+			if(rc != SLURM_SUCCESS)
+				return rc;
 		}
+
 		query = xstrdup_printf("delete from %s where submit <= %d "
 				       "&& end != 0",
 				       job_table, curr_end);
@@ -1478,7 +1964,7 @@ exit_steps:
 		}
 	}
 exit_jobs:
-	
+
 	return SLURM_SUCCESS;
 }
 
@@ -1550,5 +2036,3 @@ extern int mysql_jobacct_process_archive_load(mysql_conn_t *mysql_conn,
 
 	return SLURM_SUCCESS;
 }
-
-#endif	
diff --git a/src/plugins/accounting_storage/mysql/mysql_jobacct_process.h b/src/plugins/accounting_storage/mysql/mysql_jobacct_process.h
index 12d7a4c4529233d3a39d5d64ff076bf85a4af0bd..4adc1c3a62cc3bc2875cd76d0749a6e58c58b9f7 100644
--- a/src/plugins/accounting_storage/mysql/mysql_jobacct_process.h
+++ b/src/plugins/accounting_storage/mysql/mysql_jobacct_process.h
@@ -9,7 +9,8 @@
  *  Written by Danny Auble <da@llnl.gov>
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -53,16 +54,39 @@
 #include "src/database/mysql_common.h"
 #include "src/common/slurm_accounting_storage.h"
 
-#ifdef HAVE_MYSQL
-
 //extern int acct_db_init;
-
+extern char *acct_coord_table;
+extern char *acct_table;
+extern char *assoc_day_table;
+extern char *assoc_hour_table;
+extern char *assoc_month_table;
 extern char *assoc_table;
+extern char *cluster_day_table;
+extern char *cluster_hour_table;
+extern char *cluster_month_table;
+extern char *cluster_table;
+extern char *event_table;
 extern char *job_table;
+extern char *last_ran_table;
+extern char *qos_table;
+extern char *resv_table;
 extern char *step_table;
+extern char *txn_table;
+extern char *user_table;
 extern char *suspend_table;
+extern char *wckey_day_table;
+extern char *wckey_hour_table;
+extern char *wckey_month_table;
+extern char *wckey_table;
 
-extern int setup_job_cond_limits(acct_job_cond_t *job_cond, char **extra);
+extern List setup_cluster_list_with_inx(mysql_conn_t *mysql_conn,
+					acct_job_cond_t *job_cond,
+					void **curr_cluster);
+extern int good_nodes_from_inx(List local_cluster_list, 
+			       void **object, char *node_inx,
+			       int submit);
+extern int setup_job_cond_limits(mysql_conn_t *mysql_conn,
+				 acct_job_cond_t *job_cond, char **extra);
 
 extern List mysql_jobacct_process_get_jobs(mysql_conn_t *mysql_conn, uid_t uid,
 					   acct_job_cond_t *job_cond);
@@ -72,7 +96,4 @@ extern int mysql_jobacct_process_archive(mysql_conn_t *mysql_conn,
 
 extern int mysql_jobacct_process_archive_load(mysql_conn_t *mysql_conn,
 					      acct_archive_rec_t *arch_rec);
-
-#endif
-
 #endif
diff --git a/src/plugins/accounting_storage/mysql/mysql_rollup.c b/src/plugins/accounting_storage/mysql/mysql_rollup.c
index d338bd3c09b5b55c5c808b8cdcb996811fb43eca..801331617c0864def287236327ce2f4c04e4da35 100644
--- a/src/plugins/accounting_storage/mysql/mysql_rollup.c
+++ b/src/plugins/accounting_storage/mysql/mysql_rollup.c
@@ -7,10 +7,11 @@
  *  Copyright (C) 2008 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Danny Auble <da@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -40,8 +41,6 @@
 
 #include "mysql_rollup.h"
 
-#ifdef HAVE_MYSQL
-
 typedef struct {
 	int id;
 	uint64_t a_cpu;
@@ -49,18 +48,31 @@ typedef struct {
 
 typedef struct {
 	char *name;
+	int id; /* only needed for reservations */
 	uint64_t total_time;
 	uint64_t a_cpu;
 	int cpu_count;
 	uint64_t d_cpu;
 	uint64_t i_cpu;
 	uint64_t o_cpu;
+	uint64_t pd_cpu;
 	uint64_t r_cpu;
 	time_t start;
 	time_t end;
 } local_cluster_usage_t;
 
-extern void _destroy_local_id_usage(void *object)
+typedef struct {
+	uint64_t a_cpu;
+	char *cluster;
+	int id;
+	List local_assocs; /* list of assocs to spread unused time
+			      over, of type local_id_usage_t */
+	uint64_t total_time;
+	time_t start;
+	time_t end;
+} local_resv_usage_t;
+
+static void _destroy_local_id_usage(void *object)
 {
 	local_id_usage_t *a_usage = (local_id_usage_t *)object;
 	if(a_usage) {
@@ -68,7 +80,7 @@ extern void _destroy_local_id_usage(void *object)
 	}
 }
 
-extern void _destroy_local_cluster_usage(void *object)
+static void _destroy_local_cluster_usage(void *object)
 {
 	local_cluster_usage_t *c_usage = (local_cluster_usage_t *)object;
 	if(c_usage) {
@@ -77,6 +89,17 @@ extern void _destroy_local_cluster_usage(void *object)
 	}
 }
 
+static void _destroy_local_resv_usage(void *object)
+{
+	local_resv_usage_t *r_usage = (local_resv_usage_t *)object;
+	if(r_usage) {
+		xfree(r_usage->cluster);
+		if(r_usage->local_assocs)
+			list_destroy(r_usage->local_assocs);
+		xfree(r_usage);
+	}
+}
+
 extern int mysql_hourly_rollup(mysql_conn_t *mysql_conn,
 			       time_t start, time_t end)
 {
@@ -92,18 +115,19 @@ extern int mysql_hourly_rollup(mysql_conn_t *mysql_conn,
 	ListIterator a_itr = NULL;
 	ListIterator c_itr = NULL;
 	ListIterator w_itr = NULL;
+	ListIterator r_itr = NULL;
 	List assoc_usage_list = list_create(_destroy_local_id_usage);
 	List cluster_usage_list = list_create(_destroy_local_cluster_usage);
 	List wckey_usage_list = list_create(_destroy_local_id_usage);
+	List resv_usage_list = list_create(_destroy_local_resv_usage);
 	uint16_t track_wckey = slurm_get_track_wckey();
-	local_cluster_usage_t *last_c_usage = NULL;
 
 	char *event_req_inx[] = {
 		"node_name",
 		"cluster",
 		"cpu_count",
 		"period_start",
-		"period_end"
+		"period_end",
 	};
 	char *event_str = NULL;
 	enum {
@@ -114,6 +138,7 @@ extern int mysql_hourly_rollup(mysql_conn_t *mysql_conn,
 		EVENT_REQ_END,
 		EVENT_REQ_COUNT
 	};
+
 	char *job_req_inx[] = {
 		"id",
 		"jobid",
@@ -125,7 +150,9 @@ extern int mysql_hourly_rollup(mysql_conn_t *mysql_conn,
 		"end",
 		"suspended",
 		"alloc_cpus",
-		"req_cpus"
+		"req_cpus",
+		"resvid"
+
 	};
 	char *job_str = NULL;
 	enum {
@@ -140,8 +167,10 @@ extern int mysql_hourly_rollup(mysql_conn_t *mysql_conn,
 		JOB_REQ_SUSPENDED,
 		JOB_REQ_ACPU,
 		JOB_REQ_RCPU,
+		JOB_REQ_RESVID,
 		JOB_REQ_COUNT
 	};
+
 	char *suspend_req_inx[] = {
 		"start",
 		"end"
@@ -153,6 +182,27 @@ extern int mysql_hourly_rollup(mysql_conn_t *mysql_conn,
 		SUSPEND_REQ_COUNT
 	};
 
+	char *resv_req_inx[] = {
+		"id",
+		"cluster",
+		"assoclist",
+		"cpus",
+		"flags",
+		"start",
+		"end"
+	};
+	char *resv_str = NULL;
+	enum {
+		RESV_REQ_ID,
+		RESV_REQ_CLUSTER,
+		RESV_REQ_ASSOCS,
+		RESV_REQ_CPU,
+		RESV_REQ_FLAGS,
+		RESV_REQ_START,
+		RESV_REQ_END,
+		RESV_REQ_COUNT
+	};
+
 	i=0;
 	xstrfmtcat(event_str, "%s", event_req_inx[i]);
 	for(i=1; i<EVENT_REQ_COUNT; i++) {
@@ -171,32 +221,43 @@ extern int mysql_hourly_rollup(mysql_conn_t *mysql_conn,
 		xstrfmtcat(suspend_str, ", %s", suspend_req_inx[i]);
 	}
 
+	i=0;
+	xstrfmtcat(resv_str, "%s", resv_req_inx[i]);
+	for(i=1; i<RESV_REQ_COUNT; i++) {
+		xstrfmtcat(resv_str, ", %s", resv_req_inx[i]);
+	}
+
 /* 	info("begin start %s", ctime(&curr_start)); */
 /* 	info("begin end %s", ctime(&curr_end)); */
 	a_itr = list_iterator_create(assoc_usage_list);
 	c_itr = list_iterator_create(cluster_usage_list);
 	w_itr = list_iterator_create(wckey_usage_list);
+	r_itr = list_iterator_create(resv_usage_list);
 	while(curr_start < end) {
+		local_cluster_usage_t *last_c_usage = NULL;
 		int last_id = -1;
 		int last_wckeyid = -1;
 		int seconds = 0;
 		local_cluster_usage_t *c_usage = NULL;
+		local_resv_usage_t *r_usage = NULL;
 		local_id_usage_t *a_usage = NULL;
 		local_id_usage_t *w_usage = NULL;
 
-		last_c_usage = NULL;
-
 		debug3("curr hour is now %d-%d", curr_start, curr_end);
 /* 		info("start %s", ctime(&curr_start)); */
 /* 		info("end %s", ctime(&curr_end)); */
 		
-		// first get the events during this time
+		/* first get the events during this time, except those
+		 * with the maintenance flag set in the state.  We
+		 * handle those later with the reservations.
+		 */
 		query = xstrdup_printf("select %s from %s where "
-				       "(period_start < %d "
+				       "!(state & %d) && (period_start < %d "
 				       "&& (period_end >= %d "
 				       "|| period_end = 0)) "
 				       "order by node_name, period_start",
 				       event_str, event_table,
+				       NODE_STATE_MAINT,
 				       curr_end, curr_start);
 
 		debug3("%d(%d) query\n%s", mysql_conn->conn, __LINE__, query);
@@ -211,7 +272,7 @@ extern int mysql_hourly_rollup(mysql_conn_t *mysql_conn,
 			int row_start = atoi(row[EVENT_REQ_START]);
 			int row_end = atoi(row[EVENT_REQ_END]);
 			int row_cpu = atoi(row[EVENT_REQ_CPU]);
-					
+		
 			if(row_start < curr_start)
 				row_start = curr_start;
 		
@@ -299,6 +360,83 @@ extern int mysql_hourly_rollup(mysql_conn_t *mysql_conn,
 		}
 		mysql_free_result(result);
 
+		// now get the reservations during this time
+		query = xstrdup_printf("select %s from %s where "
+				       "(start < %d && end >= %d) "
+				       "order by cluster, start",
+				       resv_str, resv_table,
+				       curr_end, curr_start);
+
+		debug3("%d(%d) query\n%s", mysql_conn->conn, __LINE__, query);
+		if(!(result = mysql_db_query_ret(
+			     mysql_conn->db_conn, query, 0))) {
+			xfree(query);
+			return SLURM_ERROR;
+		}
+		xfree(query);
+		
+		while((row = mysql_fetch_row(result))) {
+			int row_start = atoi(row[RESV_REQ_START]);
+			int row_end = atoi(row[RESV_REQ_END]);
+			int row_cpu = atoi(row[RESV_REQ_CPU]);
+			int row_flags = atoi(row[RESV_REQ_FLAGS]);
+
+			if(row_start < curr_start)
+				row_start = curr_start;
+		
+			if(!row_end || row_end > curr_end) 
+				row_end = curr_end;
+
+			/* Don't worry about it if the time is less
+			 * than 1 second.
+			 */
+			if((row_end - row_start) < 1)
+				continue;
+
+			r_usage = xmalloc(sizeof(local_resv_usage_t));
+			r_usage->id = atoi(row[RESV_REQ_ID]);
+
+			r_usage->local_assocs = list_create(slurm_destroy_char);
+			slurm_addto_char_list(r_usage->local_assocs, 
+					      row[RESV_REQ_ASSOCS]);
+
+			r_usage->cluster = xstrdup(row[RESV_REQ_CLUSTER]);
+			r_usage->total_time = (row_end - row_start) * row_cpu;
+			r_usage->start = row_start;
+			r_usage->end = row_end;
+			list_append(resv_usage_list, r_usage);
+
+			/* Since this reservation was added to the
+			   cluster and only certain people could run
+			   there we will use this as allocated time on
+			   the system.  If the reservation was for
+			   maintenance then we add the time to planned
+			   down time. 
+			*/
+			if(last_c_usage && !strcmp(last_c_usage->name,
+						   r_usage->cluster)) {
+				c_usage = last_c_usage;
+			} else {
+				list_iterator_reset(c_itr);
+				while((c_usage = list_next(c_itr))) {
+					if(!strcmp(c_usage->name,
+						   r_usage->cluster)) {
+						last_c_usage = c_usage;
+						break;
+					}
+				}				
+			}
+			if(!c_usage)	/* unknown cluster, nothing to book */
+				continue;
+			if(row_flags & RESERVE_FLAG_MAINT)
+				c_usage->pd_cpu += r_usage->total_time;
+			else
+				c_usage->a_cpu += r_usage->total_time;
+/* 			info("adding this much %lld to cluster %s", */
+/* 			     r_usage->total_time, c_usage->name); */
+
+		}
+		mysql_free_result(result);
+
+		/* now get the jobs during this time only  */
 		query = xstrdup_printf("select %s from %s where "
 				       "(eligible < %d && (end >= %d "
 				       "|| end = 0)) "
@@ -318,6 +456,7 @@ extern int mysql_hourly_rollup(mysql_conn_t *mysql_conn,
 			int job_id = atoi(row[JOB_REQ_JOBID]);
 			int assoc_id = atoi(row[JOB_REQ_ASSOCID]);
 			int wckey_id = atoi(row[JOB_REQ_WCKEYID]);
+			int resv_id = atoi(row[JOB_REQ_RESVID]);
 			int row_eligible = atoi(row[JOB_REQ_ELG]);
 			int row_start = atoi(row[JOB_REQ_START]);
 			int row_end = atoi(row[JOB_REQ_END]);
@@ -336,7 +475,7 @@ extern int mysql_hourly_rollup(mysql_conn_t *mysql_conn,
 
 			if(!row_start || ((row_end - row_start) < 1)) 
 				goto calc_cluster;
-
+			
 			seconds = (row_end - row_start);
 
 			if(row[JOB_REQ_SUSPENDED]) {
@@ -377,7 +516,7 @@ extern int mysql_hourly_rollup(mysql_conn_t *mysql_conn,
 
 					if((local_end - local_start) < 1)
 						continue;
-					
+
 					seconds -= (local_end - local_start);
 				}
 				mysql_free_result(result2);
@@ -386,14 +525,14 @@ extern int mysql_hourly_rollup(mysql_conn_t *mysql_conn,
 				debug4("This job (%u) was suspended "
 				       "the entire hour", job_id);
 				continue;
-			}
+			} 
 
 			if(last_id != assoc_id) {
 				a_usage = xmalloc(sizeof(local_id_usage_t));
 				a_usage->id = assoc_id;
 				list_append(assoc_usage_list, a_usage);
 				last_id = assoc_id;
-			}
+			} 
 			
 			a_usage->a_cpu += seconds * row_acpu;
 
@@ -423,6 +562,56 @@ extern int mysql_hourly_rollup(mysql_conn_t *mysql_conn,
 			if(!row[JOB_REQ_CLUSTER] || !row[JOB_REQ_CLUSTER][0]) 
 				continue;
 			
+			/* first figure out the reservation */
+			if(resv_id) {
+				if(seconds <= 0)
+					continue;
+				/* Since we have already added the
+				   entire reservation as used time on
+				   the cluster we only need to
+				   calculate the used time for the
+				   reservation and then divvy up the
+				   unused time over the associations
+				   able to run in the reservation.
+				   Since the job ran, or was to run, in
+				   a reservation we don't care about
+				   eligible time, since that could
+				   totally skew the cluster's reserved
+				   time, and the job may be able to run
+				   outside of the reservation. */
+				list_iterator_reset(r_itr);
+				while((r_usage = list_next(r_itr))) {
+					/* since the reservation could
+					   have changed in some way,
+					   thus making a new
+					   reservation record in the
+					   database, we have to make
+					   sure all the reservations
+					   are checked to see if such
+					   a thing has happened */
+					if((r_usage->id == resv_id)
+					   && !strcmp(r_usage->cluster,
+						      row[JOB_REQ_CLUSTER])) {
+						int temp_end = row_end;
+						int temp_start = row_start;
+						if(r_usage->start > temp_start)
+							temp_start =
+								r_usage->start;
+						if(r_usage->end < temp_end)
+							temp_end = r_usage->end;
+						
+						if((temp_end - temp_start)
+						   > 0) {
+							r_usage->a_cpu +=
+								(temp_end
+								 - temp_start)
+								* row_acpu;
+						}
+					}
+				}
+				continue;
+			}
+
 			if(last_c_usage && !strcmp(last_c_usage->name,
 						   row[JOB_REQ_CLUSTER])) {
 				c_usage = last_c_usage;
@@ -469,7 +658,8 @@ extern int mysql_hourly_rollup(mysql_conn_t *mysql_conn,
 					row_end = c_usage->end;
 				
 				if((row_end - row_start) > 0) {
-					seconds = (row_end - row_start);
+					seconds = (row_end - row_start)
+						* row_rcpu;
 					
 /* 					info("%d assoc %d reserved " */
 /* 					     "(%d)(%d-%d) * %d = %d " */
@@ -481,23 +671,118 @@ extern int mysql_hourly_rollup(mysql_conn_t *mysql_conn,
 /* 					     row_rcpu, */
 /* 					     seconds * row_rcpu, */
 /* 					     row_rcpu); */
-					c_usage->r_cpu += seconds * row_rcpu;
+					c_usage->r_cpu += seconds;
 				}
 			}
 		}
 		mysql_free_result(result);
 
+		/* now figure out how much more to add to the
+		   associations that could have run in the reservation
+		*/
+		list_iterator_reset(r_itr);
+		while((r_usage = list_next(r_itr))) {
+			int64_t idle = r_usage->total_time - r_usage->a_cpu;
+			char *assoc = NULL;
+			ListIterator tmp_itr = NULL;
+
+			if(idle <= 0)
+				continue;
+			
+			/* now divide that time by the number of
+			   associations in the reservation and add the
+			   result to each association */
+			seconds = idle / list_count(r_usage->local_assocs);
+/* 			info("resv %d got %d for seconds for %d assocs", */
+/* 			     r_usage->id, seconds, */
+/* 			     list_count(r_usage->local_assocs)); */
+			tmp_itr = list_iterator_create(r_usage->local_assocs);
+			while((assoc = list_next(tmp_itr))) {
+				int associd = atoi(assoc);
+				if(last_id != associd) {
+					list_iterator_reset(a_itr);
+					while((a_usage = list_next(a_itr))) {
+						if(a_usage->id == associd) {
+							last_id = a_usage->id;
+							break;
+						}
+					}
+				}
+
+				if(!a_usage) {
+					a_usage = xmalloc(
+						sizeof(local_id_usage_t));
+					a_usage->id = associd;
+					list_append(assoc_usage_list, a_usage);
+					last_id = associd;
+				} 
+				
+				a_usage->a_cpu += seconds;
+			}
+			list_iterator_destroy(tmp_itr);
+		}
+
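
Sketch of the even split performed by the loop above: the reservation's
unused CPU-seconds are divided with integer truncation among the
associations allowed to use it, so a small remainder can be dropped.
A worked example with made-up numbers:

    /* Hypothetical reservation: 3600 CPU-seconds reserved, of which
     * jobs consumed 1000, shared by 7 associations. */
    int64_t  idle  = 3600 - 1000;      /* 2600 unused CPU-seconds      */
    int      count = 7;                /* list_count(local_assocs)     */
    uint64_t share = idle / count;     /* 371 each; 2600 - 7*371 = 3   *
                                        * seconds lost to truncation   */
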
 		/* Now put the lists into the usage tables */
 		list_iterator_reset(c_itr);
 		while((c_usage = list_next(c_itr))) {
-			c_usage->i_cpu = c_usage->total_time - c_usage->a_cpu -
-				c_usage->d_cpu - c_usage->r_cpu;
+			uint64_t total_used = 0;
+				
+			/* sanity check to make sure we don't have more
+			   allocated cpu-seconds than are possible. */
+			if(c_usage->total_time < c_usage->a_cpu) {
+				char *start_char = xstrdup(ctime(&curr_start));
+				char *end_char = xstrdup(ctime(&curr_end));
+				error("We have more allocated time than is "
+				      "possible (%llu > %llu) for "
+				      "cluster %s(%d) from %s - %s",
+				      c_usage->a_cpu, c_usage->total_time,
+				      c_usage->name, c_usage->cpu_count,
+				      start_char, end_char);
+				xfree(start_char);
+				xfree(end_char);
+				c_usage->a_cpu = c_usage->total_time;
+			}
+
+			total_used = c_usage->a_cpu +
+				c_usage->d_cpu + c_usage->pd_cpu;
+
+			/* Make sure the total time we care about
+			   doesn't go over the limit */
+			if(c_usage->total_time < (total_used)) {
+				char *start_char = xstrdup(ctime(&curr_start));
+				char *end_char = xstrdup(ctime(&curr_end));
+				error("We have more time than is "
+				      "possible ((%llu+%llu+%llu)=%llu "
+				      "> %llu) for "
+				      "cluster %s(%d) from %s - %s",
+				      c_usage->a_cpu, c_usage->d_cpu,
+				      c_usage->pd_cpu, total_used, 
+				      c_usage->total_time,
+				      c_usage->name, c_usage->cpu_count,
+				      start_char, end_char);
+				xfree(start_char);
+				xfree(end_char);
+
+				/* set the planned down time to 0 and
+				   the down time to whatever is left
+				   over from the allocated time. */
+				c_usage->pd_cpu = 0;
+				c_usage->d_cpu = 
+					c_usage->total_time - c_usage->a_cpu;
+
+				total_used = c_usage->a_cpu +
+					c_usage->d_cpu + c_usage->pd_cpu;
+			}
+
+			c_usage->i_cpu = c_usage->total_time -
+				total_used - c_usage->r_cpu;
 			/* sanity check just to make sure we have a
 			 * legitimate time after we calculated
 			 * idle/reserved time; put any extra in the
 			 * over commit field
 			 */
-			
+/* 			info("%s got idle of %lld", c_usage->name,  */
+/* 			     (int64_t)c_usage->i_cpu); */
 			if((int64_t)c_usage->i_cpu < 0) {
 /* 				info("got %d %d %d", c_usage->r_cpu, */
 /* 				     c_usage->i_cpu, c_usage->o_cpu); */
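
The bookkeeping above enforces a simple identity over the
local_cluster_usage_t fields used in this file: every CPU-second of the
hour is classified exactly once, so

    total_time = a_cpu + d_cpu + pd_cpu + i_cpu + r_cpu

and idle is derived as the remainder.  A hedged sketch of the clamp the
surrounding code performs when that remainder goes negative:

    uint64_t used = c_usage->a_cpu + c_usage->d_cpu + c_usage->pd_cpu;
    int64_t  idle = (int64_t)c_usage->total_time - used - c_usage->r_cpu;

    if (idle < 0) {                 /* over-committed hour */
            c_usage->o_cpu += -idle;
            idle = 0;
    }
    c_usage->i_cpu = idle;
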
@@ -523,26 +808,30 @@ extern int mysql_hourly_rollup(mysql_conn_t *mysql_conn,
 			if(query) {
 				xstrfmtcat(query, 
 					   ", (%d, %d, '%s', %d, %d, "
-					   "%llu, %llu, %llu, %llu, %llu)",
+					   "%llu, %llu, %llu, "
+					   "%llu, %llu, %llu)",
 					   now, now, 
 					   c_usage->name, c_usage->start, 
 					   c_usage->cpu_count, c_usage->a_cpu,
-					   c_usage->d_cpu, c_usage->i_cpu,
-					   c_usage->o_cpu, c_usage->r_cpu); 
+					   c_usage->d_cpu, c_usage->pd_cpu,
+					   c_usage->i_cpu, c_usage->o_cpu,
+					   c_usage->r_cpu); 
 			} else {
 				xstrfmtcat(query, 
 					   "insert into %s (creation_time, "
 					   "mod_time, cluster, period_start, "
 					   "cpu_count, alloc_cpu_secs, "
-					   "down_cpu_secs, idle_cpu_secs, "
-					   "over_cpu_secs, resv_cpu_secs) "
+					   "down_cpu_secs, pdown_cpu_secs, "
+					   "idle_cpu_secs, over_cpu_secs, "
+					   "resv_cpu_secs) "
 					   "values (%d, %d, '%s', %d, %d, "
-					   "%llu, %llu, %llu, %llu, %llu)",
+					   "%llu, %llu, %llu, "
+					   "%llu, %llu, %llu)",
 					   cluster_hour_table, now, now, 
 					   c_usage->name, c_usage->start, 
 					   c_usage->cpu_count,
-					   c_usage->a_cpu,
-					   c_usage->d_cpu, c_usage->i_cpu,
+					   c_usage->a_cpu, c_usage->d_cpu, 
+					   c_usage->pd_cpu, c_usage->i_cpu,
 					   c_usage->o_cpu, c_usage->r_cpu); 
 			}
 		}
@@ -553,6 +842,7 @@ extern int mysql_hourly_rollup(mysql_conn_t *mysql_conn,
 				   "mod_time=%d, cpu_count=VALUES(cpu_count), "
 				   "alloc_cpu_secs=VALUES(alloc_cpu_secs), "
 				   "down_cpu_secs=VALUES(down_cpu_secs), "
+				   "pdown_cpu_secs=VALUES(pdown_cpu_secs), "
 				   "idle_cpu_secs=VALUES(idle_cpu_secs), "
 				   "over_cpu_secs=VALUES(over_cpu_secs), "
 				   "resv_cpu_secs=VALUES(resv_cpu_secs)",
@@ -593,7 +883,7 @@ extern int mysql_hourly_rollup(mysql_conn_t *mysql_conn,
 			xstrfmtcat(query, 
 				   " on duplicate key update "
 				   "mod_time=%d, "
-				   "alloc_cpu_secs=VALUES(alloc_cpu_secs)",
+				   "alloc_cpu_secs=VALUES(alloc_cpu_secs);",
 				   now);
 					   	
 			debug3("%d(%d) query\n%s",
@@ -635,7 +925,7 @@ extern int mysql_hourly_rollup(mysql_conn_t *mysql_conn,
 			xstrfmtcat(query, 
 				   " on duplicate key update "
 				   "mod_time=%d, "
-				   "alloc_cpu_secs=VALUES(alloc_cpu_secs)",
+				   "alloc_cpu_secs=VALUES(alloc_cpu_secs);",
 				   now);
 					   	
 			debug3("%d(%d) query\n%s",
@@ -659,19 +949,23 @@ end_it:
 	xfree(suspend_str);	
 	xfree(event_str);	
 	xfree(job_str);
+	xfree(resv_str);
 	list_iterator_destroy(a_itr);
 	list_iterator_destroy(c_itr);
 	list_iterator_destroy(w_itr);
+	list_iterator_destroy(r_itr);
 		
 	list_destroy(assoc_usage_list);
 	list_destroy(cluster_usage_list);
 	list_destroy(wckey_usage_list);
+	list_destroy(resv_usage_list);
+
 /* 	info("stop start %s", ctime(&curr_start)); */
 /* 	info("stop end %s", ctime(&curr_end)); */
 	return rc;
 }
 extern int mysql_daily_rollup(mysql_conn_t *mysql_conn, 
-			      time_t start, time_t end)
+			      time_t start, time_t end, uint16_t archive_data)
 {
 	/* can't just add 86400 since daylight saving time starts and
 	 * ends every once in a while
@@ -712,12 +1006,13 @@ extern int mysql_daily_rollup(mysql_conn_t *mysql_conn,
 		xstrfmtcat(query,
 			   "insert into %s (creation_time, "
 			   "mod_time, cluster, period_start, cpu_count, "
-			   "alloc_cpu_secs, down_cpu_secs, idle_cpu_secs, "
-			   "over_cpu_secs, resv_cpu_secs) "
+			   "alloc_cpu_secs, down_cpu_secs, pdown_cpu_secs, "
+			   "idle_cpu_secs, over_cpu_secs, resv_cpu_secs) "
 			   "select %d, %d, cluster, "
 			   "%d, @CPU:=MAX(cpu_count), "
 			   "@ASUM:=SUM(alloc_cpu_secs), "
 			   "@DSUM:=SUM(down_cpu_secs), "
+			   "@PDSUM:=SUM(pdown_cpu_secs), "
 			   "@ISUM:=SUM(idle_cpu_secs), "
 			   "@OSUM:=SUM(over_cpu_secs), "
 			   "@RSUM:=SUM(resv_cpu_secs) from %s where "
@@ -725,8 +1020,8 @@ extern int mysql_daily_rollup(mysql_conn_t *mysql_conn,
 			   "group by cluster on duplicate key update "
 			   "mod_time=%d, cpu_count=@CPU, "
 			   "alloc_cpu_secs=@ASUM, down_cpu_secs=@DSUM, "
-			   "idle_cpu_secs=@ISUM, over_cpu_secs=@OSUM, "
-			   "resv_cpu_secs=@RSUM;",
+			   "pdown_cpu_secs=@PDSUM, idle_cpu_secs=@ISUM, "
+			   "over_cpu_secs=@OSUM, resv_cpu_secs=@RSUM;",
 			   cluster_day_table, now, now, curr_start,
 			   cluster_hour_table,
 			   curr_end, curr_start, now);
@@ -735,8 +1030,8 @@ extern int mysql_daily_rollup(mysql_conn_t *mysql_conn,
 				   "insert into %s (creation_time, "
 				   "mod_time, id, period_start, "
 				   "alloc_cpu_secs) select %d, %d, "
-				   "id, %d, @ASUM:=SUM(alloc_cpu_secs) from %s "
-				   "where (period_start < %d && "
+				   "id, %d, @ASUM:=SUM(alloc_cpu_secs) "
+				   "from %s where (period_start < %d && "
 				   "period_start >= %d) "
 				   "group by id on duplicate key update "
 				   "mod_time=%d, alloc_cpu_secs=@ASUM;",
@@ -765,17 +1060,6 @@ extern int mysql_daily_rollup(mysql_conn_t *mysql_conn,
 		start_tm.tm_isdst = -1;
 		curr_end = mktime(&start_tm);
 	}
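
The tm_isdst = -1 / mktime() idiom above is what makes the day advance
safe across daylight-saving transitions; a minimal stand-alone sketch
(hypothetical helper, not part of this patch):

    #include <time.h>

    static time_t next_midnight(time_t t)
    {
            struct tm tm;

            localtime_r(&t, &tm);
            tm.tm_sec = tm.tm_min = tm.tm_hour = 0;
            tm.tm_mday++;           /* may overflow the month; mktime()
                                     * renormalizes it */
            tm.tm_isdst = -1;       /* let the C library re-derive DST */
            return mktime(&tm);
    }
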
-	/* remove all data from suspend table that was older than
-	 * start. 
-	 */
-	query = xstrdup_printf("delete from %s where end < %d && end != 0",
-			       suspend_table, start);
-	rc = mysql_db_query(mysql_conn->db_conn, query);
-	xfree(query);
-	if(rc != SLURM_SUCCESS) {
-		error("Couldn't remove old suspend data");
-		return SLURM_ERROR;
-	}
 			       
 /* 	info("stop start %s", ctime(&curr_start)); */
 /* 	info("stop end %s", ctime(&curr_end)); */
@@ -783,7 +1067,7 @@ extern int mysql_daily_rollup(mysql_conn_t *mysql_conn,
 	return SLURM_SUCCESS;
 }
 extern int mysql_monthly_rollup(mysql_conn_t *mysql_conn,
-				time_t start, time_t end)
+				time_t start, time_t end, uint16_t archive_data)
 {
 	int rc = SLURM_SUCCESS;
 	struct tm start_tm;
@@ -823,12 +1107,13 @@ extern int mysql_monthly_rollup(mysql_conn_t *mysql_conn,
 		xstrfmtcat(query,
 			   "insert into %s (creation_time, "
 			   "mod_time, cluster, period_start, cpu_count, "
-			   "alloc_cpu_secs, down_cpu_secs, idle_cpu_secs, "
-			   "over_cpu_secs, resv_cpu_secs) "
+			   "alloc_cpu_secs, down_cpu_secs, pdown_cpu_secs, "
+			   "idle_cpu_secs, over_cpu_secs, resv_cpu_secs) "
 			   "select %d, %d, cluster, "
 			   "%d, @CPU:=MAX(cpu_count), "
 			   "@ASUM:=SUM(alloc_cpu_secs), "
 			   "@DSUM:=SUM(down_cpu_secs), "
+			   "@PDSUM:=SUM(pdown_cpu_secs), "
 			   "@ISUM:=SUM(idle_cpu_secs), "
 			   "@OSUM:=SUM(over_cpu_secs), "
 			   "@RSUM:=SUM(resv_cpu_secs) from %s where "
@@ -836,17 +1121,17 @@ extern int mysql_monthly_rollup(mysql_conn_t *mysql_conn,
 			   "group by cluster on duplicate key update "
 			   "mod_time=%d, cpu_count=@CPU, "
 			   "alloc_cpu_secs=@ASUM, down_cpu_secs=@DSUM, "
-			   "idle_cpu_secs=@ISUM, over_cpu_secs=@OSUM, "
-			   "resv_cpu_secs=@RSUM;",
+			   "pdown_cpu_secs=@PDSUM, idle_cpu_secs=@ISUM, "
+			   "over_cpu_secs=@OSUM, resv_cpu_secs=@RSUM;",
 			   cluster_month_table, now, now, curr_start,
 			   cluster_day_table,
 			   curr_end, curr_start, now);
 		if(track_wckey) {
 			xstrfmtcat(query,
 				   "insert into %s (creation_time, mod_time, "
-				   "id, "
-				   "period_start, alloc_cpu_secs) select %d, "
-				   "%d, id, %d, @ASUM:=SUM(alloc_cpu_secs) "
+				   "id, period_start, alloc_cpu_secs) "
+				   "select %d, %d, id, %d, "
+				   "@ASUM:=SUM(alloc_cpu_secs) "
 				   "from %s where (period_start < %d && "
 				   "period_start >= %d) "
 				   "group by id on duplicate key update "
@@ -877,30 +1162,26 @@ extern int mysql_monthly_rollup(mysql_conn_t *mysql_conn,
 		curr_end = mktime(&start_tm);
 	}
 
-	/* remove all data from event table that was older than
-	 * start. 
-	 */
-	query = xstrdup_printf("delete from %s where period_end < %d "
-			       "&& period_end != 0",
-			       event_table, start);
-	rc = mysql_db_query(mysql_conn->db_conn, query);
-	xfree(query);
-	if(rc != SLURM_SUCCESS) {
-		error("Couldn't remove old event data");
-		return SLURM_ERROR;
-	}
+	/* if we didn't ask for archive data, return here and don't do
+	   anything extra; just roll up */
+
+	if(!archive_data)
+		return SLURM_SUCCESS;
+
 	if(!slurmdbd_conf) 
 		return SLURM_SUCCESS;
 
 	memset(&arch_cond, 0, sizeof(arch_cond));
 	arch_cond.archive_dir = slurmdbd_conf->archive_dir;
+	arch_cond.archive_events = slurmdbd_conf->archive_events;
 	arch_cond.archive_jobs = slurmdbd_conf->archive_jobs;
 	arch_cond.archive_script = slurmdbd_conf->archive_script;
 	arch_cond.archive_steps = slurmdbd_conf->archive_steps;
-	arch_cond.job_purge = slurmdbd_conf->job_purge;
-	arch_cond.step_purge = slurmdbd_conf->step_purge;
+	arch_cond.archive_suspend = slurmdbd_conf->archive_suspend;
+	arch_cond.purge_event = slurmdbd_conf->purge_event;
+	arch_cond.purge_job = slurmdbd_conf->purge_job;
+	arch_cond.purge_step = slurmdbd_conf->purge_step;
+	arch_cond.purge_suspend = slurmdbd_conf->purge_suspend;
 
 	return mysql_jobacct_process_archive(mysql_conn, &arch_cond);
 }
-
-#endif
diff --git a/src/plugins/accounting_storage/mysql/mysql_rollup.h b/src/plugins/accounting_storage/mysql/mysql_rollup.h
index c1a4a72ce063ed7dc06ba9edc429c73d0cbf5ad3..bfa1bdbbbed3dae0bbbe8c42449a8a905ed1e456 100644
--- a/src/plugins/accounting_storage/mysql/mysql_rollup.h
+++ b/src/plugins/accounting_storage/mysql/mysql_rollup.h
@@ -7,10 +7,11 @@
  *  Copyright (C) 2008 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Danny Auble <da@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -43,27 +44,11 @@
 
 #include "mysql_jobacct_process.h"
 
-#ifdef HAVE_MYSQL
-extern char *assoc_table;
-extern char *assoc_day_table;
-extern char *assoc_hour_table;
-extern char *assoc_month_table;
-extern char *cluster_day_table;
-extern char *cluster_hour_table;
-extern char *cluster_month_table;
-extern char *event_table;
-extern char *suspend_table;
-extern char *wckey_day_table;
-extern char *wckey_hour_table;
-extern char *wckey_month_table;
-
 extern int mysql_hourly_rollup(mysql_conn_t *mysql_conn,
 			       time_t start, time_t end);
 extern int mysql_daily_rollup(mysql_conn_t *mysql_conn,
-			      time_t start, time_t end);
+			      time_t start, time_t end, uint16_t archive_data);
 extern int mysql_monthly_rollup(mysql_conn_t *mysql_conn,
-			       time_t start, time_t end);
-
-#endif
-
+				time_t start, time_t end,
+				uint16_t archive_data);
 #endif
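
A hedged sketch of how a caller drives these entry points with the new
archive_data flag (connection setup elided; start/end are the rollup
window): routine rollups pass 0 so no purge or archive work is done,
while a maintenance pass sets it so the purge/archive settings from
slurmdbd.conf take effect.

    /* routine rollup: aggregate only */
    if (mysql_monthly_rollup(mysql_conn, start, end, 0) != SLURM_SUCCESS)
            error("monthly rollup failed");

    /* maintenance rollup: also archive and purge old records */
    if (mysql_monthly_rollup(mysql_conn, start, end, 1) != SLURM_SUCCESS)
            error("monthly rollup with archiving failed");
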
diff --git a/src/plugins/accounting_storage/none/Makefile.in b/src/plugins/accounting_storage/none/Makefile.in
index 976404aa96c8c3f804d67ae3c2886f7e5714e8a4..91582b3702c9e09f34d08844c32abb21243a6f37 100644
--- a/src/plugins/accounting_storage/none/Makefile.in
+++ b/src/plugins/accounting_storage/none/Makefile.in
@@ -43,14 +43,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -111,6 +115,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
diff --git a/src/plugins/accounting_storage/none/accounting_storage_none.c b/src/plugins/accounting_storage/none/accounting_storage_none.c
index a0a8555a93ce65371afe574a3998c0ed96a0ad77..6001cab980087fe51058f2b9f7762ec15935808c 100644
--- a/src/plugins/accounting_storage/none/accounting_storage_none.c
+++ b/src/plugins/accounting_storage/none/accounting_storage_none.c
@@ -9,7 +9,8 @@
  *  Written by Danny Auble <da@llnl.gov>
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -145,6 +146,12 @@ extern int acct_storage_p_add_wckeys(void *db_conn, uint32_t uid,
 	return SLURM_SUCCESS;
 }
 
+extern int acct_storage_p_add_reservation(void *db_conn,
+					  acct_reservation_rec_t *resv)
+{
+	return SLURM_SUCCESS;
+}
+
 extern List acct_storage_p_modify_users(void *db_conn, uint32_t uid,
 				       acct_user_cond_t *user_q,
 				       acct_user_rec_t *user)
@@ -187,6 +194,12 @@ extern List acct_storage_p_modify_wckeys(void *db_conn, uint32_t uid,
 	return SLURM_SUCCESS;
 }
 
+extern int acct_storage_p_modify_reservation(void *db_conn,
+					     acct_reservation_rec_t *resv)
+{
+	return SLURM_SUCCESS;
+}
+
 extern List acct_storage_p_remove_users(void *db_conn, uint32_t uid,
 				       acct_user_cond_t *user_q)
 {
@@ -230,6 +243,12 @@ extern List acct_storage_p_remove_wckeys(void *db_conn, uint32_t uid,
 	return NULL;
 }
 
+extern int acct_storage_p_remove_reservation(void *db_conn,
+					     acct_reservation_rec_t *resv)
+{
+	return SLURM_SUCCESS;
+}
+
 extern List acct_storage_p_get_users(void *db_conn, uid_t uid,
 				     acct_user_cond_t *user_q)
 {
@@ -248,6 +267,11 @@ extern List acct_storage_p_get_clusters(void *db_conn, uid_t uid,
 	return NULL;
 }
 
+extern List acct_storage_p_get_config(void *db_conn)
+{
+	return NULL;
+}
+
 extern List acct_storage_p_get_associations(void *db_conn, uid_t uid,
 					    acct_association_cond_t *assoc_q)
 {
@@ -266,6 +290,12 @@ extern List acct_storage_p_get_wckeys(void *db_conn, uid_t uid,
 	return NULL;
 }
 
+extern List acct_storage_p_get_reservations(void *mysql_conn, uid_t uid,
+					    acct_reservation_cond_t *resv_cond)
+{
+	return NULL;
+}
+
 extern List acct_storage_p_get_txn(void *db_conn, uid_t uid,
 				   acct_txn_cond_t *txn_cond)
 {
@@ -282,7 +312,8 @@ extern int acct_storage_p_get_usage(void *db_conn, uid_t uid,
 }
 
 extern int acct_storage_p_roll_usage(void *db_conn, 
-				     time_t sent_start)
+				     time_t sent_start, time_t sent_end,
+				     uint16_t archive_data)
 {
 	int rc = SLURM_SUCCESS;
 
@@ -313,6 +344,7 @@ extern int clusteracct_storage_p_register_ctld(void *db_conn,
 
 extern int clusteracct_storage_p_cluster_procs(void *db_conn,
 					       char *cluster,
+					       char *cluster_nodes,
 					       uint32_t procs,
 					       time_t event_time)
 {
diff --git a/src/plugins/accounting_storage/pgsql/Makefile.am b/src/plugins/accounting_storage/pgsql/Makefile.am
index c0c2f0637e8ee69ab1cc2728cee5e11643dc36e7..b4d649b830dabe75efc99e367b2e2777c100fcce 100644
--- a/src/plugins/accounting_storage/pgsql/Makefile.am
+++ b/src/plugins/accounting_storage/pgsql/Makefile.am
@@ -6,6 +6,7 @@ PLUGIN_FLAGS = -module -avoid-version --export-dynamic
 
 INCLUDES = -I$(top_srcdir) -I$(top_srcdir)/src/common
 
+if WITH_PGSQL
 pkglib_LTLIBRARIES = accounting_storage_pgsql.la
 
 # Pgsql storage plugin.
@@ -19,3 +20,7 @@ accounting_storage_pgsql_la_LIBADD = \
 force:
 $(accounting_storage_pgsql_la_LIBADD) : force
 	@cd `dirname $@` && $(MAKE) `basename $@`
+else
+EXTRA_accounting_storage_pgsql_la_SOURCES = accounting_storage_pgsql.c \
+		pgsql_jobacct_process.c pgsql_jobacct_process.h
+endif
diff --git a/src/plugins/accounting_storage/pgsql/Makefile.in b/src/plugins/accounting_storage/pgsql/Makefile.in
index 45edbd3354d049988aa8fd7476456a7c94ec5b67..64c6572f24de77288167e26a38e06c6859b21d5f 100644
--- a/src/plugins/accounting_storage/pgsql/Makefile.in
+++ b/src/plugins/accounting_storage/pgsql/Makefile.in
@@ -43,14 +43,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -77,18 +81,24 @@ am__installdirs = "$(DESTDIR)$(pkglibdir)"
 pkglibLTLIBRARIES_INSTALL = $(INSTALL)
 LTLIBRARIES = $(pkglib_LTLIBRARIES)
 am__DEPENDENCIES_1 =
-accounting_storage_pgsql_la_DEPENDENCIES =  \
-	$(top_builddir)/src/database/libslurm_pgsql.la \
-	$(am__DEPENDENCIES_1)
-am_accounting_storage_pgsql_la_OBJECTS =  \
-	accounting_storage_pgsql_la-accounting_storage_pgsql.lo \
-	accounting_storage_pgsql_la-pgsql_jobacct_process.lo
+@WITH_PGSQL_TRUE@accounting_storage_pgsql_la_DEPENDENCIES = $(top_builddir)/src/database/libslurm_pgsql.la \
+@WITH_PGSQL_TRUE@	$(am__DEPENDENCIES_1)
+am__accounting_storage_pgsql_la_SOURCES_DIST =  \
+	accounting_storage_pgsql.c pgsql_jobacct_process.c \
+	pgsql_jobacct_process.h
+@WITH_PGSQL_TRUE@am_accounting_storage_pgsql_la_OBJECTS = accounting_storage_pgsql_la-accounting_storage_pgsql.lo \
+@WITH_PGSQL_TRUE@	accounting_storage_pgsql_la-pgsql_jobacct_process.lo
+am__EXTRA_accounting_storage_pgsql_la_SOURCES_DIST =  \
+	accounting_storage_pgsql.c pgsql_jobacct_process.c \
+	pgsql_jobacct_process.h
 accounting_storage_pgsql_la_OBJECTS =  \
 	$(am_accounting_storage_pgsql_la_OBJECTS)
 accounting_storage_pgsql_la_LINK = $(LIBTOOL) --tag=CC \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CCLD) \
 	$(accounting_storage_pgsql_la_CFLAGS) $(CFLAGS) \
 	$(accounting_storage_pgsql_la_LDFLAGS) $(LDFLAGS) -o $@
+@WITH_PGSQL_TRUE@am_accounting_storage_pgsql_la_rpath = -rpath \
+@WITH_PGSQL_TRUE@	$(pkglibdir)
 DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm
 depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp
 am__depfiles_maybe = depfiles
@@ -101,8 +111,10 @@ CCLD = $(CC)
 LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
 	--mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \
 	$(LDFLAGS) -o $@
-SOURCES = $(accounting_storage_pgsql_la_SOURCES)
-DIST_SOURCES = $(accounting_storage_pgsql_la_SOURCES)
+SOURCES = $(accounting_storage_pgsql_la_SOURCES) \
+	$(EXTRA_accounting_storage_pgsql_la_SOURCES)
+DIST_SOURCES = $(am__accounting_storage_pgsql_la_SOURCES_DIST) \
+	$(am__EXTRA_accounting_storage_pgsql_la_SOURCES_DIST)
 ETAGS = etags
 CTAGS = ctags
 DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
@@ -116,6 +128,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
@@ -278,16 +294,19 @@ top_srcdir = @top_srcdir@
 AUTOMAKE_OPTIONS = foreign
 PLUGIN_FLAGS = -module -avoid-version --export-dynamic
 INCLUDES = -I$(top_srcdir) -I$(top_srcdir)/src/common
-pkglib_LTLIBRARIES = accounting_storage_pgsql.la
+@WITH_PGSQL_TRUE@pkglib_LTLIBRARIES = accounting_storage_pgsql.la
 
 # Pgsql storage plugin.
-accounting_storage_pgsql_la_SOURCES = accounting_storage_pgsql.c \
-		pgsql_jobacct_process.c pgsql_jobacct_process.h
+@WITH_PGSQL_TRUE@accounting_storage_pgsql_la_SOURCES = accounting_storage_pgsql.c \
+@WITH_PGSQL_TRUE@		pgsql_jobacct_process.c pgsql_jobacct_process.h
 
-accounting_storage_pgsql_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS)
-accounting_storage_pgsql_la_CFLAGS = $(PGSQL_CFLAGS)
-accounting_storage_pgsql_la_LIBADD = \
-	$(top_builddir)/src/database/libslurm_pgsql.la $(PGSQL_LIBS)
+@WITH_PGSQL_TRUE@accounting_storage_pgsql_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS)
+@WITH_PGSQL_TRUE@accounting_storage_pgsql_la_CFLAGS = $(PGSQL_CFLAGS)
+@WITH_PGSQL_TRUE@accounting_storage_pgsql_la_LIBADD = \
+@WITH_PGSQL_TRUE@	$(top_builddir)/src/database/libslurm_pgsql.la $(PGSQL_LIBS)
+
+@WITH_PGSQL_FALSE@EXTRA_accounting_storage_pgsql_la_SOURCES = accounting_storage_pgsql.c \
+@WITH_PGSQL_FALSE@		pgsql_jobacct_process.c pgsql_jobacct_process.h
 
 all: all-am
 
@@ -350,7 +369,7 @@ clean-pkglibLTLIBRARIES:
 	  rm -f "$${dir}/so_locations"; \
 	done
 accounting_storage_pgsql.la: $(accounting_storage_pgsql_la_OBJECTS) $(accounting_storage_pgsql_la_DEPENDENCIES) 
-	$(accounting_storage_pgsql_la_LINK) -rpath $(pkglibdir) $(accounting_storage_pgsql_la_OBJECTS) $(accounting_storage_pgsql_la_LIBADD) $(LIBS)
+	$(accounting_storage_pgsql_la_LINK) $(am_accounting_storage_pgsql_la_rpath) $(accounting_storage_pgsql_la_OBJECTS) $(accounting_storage_pgsql_la_LIBADD) $(LIBS)
 
 mostlyclean-compile:
 	-rm -f *.$(OBJEXT)
@@ -582,9 +601,9 @@ uninstall-am: uninstall-pkglibLTLIBRARIES
 	tags uninstall uninstall-am uninstall-pkglibLTLIBRARIES
 
 
-force:
-$(accounting_storage_pgsql_la_LIBADD) : force
-	@cd `dirname $@` && $(MAKE) `basename $@`
+@WITH_PGSQL_TRUE@force:
+@WITH_PGSQL_TRUE@$(accounting_storage_pgsql_la_LIBADD) : force
+@WITH_PGSQL_TRUE@	@cd `dirname $@` && $(MAKE) `basename $@`
 # Tell versions [3.59,3.63) of GNU make to not export all variables.
 # Otherwise a system limit (for SysV at least) may be exceeded.
 .NOEXPORT:
diff --git a/src/plugins/accounting_storage/pgsql/accounting_storage_pgsql.c b/src/plugins/accounting_storage/pgsql/accounting_storage_pgsql.c
index 4d4ab0aa22b5cd731ace13c8aefc4d42262b1525..060d6b4a6f1a6799f5a9059b355f2cb25e739140 100644
--- a/src/plugins/accounting_storage/pgsql/accounting_storage_pgsql.c
+++ b/src/plugins/accounting_storage/pgsql/accounting_storage_pgsql.c
@@ -9,7 +9,8 @@
  *  Written by Danny Auble <da@llnl.gov>
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -76,7 +77,6 @@ const uint32_t plugin_version = 100;
 #ifndef HAVE_PGSQL
 typedef void PGconn;
 #else
-#define DEFAULT_ACCT_DB "slurm_acct_db"
 
 static pgsql_db_info_t *pgsql_db_info = NULL;
 static char *pgsql_db_name = NULL;
@@ -137,7 +137,7 @@ static pgsql_db_info_t *_pgsql_acct_create_db_info()
 	/* it turns out it is better, when using defaults, to let
 	   postgres handle them on its own terms */
 	if(!db_info->port) {
-		db_info->port = 5432;
+		db_info->port = DEFAULT_PGSQL_PORT;
 		slurm_set_accounting_storage_port(db_info->port);
 	}
 	db_info->host = slurm_get_accounting_storage_host();
@@ -731,20 +731,20 @@ extern int init ( void )
 
 	location = slurm_get_accounting_storage_loc();
 	if(!location)
-		pgsql_db_name = xstrdup(DEFAULT_ACCT_DB);
+		pgsql_db_name = xstrdup(DEFAULT_ACCOUNTING_DB);
 	else {
 		int i = 0;
 		while(location[i]) {
 			if(location[i] == '.' || location[i] == '/') {
 				debug("%s doesn't look like a database "
 				      "name using %s",
-				      location, DEFAULT_ACCT_DB);
+				      location, DEFAULT_ACCOUNTING_DB);
 				break;
 			}
 			i++;
 		}
 		if(location[i]) {
-			pgsql_db_name = xstrdup(DEFAULT_ACCT_DB);
+			pgsql_db_name = xstrdup(DEFAULT_ACCOUNTING_DB);
 			xfree(location);
 		} else
 			pgsql_db_name = location;
@@ -852,7 +852,13 @@ extern int acct_storage_p_add_qos(PGconn *acct_pgsql_db, uint32_t uid,
 }
 
 extern int acct_storage_p_add_wckeys(PGconn *acct_pgsql_db, uint32_t uid, 
-				  List wckey_list)
+				     List wckey_list)
+{
+	return SLURM_SUCCESS;
+}
+
+extern int acct_storage_p_add_reservation(PGconn *acct_pgsql_db, 
+					   acct_reservation_rec_t *resv)
 {
 	return SLURM_SUCCESS;
 }
@@ -900,6 +906,12 @@ extern List acct_storage_p_modify_wckeys(PGconn *acct_pgsql_db, uint32_t uid,
 	return SLURM_SUCCESS;
 }
 
+extern int acct_storage_p_modify_reservation(PGconn *acct_pgsql_db, 
+					     acct_reservation_rec_t *resv)
+{
+	return SLURM_SUCCESS;
+}
+
 extern List acct_storage_p_remove_users(PGconn *acct_pgsql_db, uint32_t uid,
 					acct_user_cond_t *user_cond)
 {
@@ -944,6 +956,12 @@ extern List acct_storage_p_remove_wckeys(void *db_conn, uint32_t uid,
 	return NULL;
 }
 
+extern int acct_storage_p_remove_reservation(PGconn *acct_pgsql_db, 
+					     acct_reservation_rec_t *resv)
+{
+	return SLURM_SUCCESS;
+}
+
 extern List acct_storage_p_get_users(PGconn *acct_pgsql_db, uid_t uid,
 				     acct_user_cond_t *user_cond)
 {
@@ -962,6 +980,11 @@ extern List acct_storage_p_get_clusters(PGconn *acct_pgsql_db, uid_t uid,
 	return NULL;
 }
 
+extern List acct_storage_p_get_config(void *db_conn)
+{
+	return NULL;
+}
+
 extern List acct_storage_p_get_associations(PGconn *acct_pgsql_db, uid_t uid,
 					    acct_association_cond_t *assoc_cond)
 {
@@ -980,6 +1003,12 @@ extern List acct_storage_p_get_wckeys(void *db_conn, uid_t uid,
 	return NULL;
 }
 
+extern List acct_storage_p_get_reservations(void *mysql_conn, uid_t uid,
+					    acct_reservation_cond_t *resv_cond)
+{
+	return NULL;
+}
+
 extern List acct_storage_p_get_txn(PGconn *acct_pgsql_db, uid_t uid,
 				   acct_txn_cond_t *txn_cond)
 {
@@ -996,7 +1025,8 @@ extern int acct_storage_p_get_usage(PGconn *acct_pgsql_db, uid_t uid,
 }
 
 extern int acct_storage_p_roll_usage(PGconn *acct_pgsql_db, 
-				     time_t sent_start)
+				     time_t sent_start, time_t sent_end,
+				     uint16_t archive_data)
 {
 	int rc = SLURM_SUCCESS;
 
@@ -1077,6 +1107,7 @@ extern int clusteracct_storage_p_register_ctld(PGconn *acct_pgsql_db,
 
 extern int clusteracct_storage_p_cluster_procs(PGconn *acct_pgsql_db,
 					       char *cluster,
+					       char *cluster_nodes,
 					       uint32_t procs,
 					       time_t event_time)
 {
@@ -1170,7 +1201,6 @@ extern int jobacct_storage_p_job_start(PGconn *acct_pgsql_db,
 	char *block_id = NULL;
 	char *query = NULL;
 	int reinit = 0;
-	char *wckey = NULL;
 
 	if (!job_ptr->details || !job_ptr->details->submit_time) {
 		error("jobacct_storage_p_job_start: "
@@ -1188,25 +1218,9 @@ extern int jobacct_storage_p_job_start(PGconn *acct_pgsql_db,
 	priority = (job_ptr->priority == NO_VAL) ?
 		-1L : (long) job_ptr->priority;
 
-	if (job_ptr->name && job_ptr->name[0]) {
-		char *temp = NULL;
-		/* first set the jname to the job_ptr->name */
+	if (job_ptr->name && job_ptr->name[0]) 
 		jname = xstrdup(job_ptr->name);
-		/* then grep for " since that is the delimiter for
-		   the wckey */
-		if((temp = strchr(jname, '\"'))) {
-			/* if we have a wckey set the " to NULL to
-			 * end the jname */
-			temp[0] = '\0';
-			/* increment and copy the remainder */
-			temp++;
-			wckey = xstrdup(temp);
-		}
-	}
-
-	if(!jname || !jname[0]) {
-		/* free jname if something is allocated here */
-		xfree(jname);
+	else {
 		jname = xstrdup("allocation");
 		track_steps = 1;
 	}
@@ -1243,7 +1257,7 @@ extern int jobacct_storage_p_job_start(PGconn *acct_pgsql_db,
 			xstrcat(query, "partition, ");
 		if(block_id) 
 			xstrcat(query, "blockid, ");
-		if(wckey) 
+		if(job_ptr->wckey) 
 			xstrcat(query, "wckey, ");
 		
 		xstrfmtcat(query, 
@@ -1261,8 +1275,8 @@ extern int jobacct_storage_p_job_start(PGconn *acct_pgsql_db,
 			xstrfmtcat(query, "'%s', ", job_ptr->partition);
 		if(block_id) 
 			xstrfmtcat(query, "'%s', ", block_id);
-		if(wckey) 
-			xstrfmtcat(query, "\"%s\", ", wckey);
+		if(job_ptr->wckey) 
+			xstrfmtcat(query, "\"%s\", ", job_ptr->wckey);
 		
 		xstrfmtcat(query, 
 			   "%d, %d, %d, '%s', %u, %u, %u, %u, %u)",
@@ -1301,8 +1315,8 @@ extern int jobacct_storage_p_job_start(PGconn *acct_pgsql_db,
 				   job_ptr->partition);
 		if(block_id)
 			xstrfmtcat(query, "blockid='%s', ", block_id);
-		if(wckey) 
-			xstrfmtcat(query, ", wckey=\"%s\"", wckey);
+		if(job_ptr->wckey) 
+			xstrfmtcat(query, "wckey=\"%s\", ", job_ptr->wckey);
 
 		xstrfmtcat(query, "start=%d, name='%s', state=%u, "
 			   "alloc_cpus=%u, associd=%d where id=%d",
@@ -1314,8 +1328,6 @@ extern int jobacct_storage_p_job_start(PGconn *acct_pgsql_db,
 	}
 	xfree(block_id);
 	xfree(jname);
-	xfree(wckey);
-
 	xfree(query);
 	
 	return rc;
@@ -1519,7 +1531,7 @@ extern int jobacct_storage_p_step_complete(PGconn *acct_pgsql_db,
 	
 	if (jobacct == NULL) {
 		/* JobAcctGather=jobacct_gather/none, no data to process */
-		bzero(&dummy_jobacct, sizeof(dummy_jobacct));
+		memset(&dummy_jobacct, 0, sizeof(dummy_jobacct));
 		jobacct = &dummy_jobacct;
 	}
 
diff --git a/src/plugins/accounting_storage/pgsql/pgsql_jobacct_process.c b/src/plugins/accounting_storage/pgsql/pgsql_jobacct_process.c
index 2f5d422dc28e46d1bb703286e49acc589cbfad34..12f6b8e5c2f9e6ae3466da6509003acf10f52042 100644
--- a/src/plugins/accounting_storage/pgsql/pgsql_jobacct_process.c
+++ b/src/plugins/accounting_storage/pgsql/pgsql_jobacct_process.c
@@ -9,7 +9,8 @@
  *  Written by Danny Auble <da@llnl.gov>
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -390,6 +391,7 @@ no_cond:
 
 	for (i = 0; i < PQntuples(result); i++) {
 		char *id = PQgetvalue(result, i, JOB_REQ_ID);
+		bool job_ended = 0;
 
 		curr_id = atoi(PQgetvalue(result, i, JOB_REQ_JOBID));
 
@@ -432,24 +434,25 @@ no_cond:
 		job->submit = atoi(PQgetvalue(result, i, JOB_REQ_SUBMIT));
 		job->start = atoi(PQgetvalue(result, i, JOB_REQ_START));
 		job->end = atoi(PQgetvalue(result, i, JOB_REQ_ENDTIME));
-		job->suspended = atoi(PQgetvalue(result, i, JOB_REQ_SUSPENDED));
-		if(!job->end) {
-			job->elapsed = now - job->start;
-		} else {
-			job->elapsed = job->end - job->start;
+
+		/* since job->end could be changed later, record here
+		   whether the job has already ended */
+		if(job->end) {
+			job_ended = 1;
+			if(!job->start || (job->start > job->end))
+				job->start = job->end;
 		}
-		job->elapsed -= job->suspended;
 
-		if(job_cond && job_cond->usage_start) {
+		if(job_cond && !job_cond->without_usage_truncation
+		   && job_cond->usage_start) {
 			if(job->start && (job->start < job_cond->usage_start))
 				job->start = job_cond->usage_start;
 
-			if(!job->start && job->end)
-				job->start = job->end;
-
 			if(!job->end || job->end > job_cond->usage_end) 
 				job->end = job_cond->usage_end;
 
+			if(!job->start && job->end)
+				job->start = job->end;
+			
 			job->elapsed = job->end - job->start;
 
 			if(atoi(PQgetvalue(result, i, JOB_REQ_SUSPENDED))) {
@@ -501,14 +504,21 @@ no_cond:
 		} else {
 			job->suspended =
 				atoi(PQgetvalue(result, i, JOB_REQ_SUSPENDED));
-			if(!job->end) {
+
+			if(!job->start) {
+				job->elapsed = 0;
+			} else if(!job->end) {
 				job->elapsed = now - job->start;
 			} else {
 				job->elapsed = job->end - job->start;
 			}
+
 			job->elapsed -= job->suspended;
 		}
 
+		if((int)job->elapsed < 0)
+			job->elapsed = 0;
+
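
The truncation applied above can be read as one small clamping rule; a
hypothetical helper (not part of this patch) that captures it:

    /* Clamp a record's [start, end) to the query's usage window so
     * records straddling the window are only partially counted. */
    static void truncate_to_window(time_t *start, time_t *end,
                                   time_t win_start, time_t win_end)
    {
            if (*start && *start < win_start)
                    *start = win_start;
            if (!*end || *end > win_end)
                    *end = win_end;
            if (!*start && *end)    /* never started inside the window */
                    *start = *end;  /* elapsed comes out as zero */
    }
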
 		job->jobid = curr_id;
 		job->jobname = xstrdup(PQgetvalue(result, i, JOB_REQ_NAME));
 		job->gid = atoi(PQgetvalue(result, i, JOB_REQ_GID));
@@ -583,7 +593,9 @@ no_cond:
 		xfree(query);
 		for(j = 0; j < PQntuples(step_result); j++) {
 			step = create_jobacct_step_rec();
-			step->jobid = job->jobid;
+			step->job_ptr = job;
+			if(!job->first_step_ptr)
+				job->first_step_ptr = step;
 			list_append(job->steps, step);
 			step->stepid = atoi(
 				PQgetvalue(step_result, j, STEP_REQ_STEPID));
@@ -600,6 +612,27 @@ no_cond:
 				PQgetvalue(step_result, j, STEP_REQ_START));
 			step->end = atoi(
 				PQgetvalue(step_result, j, STEP_REQ_ENDTIME));
+
+			/* if the job has ended, end the step as well */
+			if(!step->end && job_ended) {
+				step->end = job->end;
+				step->state = job->state;
+			}
+
+			if(job_cond && !job_cond->without_usage_truncation
+			   && job_cond->usage_start) {
+				if(step->start 
+				   && (step->start < job_cond->usage_start))
+					step->start = job_cond->usage_start;
+				
+				if(!step->start && step->end)
+					step->start = step->end;
+				
+				if(!step->end 
+				   || (step->end > job_cond->usage_end)) 
+					step->end = job_cond->usage_end;
+			}
+
 			/* figure this out by start stop */
 			step->suspended = atoi(
 				PQgetvalue(step_result, j, STEP_REQ_SUSPENDED));
@@ -609,6 +642,10 @@ no_cond:
 				step->elapsed = step->end - step->start;
 			}
 			step->elapsed -= step->suspended;
+
+			if((int)step->elapsed < 0)
+				step->elapsed = 0;
+
 			step->user_cpu_sec = atoi(
 				PQgetvalue(step_result, j, STEP_REQ_USER_SEC));
 			step->user_cpu_usec = atoi(
diff --git a/src/plugins/accounting_storage/pgsql/pgsql_jobacct_process.h b/src/plugins/accounting_storage/pgsql/pgsql_jobacct_process.h
index d90cd7b15d26ae1ff8b339c34416400d4a28f13a..d12ed58276f146183a5327ff5988a5e232981823 100644
--- a/src/plugins/accounting_storage/pgsql/pgsql_jobacct_process.h
+++ b/src/plugins/accounting_storage/pgsql/pgsql_jobacct_process.h
@@ -9,7 +9,8 @@
  *  Written by Danny Auble <da@llnl.gov>
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/plugins/accounting_storage/slurmdbd/Makefile.in b/src/plugins/accounting_storage/slurmdbd/Makefile.in
index ada513ec4528801e9d846c461df75a1a246e591d..4ac742cede1728656d65757cb44c443f171d550e 100644
--- a/src/plugins/accounting_storage/slurmdbd/Makefile.in
+++ b/src/plugins/accounting_storage/slurmdbd/Makefile.in
@@ -43,14 +43,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -112,6 +116,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
diff --git a/src/plugins/accounting_storage/slurmdbd/accounting_storage_slurmdbd.c b/src/plugins/accounting_storage/slurmdbd/accounting_storage_slurmdbd.c
index 86093f83b4ab16f966d59d7d899e16816c9ef84b..1c26ffef9cc2b4476ef304fb80d561392ed0aab6 100644
--- a/src/plugins/accounting_storage/slurmdbd/accounting_storage_slurmdbd.c
+++ b/src/plugins/accounting_storage/slurmdbd/accounting_storage_slurmdbd.c
@@ -7,7 +7,8 @@
  *  Written by Danny Auble <da@llnl.gov>
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -175,6 +176,7 @@ extern int acct_storage_p_add_users(void *db_conn, uint32_t uid, List user_list)
 	dbd_list_msg_t get_msg;
 	int rc, resp_code;
 
+	memset(&get_msg, 0, sizeof(dbd_list_msg_t));
 	get_msg.my_list = user_list;
 
 	req.msg_type = DBD_ADD_USERS;
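
The memset() calls added throughout this file guard against members that
were added to these message structures later being packed with stack
garbage.  An equivalent spelling, shown only as a sketch, is a zero
initializer:

    dbd_list_msg_t get_msg = { 0 };     /* every member starts zeroed */
    get_msg.my_list = user_list;        /* then set only what this RPC
                                         * actually uses */
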
@@ -195,6 +197,7 @@ extern int acct_storage_p_add_coord(void *db_conn, uint32_t uid,
 	dbd_acct_coord_msg_t get_msg;
 	int rc, resp_code;
 
+	memset(&get_msg, 0, sizeof(dbd_acct_coord_msg_t));
 	get_msg.acct_list = acct_list;
 	get_msg.cond = user_cond;
 
@@ -215,6 +218,7 @@ extern int acct_storage_p_add_accts(void *db_conn, uint32_t uid, List acct_list)
 	dbd_list_msg_t get_msg;
 	int rc, resp_code;
 
+	memset(&get_msg, 0, sizeof(dbd_list_msg_t));
 	get_msg.my_list = acct_list;
 
 	req.msg_type = DBD_ADD_ACCOUNTS;
@@ -235,6 +239,7 @@ extern int acct_storage_p_add_clusters(void *db_conn, uint32_t uid,
 	dbd_list_msg_t get_msg;
 	int rc, resp_code;
 
+	memset(&get_msg, 0, sizeof(dbd_list_msg_t));
 	get_msg.my_list = cluster_list;
 
 	req.msg_type = DBD_ADD_CLUSTERS;
@@ -256,6 +261,7 @@ extern int acct_storage_p_add_associations(void *db_conn, uint32_t uid,
 	dbd_list_msg_t get_msg;
 	int rc, resp_code;
 
+	memset(&get_msg, 0, sizeof(dbd_list_msg_t));
 	get_msg.my_list = association_list;
 
 	req.msg_type = DBD_ADD_ASSOCS;
@@ -276,6 +282,7 @@ extern int acct_storage_p_add_qos(void *db_conn, uint32_t uid,
 	dbd_list_msg_t get_msg;
 	int rc, resp_code;
 
+	memset(&get_msg, 0, sizeof(dbd_list_msg_t));
 	get_msg.my_list = qos_list;
 
 	req.msg_type = DBD_ADD_QOS;
@@ -290,12 +297,13 @@ extern int acct_storage_p_add_qos(void *db_conn, uint32_t uid,
 }
 
 extern int acct_storage_p_add_wckeys(void *db_conn, uint32_t uid,
-				  List wckey_list)
+				     List wckey_list)
 {
 	slurmdbd_msg_t req;
 	dbd_list_msg_t get_msg;
 	int rc, resp_code;
 
+	memset(&get_msg, 0, sizeof(dbd_list_msg_t));
 	get_msg.my_list = wckey_list;
 
 	req.msg_type = DBD_ADD_WCKEYS;
@@ -309,6 +317,28 @@ extern int acct_storage_p_add_wckeys(void *db_conn, uint32_t uid,
 	return rc;
 }
 
+extern int acct_storage_p_add_reservation(void *db_conn, 
+					   acct_reservation_rec_t *resv)
+{
+	slurmdbd_msg_t req;
+	dbd_rec_msg_t get_msg;
+	int rc, resp_code;
+
+	memset(&get_msg, 0, sizeof(dbd_rec_msg_t));
+	get_msg.rec = resv;
+
+	req.msg_type = DBD_ADD_RESV;
+	req.data = &get_msg;
+
+	rc = slurm_send_slurmdbd_recv_rc_msg(SLURMDBD_VERSION,
+					     &req, &resp_code);
+	
+	if(resp_code != SLURM_SUCCESS)
+		rc = resp_code;
+
+	return rc;
+}
+
 extern List acct_storage_p_modify_users(void *db_conn, uint32_t uid,
 					acct_user_cond_t *user_cond,
 					acct_user_rec_t *user)
@@ -319,6 +349,7 @@ extern List acct_storage_p_modify_users(void *db_conn, uint32_t uid,
 	List ret_list = NULL;
 	int rc;
 
+	memset(&get_msg, 0, sizeof(dbd_modify_msg_t));
 	get_msg.cond = user_cond;
 	get_msg.rec = user;
 
@@ -359,6 +390,7 @@ extern List acct_storage_p_modify_accounts(void *db_conn, uint32_t uid,
 	int rc;
 	List ret_list = NULL;
 
+	memset(&get_msg, 0, sizeof(dbd_modify_msg_t));
 	get_msg.cond = acct_cond;
 	get_msg.rec = acct;
 
@@ -400,6 +432,7 @@ extern List acct_storage_p_modify_clusters(void *db_conn, uint32_t uid,
 	dbd_list_msg_t *got_msg;
 	List ret_list = NULL;
 
+	memset(&get_msg, 0, sizeof(dbd_modify_msg_t));
 	get_msg.cond = cluster_cond;
 	get_msg.rec = cluster;
 
@@ -444,6 +477,7 @@ extern List acct_storage_p_modify_associations(
 	List ret_list = NULL;
 
 
+	memset(&get_msg, 0, sizeof(dbd_modify_msg_t));
 	get_msg.cond = assoc_cond;
 	get_msg.rec = assoc;
 
@@ -484,6 +518,7 @@ extern List acct_storage_p_modify_qos(void *db_conn, uint32_t uid,
 	List ret_list = NULL;
 	int rc;
 
+	memset(&get_msg, 0, sizeof(dbd_modify_msg_t));
 	get_msg.cond = qos_cond;
 	get_msg.rec = qos;
 
@@ -524,6 +559,7 @@ extern List acct_storage_p_modify_wckeys(void *db_conn, uint32_t uid,
 	List ret_list = NULL;
 	int rc;
 
+	memset(&get_msg, 0, sizeof(dbd_modify_msg_t));
 	get_msg.cond = wckey_cond;
 	get_msg.rec = wckey;
 
@@ -554,6 +590,28 @@ extern List acct_storage_p_modify_wckeys(void *db_conn, uint32_t uid,
 	return ret_list;
 }
 
+extern int acct_storage_p_modify_reservation(void *db_conn, 
+					   acct_reservation_rec_t *resv)
+{
+	slurmdbd_msg_t req;
+	dbd_rec_msg_t get_msg;
+	int rc, resp_code;
+
+	memset(&get_msg, 0, sizeof(dbd_rec_msg_t));
+	get_msg.rec = resv;
+
+	req.msg_type = DBD_MODIFY_RESV;
+	req.data = &get_msg;
+
+	rc = slurm_send_slurmdbd_recv_rc_msg(SLURMDBD_VERSION,
+					     &req, &resp_code);
+	
+	if(resp_code != SLURM_SUCCESS)
+		rc = resp_code;
+
+	return rc;
+}
+
 extern List acct_storage_p_remove_users(void *db_conn, uint32_t uid,
 					acct_user_cond_t *user_cond)
 {
@@ -564,7 +622,7 @@ extern List acct_storage_p_remove_users(void *db_conn, uint32_t uid,
 	dbd_list_msg_t *got_msg;
 	List ret_list = NULL;
 
-
+	memset(&get_msg, 0, sizeof(dbd_cond_msg_t));
 	get_msg.cond = user_cond;
 
 	req.msg_type = DBD_REMOVE_USERS;
@@ -605,7 +663,7 @@ extern List acct_storage_p_remove_coord(void *db_conn, uint32_t uid,
 	dbd_list_msg_t *got_msg;
 	List ret_list = NULL;
 
-
+	memset(&get_msg, 0, sizeof(dbd_acct_coord_msg_t));
 	get_msg.acct_list = acct_list;
 	get_msg.cond = user_cond;
 
@@ -646,7 +704,7 @@ extern List acct_storage_p_remove_accts(void *db_conn, uint32_t uid,
 	dbd_list_msg_t *got_msg;
 	List ret_list = NULL;
 
-
+	memset(&get_msg, 0, sizeof(dbd_cond_msg_t));
 	get_msg.cond = acct_cond;
 
 	req.msg_type = DBD_REMOVE_ACCOUNTS;
@@ -686,7 +744,7 @@ extern List acct_storage_p_remove_clusters(void *db_conn, uint32_t uid,
 	dbd_list_msg_t *got_msg;
 	List ret_list = NULL;
 
-
+	memset(&get_msg, 0, sizeof(dbd_cond_msg_t));
 	get_msg.cond = cluster_cond;
 
 	req.msg_type = DBD_REMOVE_CLUSTERS;
@@ -728,6 +786,7 @@ extern List acct_storage_p_remove_associations(
 	List ret_list = NULL;
 
 
+	memset(&get_msg, 0, sizeof(dbd_cond_msg_t));
 	get_msg.cond = assoc_cond;
 
 	req.msg_type = DBD_REMOVE_ASSOCS;
@@ -769,6 +828,7 @@ extern List acct_storage_p_remove_qos(
 	List ret_list = NULL;
 
 
+	memset(&get_msg, 0, sizeof(dbd_cond_msg_t));
 	get_msg.cond = qos_cond;
 
 	req.msg_type = DBD_REMOVE_QOS;
@@ -810,6 +870,7 @@ extern List acct_storage_p_remove_wckeys(
 	List ret_list = NULL;
 
 
+	memset(&get_msg, 0, sizeof(dbd_cond_msg_t));
 	get_msg.cond = wckey_cond;
 
 	req.msg_type = DBD_REMOVE_WCKEYS;
@@ -839,6 +900,28 @@ extern List acct_storage_p_remove_wckeys(
 	return ret_list;
 }
 
+extern int acct_storage_p_remove_reservation(void *db_conn, 
+					   acct_reservation_rec_t *resv)
+{
+	slurmdbd_msg_t req;
+	dbd_rec_msg_t get_msg;
+	int rc, resp_code;
+
+	memset(&get_msg, 0, sizeof(dbd_rec_msg_t));
+	get_msg.rec = resv;
+
+	req.msg_type = DBD_REMOVE_RESV;
+	req.data = &get_msg;
+
+	rc = slurm_send_slurmdbd_recv_rc_msg(SLURMDBD_VERSION,
+					     &req, &resp_code);
+	
+	if(resp_code != SLURM_SUCCESS)
+		rc = resp_code;
+
+	return rc;
+}
+
 extern List acct_storage_p_get_users(void *db_conn, uid_t uid,
 				     acct_user_cond_t *user_cond)
 {
@@ -848,6 +931,7 @@ extern List acct_storage_p_get_users(void *db_conn, uid_t uid,
 	int rc;
 	List ret_list = NULL;
 
+	memset(&get_msg, 0, sizeof(dbd_cond_msg_t));
 	get_msg.cond = user_cond;
 	
 	req.msg_type = DBD_GET_USERS;
@@ -886,6 +970,7 @@ extern List acct_storage_p_get_accts(void *db_conn, uid_t uid,
 	int rc;
 	List ret_list = NULL;
 
+	memset(&get_msg, 0, sizeof(dbd_cond_msg_t));
 	get_msg.cond = acct_cond;
 	
 	req.msg_type = DBD_GET_ACCOUNTS;
@@ -925,6 +1010,7 @@ extern List acct_storage_p_get_clusters(void *db_conn, uid_t uid,
 	int rc;
 	List ret_list = NULL;
 
+	memset(&get_msg, 0, sizeof(dbd_cond_msg_t));
 	get_msg.cond = cluster_cond;
 	
 	req.msg_type = DBD_GET_CLUSTERS;
@@ -955,6 +1041,40 @@ extern List acct_storage_p_get_clusters(void *db_conn, uid_t uid,
 	return ret_list;
 }
 
+extern List acct_storage_p_get_config(void)
+{
+	slurmdbd_msg_t req, resp;
+	dbd_list_msg_t *got_msg;
+	int rc;
+	List ret_list = NULL;
+	
+	req.msg_type = DBD_GET_CONFIG;
+	req.data = NULL;
+	rc = slurm_send_recv_slurmdbd_msg(SLURMDBD_VERSION, &req, &resp);
+
+	if (rc != SLURM_SUCCESS)
+		error("slurmdbd: DBD_GET_CONFIG failure: %m");
+	else if (resp.msg_type == DBD_RC) {
+		dbd_rc_msg_t *msg = resp.data;
+		if(msg->return_code == SLURM_SUCCESS) {
+			info("%s", msg->comment);
+			ret_list = list_create(NULL);
+		} else
+			error("%s", msg->comment);
+		slurmdbd_free_rc_msg(SLURMDBD_VERSION, msg);
+	} else if (resp.msg_type != DBD_GOT_CONFIG) {
+		error("slurmdbd: response type not DBD_GOT_CONFIG: %u", 
+		      resp.msg_type);
+	} else {
+		got_msg = (dbd_list_msg_t *) resp.data;
+		ret_list = got_msg->my_list;
+		got_msg->my_list = NULL;
+		slurmdbd_free_list_msg(SLURMDBD_VERSION, got_msg);
+	}
+
+	return ret_list;
+}
+
 extern List acct_storage_p_get_associations(void *db_conn, uid_t uid,
 					    acct_association_cond_t *assoc_cond)
 {
@@ -964,6 +1084,7 @@ extern List acct_storage_p_get_associations(void *db_conn, uid_t uid,
 	int rc;
 	List ret_list = NULL;
 
+	memset(&get_msg, 0, sizeof(dbd_cond_msg_t));
 	get_msg.cond = assoc_cond;
 	
 	req.msg_type = DBD_GET_ASSOCS;
@@ -1002,6 +1123,7 @@ extern List acct_storage_p_get_qos(void *db_conn, uid_t uid,
 	int rc;
 	List ret_list = NULL;
 
+	memset(&get_msg, 0, sizeof(dbd_cond_msg_t));
 	get_msg.cond = qos_cond;
 
 	req.msg_type = DBD_GET_QOS;
@@ -1047,6 +1169,7 @@ extern List acct_storage_p_get_wckeys(void *db_conn, uid_t uid,
 	int rc;
 	List ret_list = NULL;
 
+	memset(&get_msg, 0, sizeof(dbd_cond_msg_t));
 	get_msg.cond = wckey_cond;
 
 	req.msg_type = DBD_GET_WCKEYS;
@@ -1083,6 +1206,52 @@ extern List acct_storage_p_get_wckeys(void *db_conn, uid_t uid,
 	return ret_list;
 }
 
+extern List acct_storage_p_get_reservations(void *mysql_conn, uid_t uid,
+					    acct_reservation_cond_t *resv_cond)
+{
+	slurmdbd_msg_t req, resp;
+	dbd_cond_msg_t get_msg;
+	dbd_list_msg_t *got_msg;
+	int rc;
+	List ret_list = NULL;
+
+	memset(&get_msg, 0, sizeof(dbd_cond_msg_t));
+	get_msg.cond = resv_cond;
+
+	req.msg_type = DBD_GET_RESVS;
+	req.data = &get_msg;
+	rc = slurm_send_recv_slurmdbd_msg(SLURMDBD_VERSION, &req, &resp);
+
+	if (rc != SLURM_SUCCESS)
+		error("slurmdbd: DBD_GET_RESVS failure: %m");
+	else if (resp.msg_type == DBD_RC) {
+		dbd_rc_msg_t *msg = resp.data;
+		if(msg->return_code == SLURM_SUCCESS) {
+			info("%s", msg->comment);
+			ret_list = list_create(NULL);
+		} else
+			error("%s", msg->comment);
+		slurmdbd_free_rc_msg(SLURMDBD_VERSION, msg);
+	} else if (resp.msg_type != DBD_GOT_RESVS) {
+		error("slurmdbd: response type not DBD_GOT_RESVS: %u", 
+		      resp.msg_type);
+	} else {
+		got_msg = (dbd_list_msg_t *) resp.data;
+		/* do this just for this type since it could be called
+		 * multiple times; if we send back an empty list
+		 * instead of no list we will only call this once.
+		 */
+		if(!got_msg->my_list)
+		        ret_list = list_create(NULL);
+		else 
+			ret_list = got_msg->my_list;
+		got_msg->my_list = NULL;
+		slurmdbd_free_list_msg(SLURMDBD_VERSION, got_msg);
+	}
+
+	return ret_list;
+}
+
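
Why the empty list matters here, as the comment above notes: a NULL
return is ambiguous with "the request failed", so callers would re-issue
the query on every pass, while an empty list is a definitive, cacheable
answer.  A hypothetical caller sketch:

    List resvs = acct_storage_p_get_reservations(db_conn, uid, resv_cond);

    if (!resvs) {
            /* RPC or slurmdbd failure: worth retrying later */
    } else if (!list_count(resvs)) {
            /* definitive answer: no matching reservations, and no
             * need to ask again */
    }
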
 extern List acct_storage_p_get_txn(void *db_conn, uid_t uid,
 				   acct_txn_cond_t *txn_cond)
 {
@@ -1092,6 +1261,7 @@ extern List acct_storage_p_get_txn(void *db_conn, uid_t uid,
 	int rc;
 	List ret_list = NULL;
 
+	memset(&get_msg, 0, sizeof(dbd_cond_msg_t));
 	get_msg.cond = txn_cond;
 
 	req.msg_type = DBD_GET_TXN;
@@ -1133,6 +1303,7 @@ extern int acct_storage_p_get_usage(void *db_conn, uid_t uid,
 	List *my_list = NULL;
 	int rc;
 
+	memset(&get_msg, 0, sizeof(dbd_usage_msg_t));
 	get_msg.rec = in;
 	get_msg.start = start;
 	get_msg.end = end;
@@ -1195,13 +1366,17 @@ extern int acct_storage_p_get_usage(void *db_conn, uid_t uid,
 }
 
 extern int acct_storage_p_roll_usage(void *db_conn, 
-				     time_t sent_start)
+				     time_t sent_start, time_t sent_end,
+				     uint16_t archive_data)
 {
 	slurmdbd_msg_t req;
 	dbd_roll_usage_msg_t get_msg;
 	int rc, resp_code;
 	
+	memset(&get_msg, 0, sizeof(dbd_roll_usage_msg_t));
+	get_msg.end = sent_end;
 	get_msg.start = sent_start;
+	get_msg.archive_data = archive_data;
 
 	req.msg_type = DBD_ROLL_USAGE;
 
@@ -1212,7 +1387,8 @@ extern int acct_storage_p_roll_usage(void *db_conn,
 
 	if(resp_code != SLURM_SUCCESS)
 		rc = resp_code;
-	
+	else
+		info("SUCCESS");
 	return rc;
 }
 
@@ -1236,12 +1412,14 @@ extern int clusteracct_storage_p_node_down(void *db_conn,
 	else
 		my_reason = node_ptr->reason;
 
+	memset(&req, 0, sizeof(dbd_node_state_msg_t));
 	req.cluster_name = cluster;
 	req.cpu_count = cpus;
 	req.hostlist   = node_ptr->name;
 	req.new_state  = DBD_NODE_STATE_DOWN;
 	req.event_time = event_time;
 	req.reason     = my_reason;
+	req.state      = node_ptr->node_state;
 	msg.msg_type   = DBD_NODE_STATE;
 	msg.data       = &req;
 
@@ -1250,6 +1428,7 @@ extern int clusteracct_storage_p_node_down(void *db_conn,
 
 	return SLURM_SUCCESS;
 }
+
 extern int clusteracct_storage_p_node_up(void *db_conn,
 					 char *cluster,
 					 struct node_record *node_ptr,
@@ -1258,6 +1437,7 @@ extern int clusteracct_storage_p_node_up(void *db_conn,
 	slurmdbd_msg_t msg;
 	dbd_node_state_msg_t req;
 
+	memset(&req, 0, sizeof(dbd_node_state_msg_t));
 	req.cluster_name = cluster;
 	req.hostlist   = node_ptr->name;
 	req.new_state  = DBD_NODE_STATE_UP;
@@ -1274,6 +1454,7 @@ extern int clusteracct_storage_p_node_up(void *db_conn,
 
 extern int clusteracct_storage_p_cluster_procs(void *db_conn,
 					       char *cluster,
+					       char *cluster_nodes,
 					       uint32_t procs,
 					       time_t event_time)
 {
@@ -1282,7 +1463,9 @@ extern int clusteracct_storage_p_cluster_procs(void *db_conn,
 	int rc = SLURM_ERROR;
 
 	debug2("Sending info for cluster %s", cluster);
+	memset(&req, 0, sizeof(dbd_cluster_procs_msg_t));
 	req.cluster_name = cluster;
+	req.cluster_nodes = cluster_nodes;
 	req.proc_count   = procs;
 	req.event_time   = event_time;
 	msg.msg_type     = DBD_CLUSTER_PROCS;
@@ -1301,6 +1484,8 @@ extern int clusteracct_storage_p_register_ctld(void *db_conn,
 	dbd_register_ctld_msg_t req;
 	info("Registering slurmctld for cluster %s at port %u with slurmdbd.",
 	     cluster, port);
+	memset(&req, 0, sizeof(dbd_register_ctld_msg_t));
+
 	req.cluster_name = cluster;
 	req.port         = port;
 	msg.msg_type     = DBD_REGISTER_CTLD;
@@ -1323,6 +1508,8 @@ extern int clusteracct_storage_p_get_usage(
 	acct_cluster_rec_t *got_rec;
 	int rc;
 
+	memset(&get_msg, 0, sizeof(dbd_usage_msg_t));
+
 	get_msg.rec = cluster_rec;
 	get_msg.start = start;
 	get_msg.end = end;
@@ -1366,15 +1553,17 @@ extern int jobacct_storage_p_job_start(void *db_conn, char *cluster_name,
 {
 	slurmdbd_msg_t msg, msg_rc;
 	dbd_job_start_msg_t req;
-	dbd_job_start_rc_msg_t *resp;
+	dbd_id_rc_msg_t *resp;
 	char *block_id = NULL;
 	int rc = SLURM_SUCCESS;
+	char temp_bit[BUF_SIZE];
 
 	if (!job_ptr->details || !job_ptr->details->submit_time) {
 		error("jobacct_storage_p_job_start: "
 		      "Not inputing this job, it has no submit time.");
 		return SLURM_ERROR;
 	}
+	memset(&req, 0, sizeof(dbd_job_start_msg_t));
 
 	req.alloc_cpus    = job_ptr->total_procs;
 	req.cluster       = cluster_name;
@@ -1384,6 +1573,11 @@ extern int jobacct_storage_p_job_start(void *db_conn, char *cluster_name,
 	select_g_get_jobinfo(job_ptr->select_jobinfo, 
 			     SELECT_DATA_BLOCK_ID, 
 			     &block_id);
+	select_g_get_jobinfo(job_ptr->select_jobinfo, 
+			     SELECT_DATA_NODE_CNT, 
+			     &req.alloc_nodes);
+#else
+	req.alloc_nodes      = job_ptr->node_cnt;
 #endif
 	req.block_id      = block_id;
 	req.db_index      = job_ptr->db_index;
@@ -1394,10 +1588,17 @@ extern int jobacct_storage_p_job_start(void *db_conn, char *cluster_name,
 	req.job_state     = job_ptr->job_state & (~JOB_COMPLETING);
 	req.name          = job_ptr->name;
 	req.nodes         = job_ptr->nodes;
+	if(job_ptr->node_bitmap) 
+		req.node_inx = bit_fmt(temp_bit, sizeof(temp_bit), 
+				       job_ptr->node_bitmap);
+	
 	req.partition     = job_ptr->partition;
 	req.req_cpus      = job_ptr->num_procs;
+	req.resv_id       = job_ptr->resv_id;
 	req.priority      = job_ptr->priority;
 	req.start_time    = job_ptr->start_time;
+	req.timelimit     = job_ptr->time_limit;
+	req.wckey         = job_ptr->wckey;
 	if (job_ptr->details)
 		req.submit_time   = job_ptr->details->submit_time;
 	req.uid           = job_ptr->user_id;
@@ -1426,15 +1627,15 @@ extern int jobacct_storage_p_job_start(void *db_conn, char *cluster_name,
 			xfree(block_id);
 			return SLURM_ERROR;
 		}
-	} else if (msg_rc.msg_type != DBD_JOB_START_RC) {
-		error("slurmdbd: response type not DBD_GOT_JOBS: %u", 
+	} else if (msg_rc.msg_type != DBD_ID_RC) {
+		error("slurmdbd: response type not DBD_ID_RC: %u", 
 		      msg_rc.msg_type);
 	} else {
-		resp = (dbd_job_start_rc_msg_t *) msg_rc.data;
-		job_ptr->db_index = resp->db_index;
+		resp = (dbd_id_rc_msg_t *) msg_rc.data;
+		job_ptr->db_index = resp->id;
 		rc = resp->return_code;
 		//info("here got %d for return code", resp->return_code);
-		slurmdbd_free_job_start_rc_msg(SLURMDBD_VERSION, resp);
+		slurmdbd_free_id_rc_msg(SLURMDBD_VERSION, resp);
 	}
 	xfree(block_id);
 	
@@ -1457,6 +1658,8 @@ extern int jobacct_storage_p_job_complete(void *db_conn,
 		return SLURM_ERROR;
 	}
 
+	memset(&req, 0, sizeof(dbd_job_comp_msg_t));
+
 	req.assoc_id    = job_ptr->assoc_id;
 	req.db_index    = job_ptr->db_index;
 	req.end_time    = job_ptr->end_time;
@@ -1483,15 +1686,16 @@ extern int jobacct_storage_p_job_complete(void *db_conn,
 extern int jobacct_storage_p_step_start(void *db_conn,
 					struct step_record *step_ptr)
 {
-	uint32_t cpus = 0;
+	uint32_t cpus = 0, tasks = 0, nodes = 0, task_dist = 0;
 	char node_list[BUFFER_SIZE];
 	slurmdbd_msg_t msg;
 	dbd_step_start_msg_t req;
+	char temp_bit[BUF_SIZE];
 
 #ifdef HAVE_BG
 	char *ionodes = NULL;
 
-	cpus = step_ptr->job_ptr->num_procs;
+	cpus = tasks = step_ptr->job_ptr->num_procs;
 	select_g_get_jobinfo(step_ptr->job_ptr->select_jobinfo, 
 			     SELECT_DATA_IONODES, 
 			     &ionodes);
@@ -1503,14 +1707,20 @@ extern int jobacct_storage_p_step_start(void *db_conn,
 		snprintf(node_list, BUFFER_SIZE, "%s",
 			 step_ptr->job_ptr->nodes);
 	}
-	
+	select_g_get_jobinfo(step_ptr->job_ptr->select_jobinfo, 
+			     SELECT_DATA_NODE_CNT, 
+			     &nodes);
 #else
 	if (!step_ptr->step_layout || !step_ptr->step_layout->task_cnt) {
-		cpus = step_ptr->job_ptr->total_procs;
+		cpus = tasks = step_ptr->job_ptr->total_procs;
 		snprintf(node_list, BUFFER_SIZE, "%s",
 			 step_ptr->job_ptr->nodes);
+		nodes = step_ptr->job_ptr->node_cnt;
 	} else {
-		cpus = step_ptr->step_layout->task_cnt;
+		cpus = step_ptr->cpu_count; 
+		tasks = step_ptr->step_layout->task_cnt;
+		nodes = step_ptr->step_layout->node_cnt;
+		task_dist = step_ptr->step_layout->task_dist;
 		snprintf(node_list, BUFFER_SIZE, "%s", 
 			 step_ptr->step_layout->node_list);
 	}
@@ -1523,17 +1733,26 @@ extern int jobacct_storage_p_step_start(void *db_conn,
 		      "Not inputing this job, it has no submit time.");
 		return SLURM_ERROR;
 	}
+	memset(&req, 0, sizeof(dbd_step_start_msg_t));
 
 	req.assoc_id    = step_ptr->job_ptr->assoc_id;
 	req.db_index    = step_ptr->job_ptr->db_index;
 	req.job_id      = step_ptr->job_ptr->job_id;
 	req.name        = step_ptr->name;
 	req.nodes       = node_list;
+	if(step_ptr->step_node_bitmap) 
+		req.node_inx = bit_fmt(temp_bit, sizeof(temp_bit), 
+				       step_ptr->step_node_bitmap);
+	req.node_cnt    = nodes;
 	req.start_time  = step_ptr->start_time;
 	if (step_ptr->job_ptr->details)
 		req.job_submit_time   = step_ptr->job_ptr->details->submit_time;
 	req.step_id     = step_ptr->step_id;
+	if (step_ptr->step_layout)
+		task_dist = step_ptr->step_layout->task_dist;
+	req.task_dist   = task_dist;
 	req.total_procs = cpus;
+	req.total_tasks = tasks;
 
 	msg.msg_type    = DBD_STEP_START;
 	msg.data        = &req;
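The new node_inx fields carry the job's or step's node bitmap as a compressed range string produced by bit_fmt(temp_bit, sizeof(temp_bit), bitmap). For illustration, a standalone sketch of that encoding (SLURM's real implementation lives in src/common/bitstring.c; this is not that code):

```c
#include <stdio.h>
#include <stdbool.h>

/* Render set bit positions as a compressed range list ("0-3,8"). */
static void fmt_bits(const bool *bits, int n, char *buf, size_t len)
{
	size_t off = 0;

	buf[0] = '\0';
	for (int i = 0; i < n && off + 1 < len; i++) {
		int j = i;

		if (!bits[i])
			continue;
		while (j + 1 < n && bits[j + 1])
			j++;				/* extend the run */
		off += snprintf(buf + off, len - off, "%s%d",
				off ? "," : "", i);
		if (j > i && off < len)
			off += snprintf(buf + off, len - off, "-%d", j);
		i = j;					/* skip past the run */
	}
}
```

For bits {1,1,1,1,0,0,0,0,1} this yields "0-3,8", the same form stored in req.node_inx above.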
@@ -1550,7 +1769,7 @@ extern int jobacct_storage_p_step_start(void *db_conn,
 extern int jobacct_storage_p_step_complete(void *db_conn,
 					   struct step_record *step_ptr)
 {
-	uint32_t cpus = 0;
+	uint32_t cpus = 0, tasks = 0;
 	char node_list[BUFFER_SIZE];
 	slurmdbd_msg_t msg;
 	dbd_step_comp_msg_t req;
@@ -1558,7 +1777,7 @@ extern int jobacct_storage_p_step_complete(void *db_conn,
 #ifdef HAVE_BG
 	char *ionodes = NULL;
 
-	cpus = step_ptr->job_ptr->num_procs;
+	cpus = tasks = step_ptr->job_ptr->num_procs;
 	select_g_get_jobinfo(step_ptr->job_ptr->select_jobinfo, 
 			     SELECT_DATA_IONODES, 
 			     &ionodes);
@@ -1573,11 +1792,12 @@ extern int jobacct_storage_p_step_complete(void *db_conn,
 	
 #else
 	if (!step_ptr->step_layout || !step_ptr->step_layout->task_cnt) {
-		cpus = step_ptr->job_ptr->total_procs;
+		cpus = tasks = step_ptr->job_ptr->total_procs;
 		snprintf(node_list, BUFFER_SIZE, "%s", 
 			 step_ptr->job_ptr->nodes);
 	} else {
-		cpus = step_ptr->step_layout->task_cnt;
+		cpus = step_ptr->cpu_count; 
+		tasks = step_ptr->step_layout->task_cnt;
 		snprintf(node_list, BUFFER_SIZE, "%s", 
 			 step_ptr->step_layout->node_list);
 	}
@@ -1591,6 +1811,8 @@ extern int jobacct_storage_p_step_complete(void *db_conn,
 		return SLURM_ERROR;
 	}
 
+	memset(&req, 0, sizeof(dbd_step_comp_msg_t));
+
 	req.assoc_id    = step_ptr->job_ptr->assoc_id;
 	req.db_index    = step_ptr->job_ptr->db_index;
 	req.end_time    = time(NULL);	/* called at step completion */
@@ -1603,6 +1825,7 @@ extern int jobacct_storage_p_step_complete(void *db_conn,
 		req.job_submit_time   = step_ptr->job_ptr->details->submit_time;
 	req.step_id     = step_ptr->step_id;
 	req.total_procs = cpus;
+	req.total_tasks = tasks;
 
 	msg.msg_type    = DBD_STEP_COMPLETE;
 	msg.data        = &req;
@@ -1622,6 +1845,8 @@ extern int jobacct_storage_p_suspend(void *db_conn,
 	slurmdbd_msg_t msg;
 	dbd_job_suspend_msg_t req;
 
+	memset(&req, 0, sizeof(dbd_job_suspend_msg_t));
+	
 	req.assoc_id     = job_ptr->assoc_id;
 	req.job_id       = job_ptr->job_id;
 	req.db_index     = job_ptr->db_index;
@@ -1652,6 +1877,8 @@ extern List jobacct_storage_p_get_jobs_cond(void *db_conn, uid_t uid,
 	int rc;
 	List job_list = NULL;
 		
+	memset(&get_msg, 0, sizeof(dbd_cond_msg_t));
+
 	get_msg.cond = job_cond;
 
 	req.msg_type = DBD_GET_JOBS_COND;
@@ -1692,6 +1919,8 @@ extern int jobacct_storage_p_archive(void *db_conn,
 	dbd_cond_msg_t msg;
 	int rc = SLURM_SUCCESS;
 
+	memset(&msg, 0, sizeof(dbd_cond_msg_t));
+
 	msg.cond     = arch_cond;
 
 	req.msg_type = DBD_ARCHIVE_DUMP;
@@ -1726,7 +1955,7 @@ extern int jobacct_storage_p_archive_load(void *db_conn,
 {
 	slurmdbd_msg_t req, resp;
 	int rc = SLURM_SUCCESS;
-
+	
 	req.msg_type = DBD_ARCHIVE_LOAD;
 	req.data     = arch_rec;
 
@@ -1754,21 +1983,7 @@ extern int jobacct_storage_p_archive_load(void *db_conn,
 extern int acct_storage_p_update_shares_used(void *db_conn,
 					     List shares_used)
 {
-	slurmdbd_msg_t req;
-	dbd_list_msg_t shares_used_msg;
-	int rc, resp_code;
-
-	shares_used_msg.my_list = shares_used;
-
-	req.msg_type = DBD_UPDATE_SHARES_USED;
-	req.data = &shares_used_msg;
-	rc = slurm_send_slurmdbd_recv_rc_msg(SLURMDBD_VERSION,
-					     &req, &resp_code);
-
-	if(resp_code != SLURM_SUCCESS)
-		rc = resp_code;
-
-	return rc;
+	return SLURM_SUCCESS;
 }
 
 extern int acct_storage_p_flush_jobs_on_cluster(void *db_conn, char *cluster,
@@ -1779,9 +1994,13 @@ extern int acct_storage_p_flush_jobs_on_cluster(void *db_conn, char *cluster,
 
 	info("Ending any jobs in accounting that were running when controller "
 	     "went down on cluster %s", cluster);
+
+	memset(&req, 0, sizeof(dbd_cluster_procs_msg_t));
+
 	req.cluster_name = cluster;
 	req.proc_count   = 0;
 	req.event_time   = event_time;
+
 	msg.msg_type     = DBD_FLUSH_JOBS;
 	msg.data         = &req;
 
diff --git a/src/plugins/auth/Makefile.in b/src/plugins/auth/Makefile.in
index 03dbec9fea2c219ab5ef160ff64328d4ae3ee8c8..1165aa6a8761e6bbf3e9f90098abf57a0d0a86c1 100644
--- a/src/plugins/auth/Makefile.in
+++ b/src/plugins/auth/Makefile.in
@@ -42,14 +42,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -91,6 +95,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
diff --git a/src/plugins/auth/authd/Makefile.in b/src/plugins/auth/authd/Makefile.in
index ff45e95f018b63607ae4f8bca72e290ed641a967..632c5062790c962ad4825cca8062ba0564bee3e6 100644
--- a/src/plugins/auth/authd/Makefile.in
+++ b/src/plugins/auth/authd/Makefile.in
@@ -43,14 +43,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -110,6 +114,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
diff --git a/src/plugins/auth/authd/auth_authd.c b/src/plugins/auth/authd/auth_authd.c
index 06ef977592111e53db082f9f50abbeaa91ce97c4..f026204f5bb87a8ee4574a339d60f96f9478c228 100644
--- a/src/plugins/auth/authd/auth_authd.c
+++ b/src/plugins/auth/authd/auth_authd.c
@@ -4,10 +4,11 @@
  *  Copyright (C) 2002 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Kevin Tew <tew1@llnl.gov> et. al.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/plugins/auth/munge/Makefile.in b/src/plugins/auth/munge/Makefile.in
index 9a5272ebaed0c099f29540c4af415ce168e23016..73c7e2015a3091e62baf70bc373c1945b46cc231 100644
--- a/src/plugins/auth/munge/Makefile.in
+++ b/src/plugins/auth/munge/Makefile.in
@@ -43,14 +43,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -111,6 +115,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
diff --git a/src/plugins/auth/munge/auth_munge.c b/src/plugins/auth/munge/auth_munge.c
index aa01a42dd87485423642a9058e37a946a2830d18..d43f05a34b05a303ca20e7e903186a35f855fc57 100644
--- a/src/plugins/auth/munge/auth_munge.c
+++ b/src/plugins/auth/munge/auth_munge.c
@@ -1,15 +1,16 @@
 /*****************************************************************************\
  *  auth_munge.c - SLURM auth implementation via Chris Dunlap's Munge
- *  $Id: auth_munge.c 14770 2008-08-14 18:24:35Z da $
+ *  $Id: auth_munge.c 16616 2009-02-20 17:00:27Z jette $
  *****************************************************************************
  *  Copyright (C) 2002-2007 The Regents of the University of California.
  *  Copyright (C) 2008 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Mark Grondona <mgrondona@llnl.gov> 
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/plugins/auth/none/Makefile.in b/src/plugins/auth/none/Makefile.in
index b62381dd421a0087ebe2626696b421872f8b8674..23f553c63d6d25b10063df0c5f2d529586ceee20 100644
--- a/src/plugins/auth/none/Makefile.in
+++ b/src/plugins/auth/none/Makefile.in
@@ -43,14 +43,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -109,6 +113,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
diff --git a/src/plugins/auth/none/auth_none.c b/src/plugins/auth/none/auth_none.c
index 307fcb2bbe3f95c94f4959be2d04dd648402db0f..ee61c54d26ed4c66891848358343d6e3b5ecb395 100644
--- a/src/plugins/auth/none/auth_none.c
+++ b/src/plugins/auth/none/auth_none.c
@@ -4,10 +4,11 @@
  *  Copyright (C) 2002 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Kevin Tew <tew1@llnl.gov> et. al.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/plugins/checkpoint/Makefile.am b/src/plugins/checkpoint/Makefile.am
index 0527fc065beed577bd95a06deaf5f48eb6f045b7..4d42811d12c165976e94794bf58251d3baa1ecbe 100644
--- a/src/plugins/checkpoint/Makefile.am
+++ b/src/plugins/checkpoint/Makefile.am
@@ -1,3 +1,3 @@
 # Makefile for checkpoint plugins
 
-SUBDIRS = aix none ompi xlch
+SUBDIRS = aix blcr none ompi xlch
diff --git a/src/plugins/checkpoint/Makefile.in b/src/plugins/checkpoint/Makefile.in
index 076aca867a522a4285d9dab536c0882cb5114841..c6872fe7d8e1c5ba53ee25073892fdf0ae173fb1 100644
--- a/src/plugins/checkpoint/Makefile.in
+++ b/src/plugins/checkpoint/Makefile.in
@@ -42,14 +42,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -91,6 +95,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
@@ -250,7 +258,7 @@ target_os = @target_os@
 target_vendor = @target_vendor@
 top_builddir = @top_builddir@
 top_srcdir = @top_srcdir@
-SUBDIRS = aix none ompi xlch
+SUBDIRS = aix blcr none ompi xlch
 all: all-recursive
 
 .SUFFIXES:
diff --git a/src/plugins/checkpoint/aix/Makefile.in b/src/plugins/checkpoint/aix/Makefile.in
index bc5e93f5fa1cb487d1720b077c559fea3d76955f..a7a02b70efd72fb4912c5b622b4216904ad6bda2 100644
--- a/src/plugins/checkpoint/aix/Makefile.in
+++ b/src/plugins/checkpoint/aix/Makefile.in
@@ -43,14 +43,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -114,6 +118,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
diff --git a/src/plugins/checkpoint/aix/checkpoint_aix.c b/src/plugins/checkpoint/aix/checkpoint_aix.c
index fef01c9f3dd7cecff104ae4215d68edff5ea8b69..aa748abce8b07514435c320e81c70e3f5888c046 100644
--- a/src/plugins/checkpoint/aix/checkpoint_aix.c
+++ b/src/plugins/checkpoint/aix/checkpoint_aix.c
@@ -1,14 +1,16 @@
 /*****************************************************************************\
  *  checkpoint_aix.c - AIX slurm checkpoint plugin.
- *  $Id: checkpoint_aix.c 13672 2008-03-19 23:10:58Z jette $
+ *  $Id: checkpoint_aix.c 17706 2009-06-03 23:47:58Z jette $
  *****************************************************************************
- *  Copyright (C) 2004 The Regents of the University of California.
+ *  Copyright (C) 2004-2007 The Regents of the University of California.
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -56,6 +58,7 @@
 #include <slurm/slurm.h>
 #include <slurm/slurm_errno.h>
 
+#include "src/common/slurm_xlator.h"
 #include "src/common/list.h"
 #include "src/common/log.h"
 #include "src/common/pack.h"
@@ -65,6 +68,13 @@
 #include "src/slurmctld/agent.h"
 #include "src/slurmctld/slurmctld.h"
 
+/* These are defined here so that these symbols exist when the plugin
+ * is linked into something other than the slurmctld.  They are
+ * overridden by the slurmctld's own definitions when loaded there.
+ */
+struct node_record *node_record_table_ptr = NULL;
+int node_record_count = 0;
+
 struct check_job_info {
 	uint16_t disabled;	/* counter, checkpointable only if zero */
 	uint16_t node_cnt;
@@ -131,7 +141,7 @@ static void  _ckpt_signal_step(struct ckpt_timeout_info *rec);
  */
 const char plugin_name[]       	= "Checkpoint AIX plugin";
 const char plugin_type[]       	= "checkpoint/aix";
-const uint32_t plugin_version	= 90;
+const uint32_t plugin_version	= 100;
 
 /*
  * init() is called when the plugin is loaded, before any other functions
@@ -176,14 +186,16 @@ extern int fini ( void )
  * The remainder of this file implements the standard SLURM checkpoint API.
  */
 
-extern int slurm_ckpt_op ( uint16_t op, uint16_t data,
-		struct step_record * step_ptr, time_t * event_time, 
-		uint32_t *error_code, char **error_msg )
+extern int slurm_ckpt_op (uint32_t job_id, uint32_t step_id, 
+			  struct step_record *step_ptr, uint16_t op,
+			  uint16_t data, char *image_dir, time_t * event_time, 
+			  uint32_t *error_code, char **error_msg )
 {
 	int rc = SLURM_SUCCESS;
 	struct check_job_info *check_ptr;
 
-	xassert(step_ptr);
+	if (!step_ptr)
+		return ESLURM_INVALID_JOB_ID;
 	check_ptr = (struct check_job_info *) step_ptr->check_job;
 	xassert(check_ptr);
 
@@ -507,8 +519,24 @@ static void _ckpt_dequeue_timeout(uint32_t job_id, uint32_t step_id,
 	slurm_mutex_unlock(&ckpt_agent_mutex);
 }
 
-extern int slurm_ckpt_task_comp ( struct step_record * step_ptr, uint32_t task_id,
-				  time_t event_time, uint32_t error_code, char *error_msg )
+extern int slurm_ckpt_task_comp ( struct step_record * step_ptr, 
+				  uint32_t task_id, time_t event_time,
+				  uint32_t error_code, char *error_msg )
+{
+	return SLURM_SUCCESS;
+}
+
+extern int slurm_ckpt_stepd_prefork(void *slurmd_job)
 {
 	return SLURM_SUCCESS;
 }
+
+extern int slurm_ckpt_signal_tasks(void *slurmd_job)
+{
+	return ESLURM_NOT_SUPPORTED;
+}
+
+extern int slurm_ckpt_restart_task(void *slurmd_job, char *image_dir, int gtid)
+{
+	return ESLURM_NOT_SUPPORTED;
+}
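The slurm_ckpt_op() signature change above (job_id/step_id/image_dir added, the xassert(step_ptr) relaxed to a NULL check) exists because batch-script checkpoints have no step record: callers may now pass step_ptr == NULL and let the plugin resolve the target itself. A sketch of that lookup, assuming the slurmctld declarations this file already includes (ckpt_op_resolve is a hypothetical helper, not part of the plugin API):

```c
#include "src/slurmctld/slurmctld.h"
#include <slurm/slurm_errno.h>

static int ckpt_op_resolve(uint32_t job_id, uint32_t step_id,
			   struct step_record **step_pptr)
{
	struct job_record *job_ptr = find_job_record(job_id);

	if (!job_ptr)
		return ESLURM_INVALID_JOB_ID;
	if (step_id == SLURM_BATCH_SCRIPT) {
		*step_pptr = NULL;	/* batch script: no step record */
		return SLURM_SUCCESS;
	}
	*step_pptr = find_step_record(job_ptr, step_id);
	return *step_pptr ? SLURM_SUCCESS : ESLURM_INVALID_JOB_ID;
}
```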
diff --git a/src/plugins/checkpoint/blcr/Makefile.am b/src/plugins/checkpoint/blcr/Makefile.am
new file mode 100644
index 0000000000000000000000000000000000000000..bf629b6ca2c9ecaf93e4ea57a5246f973c56b2a8
--- /dev/null
+++ b/src/plugins/checkpoint/blcr/Makefile.am
@@ -0,0 +1,22 @@
+# Makefile for checkpoint/blcr plugin
+
+AUTOMAKE_OPTIONS = foreign
+
+PLUGIN_FLAGS = -module -avoid-version --export-dynamic 
+
+INCLUDES = -I$(top_srcdir) -I$(top_srcdir)/src/common
+
+if WITH_BLCR
+pkglib_LTLIBRARIES = checkpoint_blcr.la
+checkpoint_blcr_la_SOURCES = checkpoint_blcr.c
+checkpoint_blcr_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS)
+
+dist_bin_SCRIPTS = cr_checkpoint.sh cr_restart.sh
+
+force:
+
+$(checkpoint_blcr_LDADD) : force
+	@cd `dirname $@` && $(MAKE) `basename $@`
+else
+EXTRA_checkpoint_blcr_la_SOURCES = checkpoint_blcr.c
+endif
diff --git a/src/plugins/checkpoint/blcr/Makefile.in b/src/plugins/checkpoint/blcr/Makefile.in
new file mode 100644
index 0000000000000000000000000000000000000000..df674f5f0529064bf4a8d6397bc1d3d4dbb4d518
--- /dev/null
+++ b/src/plugins/checkpoint/blcr/Makefile.in
@@ -0,0 +1,607 @@
+# Makefile.in generated by automake 1.10.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
+# 2003, 2004, 2005, 2006, 2007, 2008  Free Software Foundation, Inc.
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# Makefile for checkpoint/blcr plugin
+
+
+VPATH = @srcdir@
+pkgdatadir = $(datadir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+target_triplet = @target@
+subdir = src/plugins/checkpoint/blcr
+DIST_COMMON = $(am__dist_bin_SCRIPTS_DIST) $(srcdir)/Makefile.am \
+	$(srcdir)/Makefile.in $(srcdir)/cr_checkpoint.sh.in \
+	$(srcdir)/cr_restart.sh.in
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
+	$(top_srcdir)/auxdir/slurm.m4 \
+	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
+	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
+	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
+	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
+	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_databases.m4 \
+	$(top_srcdir)/auxdir/x_ac_debug.m4 \
+	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
+	$(top_srcdir)/auxdir/x_ac_federation.m4 \
+	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
+	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_munge.m4 \
+	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_pam.m4 \
+	$(top_srcdir)/auxdir/x_ac_ptrace.m4 \
+	$(top_srcdir)/auxdir/x_ac_readline.m4 \
+	$(top_srcdir)/auxdir/x_ac_setpgrp.m4 \
+	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
+	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
+	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
+	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+	$(ACLOCAL_M4)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h $(top_builddir)/slurm/slurm.h
+CONFIG_CLEAN_FILES = cr_checkpoint.sh cr_restart.sh
+am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
+am__vpath_adj = case $$p in \
+    $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
+    *) f=$$p;; \
+  esac;
+am__strip_dir = `echo $$p | sed -e 's|^.*/||'`;
+am__installdirs = "$(DESTDIR)$(pkglibdir)" "$(DESTDIR)$(bindir)"
+pkglibLTLIBRARIES_INSTALL = $(INSTALL)
+LTLIBRARIES = $(pkglib_LTLIBRARIES)
+checkpoint_blcr_la_LIBADD =
+am__checkpoint_blcr_la_SOURCES_DIST = checkpoint_blcr.c
+@WITH_BLCR_TRUE@am_checkpoint_blcr_la_OBJECTS = checkpoint_blcr.lo
+am__EXTRA_checkpoint_blcr_la_SOURCES_DIST = checkpoint_blcr.c
+checkpoint_blcr_la_OBJECTS = $(am_checkpoint_blcr_la_OBJECTS)
+checkpoint_blcr_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \
+	$(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \
+	$(checkpoint_blcr_la_LDFLAGS) $(LDFLAGS) -o $@
+@WITH_BLCR_TRUE@am_checkpoint_blcr_la_rpath = -rpath $(pkglibdir)
+am__dist_bin_SCRIPTS_DIST = cr_checkpoint.sh cr_restart.sh
+dist_binSCRIPT_INSTALL = $(INSTALL_SCRIPT)
+SCRIPTS = $(dist_bin_SCRIPTS)
+DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm
+depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp
+am__depfiles_maybe = depfiles
+COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \
+	$(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
+LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
+	--mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \
+	$(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
+CCLD = $(CC)
+LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
+	--mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \
+	$(LDFLAGS) -o $@
+SOURCES = $(checkpoint_blcr_la_SOURCES) \
+	$(EXTRA_checkpoint_blcr_la_SOURCES)
+DIST_SOURCES = $(am__checkpoint_blcr_la_SOURCES_DIST) \
+	$(am__EXTRA_checkpoint_blcr_la_SOURCES_DIST)
+ETAGS = etags
+CTAGS = ctags
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AR = @AR@
+AUTHD_CFLAGS = @AUTHD_CFLAGS@
+AUTHD_LIBS = @AUTHD_LIBS@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
+BLUEGENE_LOADED = @BLUEGENE_LOADED@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CMD_LDFLAGS = @CMD_LDFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CXX = @CXX@
+CXXCPP = @CXXCPP@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+DSYMUTIL = @DSYMUTIL@
+ECHO = @ECHO@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+ELAN_LIBS = @ELAN_LIBS@
+EXEEXT = @EXEEXT@
+F77 = @F77@
+FEDERATION_LDFLAGS = @FEDERATION_LDFLAGS@
+FFLAGS = @FFLAGS@
+GREP = @GREP@
+GTK2_CFLAGS = @GTK2_CFLAGS@
+GTK2_LIBS = @GTK2_LIBS@
+HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@
+HAVEPGCONFIG = @HAVEPGCONFIG@
+HAVEPKGCONFIG = @HAVEPKGCONFIG@
+HAVE_AIX = @HAVE_AIX@
+HAVE_ELAN = @HAVE_ELAN@
+HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_OPENSSL = @HAVE_OPENSSL@
+HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+LDFLAGS = @LDFLAGS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBTOOL = @LIBTOOL@
+LIB_LDFLAGS = @LIB_LDFLAGS@
+LN_S = @LN_S@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MKDIR_P = @MKDIR_P@
+MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@
+MUNGE_LDFLAGS = @MUNGE_LDFLAGS@
+MUNGE_LIBS = @MUNGE_LIBS@
+MYSQL_CFLAGS = @MYSQL_CFLAGS@
+MYSQL_LIBS = @MYSQL_LIBS@
+NCURSES = @NCURSES@
+NMEDIT = @NMEDIT@
+NUMA_LIBS = @NUMA_LIBS@
+OBJEXT = @OBJEXT@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PAM_LIBS = @PAM_LIBS@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PGSQL_CFLAGS = @PGSQL_CFLAGS@
+PGSQL_LIBS = @PGSQL_LIBS@
+PLPA_LIBS = @PLPA_LIBS@
+PROCTRACKDIR = @PROCTRACKDIR@
+PROJECT = @PROJECT@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+RANLIB = @RANLIB@
+READLINE_LIBS = @READLINE_LIBS@
+RELEASE = @RELEASE@
+SED = @SED@
+SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
+SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SLURMCTLD_PORT = @SLURMCTLD_PORT@
+SLURMDBD_PORT = @SLURMDBD_PORT@
+SLURMD_PORT = @SLURMD_PORT@
+SLURM_API_AGE = @SLURM_API_AGE@
+SLURM_API_CURRENT = @SLURM_API_CURRENT@
+SLURM_API_MAJOR = @SLURM_API_MAJOR@
+SLURM_API_REVISION = @SLURM_API_REVISION@
+SLURM_API_VERSION = @SLURM_API_VERSION@
+SLURM_MAJOR = @SLURM_MAJOR@
+SLURM_MICRO = @SLURM_MICRO@
+SLURM_MINOR = @SLURM_MINOR@
+SLURM_PREFIX = @SLURM_PREFIX@
+SLURM_VERSION = @SLURM_VERSION@
+SO_LDFLAGS = @SO_LDFLAGS@
+SSL_CPPFLAGS = @SSL_CPPFLAGS@
+SSL_LDFLAGS = @SSL_LDFLAGS@
+SSL_LIBS = @SSL_LIBS@
+STRIP = @STRIP@
+UTIL_LIBS = @UTIL_LIBS@
+VERSION = @VERSION@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+ac_ct_F77 = @ac_ct_F77@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_vendor = @build_vendor@
+builddir = @builddir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target = @target@
+target_alias = @target_alias@
+target_cpu = @target_cpu@
+target_os = @target_os@
+target_vendor = @target_vendor@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+AUTOMAKE_OPTIONS = foreign
+PLUGIN_FLAGS = -module -avoid-version --export-dynamic 
+INCLUDES = -I$(top_srcdir) -I$(top_srcdir)/src/common
+@WITH_BLCR_TRUE@pkglib_LTLIBRARIES = checkpoint_blcr.la
+@WITH_BLCR_TRUE@checkpoint_blcr_la_SOURCES = checkpoint_blcr.c
+@WITH_BLCR_TRUE@checkpoint_blcr_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS)
+@WITH_BLCR_TRUE@dist_bin_SCRIPTS = cr_checkpoint.sh cr_restart.sh
+@WITH_BLCR_FALSE@EXTRA_checkpoint_blcr_la_SOURCES = checkpoint_blcr.c
+all: all-am
+
+.SUFFIXES:
+.SUFFIXES: .c .lo .o .obj
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am  $(am__configure_deps)
+	@for dep in $?; do \
+	  case '$(am__configure_deps)' in \
+	    *$$dep*) \
+	      cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh \
+		&& exit 0; \
+	      exit 1;; \
+	  esac; \
+	done; \
+	echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign  src/plugins/checkpoint/blcr/Makefile'; \
+	cd $(top_srcdir) && \
+	  $(AUTOMAKE) --foreign  src/plugins/checkpoint/blcr/Makefile
+.PRECIOUS: Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+	@case '$?' in \
+	  *config.status*) \
+	    cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+	  *) \
+	    echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+	    cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+	esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+cr_checkpoint.sh: $(top_builddir)/config.status $(srcdir)/cr_checkpoint.sh.in
+	cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@
+cr_restart.sh: $(top_builddir)/config.status $(srcdir)/cr_restart.sh.in
+	cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@
+install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES)
+	@$(NORMAL_INSTALL)
+	test -z "$(pkglibdir)" || $(MKDIR_P) "$(DESTDIR)$(pkglibdir)"
+	@list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \
+	  if test -f $$p; then \
+	    f=$(am__strip_dir) \
+	    echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) '$$p' '$(DESTDIR)$(pkglibdir)/$$f'"; \
+	    $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) "$$p" "$(DESTDIR)$(pkglibdir)/$$f"; \
+	  else :; fi; \
+	done
+
+uninstall-pkglibLTLIBRARIES:
+	@$(NORMAL_UNINSTALL)
+	@list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \
+	  p=$(am__strip_dir) \
+	  echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$p'"; \
+	  $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$p"; \
+	done
+
+clean-pkglibLTLIBRARIES:
+	-test -z "$(pkglib_LTLIBRARIES)" || rm -f $(pkglib_LTLIBRARIES)
+	@list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \
+	  dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \
+	  test "$$dir" != "$$p" || dir=.; \
+	  echo "rm -f \"$${dir}/so_locations\""; \
+	  rm -f "$${dir}/so_locations"; \
+	done
+checkpoint_blcr.la: $(checkpoint_blcr_la_OBJECTS) $(checkpoint_blcr_la_DEPENDENCIES) 
+	$(checkpoint_blcr_la_LINK) $(am_checkpoint_blcr_la_rpath) $(checkpoint_blcr_la_OBJECTS) $(checkpoint_blcr_la_LIBADD) $(LIBS)
+install-dist_binSCRIPTS: $(dist_bin_SCRIPTS)
+	@$(NORMAL_INSTALL)
+	test -z "$(bindir)" || $(MKDIR_P) "$(DESTDIR)$(bindir)"
+	@list='$(dist_bin_SCRIPTS)'; for p in $$list; do \
+	  if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+	  if test -f $$d$$p; then \
+	    f=`echo "$$p" | sed 's|^.*/||;$(transform)'`; \
+	    echo " $(dist_binSCRIPT_INSTALL) '$$d$$p' '$(DESTDIR)$(bindir)/$$f'"; \
+	    $(dist_binSCRIPT_INSTALL) "$$d$$p" "$(DESTDIR)$(bindir)/$$f"; \
+	  else :; fi; \
+	done
+
+uninstall-dist_binSCRIPTS:
+	@$(NORMAL_UNINSTALL)
+	@list='$(dist_bin_SCRIPTS)'; for p in $$list; do \
+	  f=`echo "$$p" | sed 's|^.*/||;$(transform)'`; \
+	  echo " rm -f '$(DESTDIR)$(bindir)/$$f'"; \
+	  rm -f "$(DESTDIR)$(bindir)/$$f"; \
+	done
+
+mostlyclean-compile:
+	-rm -f *.$(OBJEXT)
+
+distclean-compile:
+	-rm -f *.tab.c
+
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/checkpoint_blcr.Plo@am__quote@
+
+.c.o:
+@am__fastdepCC_TRUE@	$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCC_TRUE@	mv -f $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(COMPILE) -c $<
+
+.c.obj:
+@am__fastdepCC_TRUE@	$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'`
+@am__fastdepCC_TRUE@	mv -f $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(COMPILE) -c `$(CYGPATH_W) '$<'`
+
+.c.lo:
+@am__fastdepCC_TRUE@	$(LTCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCC_TRUE@	mv -f $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LTCOMPILE) -c -o $@ $<
+
+mostlyclean-libtool:
+	-rm -f *.lo
+
+clean-libtool:
+	-rm -rf .libs _libs
+
+ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES)
+	list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
+	unique=`for i in $$list; do \
+	    if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+	  done | \
+	  $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
+	      END { if (nonempty) { for (i in files) print i; }; }'`; \
+	mkid -fID $$unique
+tags: TAGS
+
+TAGS:  $(HEADERS) $(SOURCES)  $(TAGS_DEPENDENCIES) \
+		$(TAGS_FILES) $(LISP)
+	tags=; \
+	here=`pwd`; \
+	list='$(SOURCES) $(HEADERS)  $(LISP) $(TAGS_FILES)'; \
+	unique=`for i in $$list; do \
+	    if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+	  done | \
+	  $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
+	      END { if (nonempty) { for (i in files) print i; }; }'`; \
+	if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \
+	  test -n "$$unique" || unique=$$empty_fix; \
+	  $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+	    $$tags $$unique; \
+	fi
+ctags: CTAGS
+CTAGS:  $(HEADERS) $(SOURCES)  $(TAGS_DEPENDENCIES) \
+		$(TAGS_FILES) $(LISP)
+	tags=; \
+	list='$(SOURCES) $(HEADERS)  $(LISP) $(TAGS_FILES)'; \
+	unique=`for i in $$list; do \
+	    if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+	  done | \
+	  $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
+	      END { if (nonempty) { for (i in files) print i; }; }'`; \
+	test -z "$(CTAGS_ARGS)$$tags$$unique" \
+	  || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
+	     $$tags $$unique
+
+GTAGS:
+	here=`$(am__cd) $(top_builddir) && pwd` \
+	  && cd $(top_srcdir) \
+	  && gtags -i $(GTAGS_ARGS) $$here
+
+distclean-tags:
+	-rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
+
+distdir: $(DISTFILES)
+	@srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+	topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+	list='$(DISTFILES)'; \
+	  dist_files=`for file in $$list; do echo $$file; done | \
+	  sed -e "s|^$$srcdirstrip/||;t" \
+	      -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+	case $$dist_files in \
+	  */*) $(MKDIR_P) `echo "$$dist_files" | \
+			   sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+			   sort -u` ;; \
+	esac; \
+	for file in $$dist_files; do \
+	  if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+	  if test -d $$d/$$file; then \
+	    dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+	    if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+	      cp -pR $(srcdir)/$$file $(distdir)$$dir || exit 1; \
+	    fi; \
+	    cp -pR $$d/$$file $(distdir)$$dir || exit 1; \
+	  else \
+	    test -f $(distdir)/$$file \
+	    || cp -p $$d/$$file $(distdir)/$$file \
+	    || exit 1; \
+	  fi; \
+	done
+check-am: all-am
+check: check-am
+all-am: Makefile $(LTLIBRARIES) $(SCRIPTS)
+installdirs:
+	for dir in "$(DESTDIR)$(pkglibdir)" "$(DESTDIR)$(bindir)"; do \
+	  test -z "$$dir" || $(MKDIR_P) "$$dir"; \
+	done
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+	@$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+	$(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+	  install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+	  `test -z '$(STRIP)' || \
+	    echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+	-test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+
+maintainer-clean-generic:
+	@echo "This command is intended for maintainers to use"
+	@echo "it deletes files that may require special tools to rebuild."
+clean: clean-am
+
+clean-am: clean-generic clean-libtool clean-pkglibLTLIBRARIES \
+	mostlyclean-am
+
+distclean: distclean-am
+	-rm -rf ./$(DEPDIR)
+	-rm -f Makefile
+distclean-am: clean-am distclean-compile distclean-generic \
+	distclean-tags
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-exec-am: install-dist_binSCRIPTS install-pkglibLTLIBRARIES
+
+install-html: install-html-am
+
+install-info: install-info-am
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-ps: install-ps-am
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+	-rm -rf ./$(DEPDIR)
+	-rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-compile mostlyclean-generic \
+	mostlyclean-libtool
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am: uninstall-dist_binSCRIPTS uninstall-pkglibLTLIBRARIES
+
+.MAKE: install-am install-strip
+
+.PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \
+	clean-libtool clean-pkglibLTLIBRARIES ctags distclean \
+	distclean-compile distclean-generic distclean-libtool \
+	distclean-tags distdir dvi dvi-am html html-am info info-am \
+	install install-am install-data install-data-am \
+	install-dist_binSCRIPTS install-dvi install-dvi-am \
+	install-exec install-exec-am install-html install-html-am \
+	install-info install-info-am install-man install-pdf \
+	install-pdf-am install-pkglibLTLIBRARIES install-ps \
+	install-ps-am install-strip installcheck installcheck-am \
+	installdirs maintainer-clean maintainer-clean-generic \
+	mostlyclean mostlyclean-compile mostlyclean-generic \
+	mostlyclean-libtool pdf pdf-am ps ps-am tags uninstall \
+	uninstall-am uninstall-dist_binSCRIPTS \
+	uninstall-pkglibLTLIBRARIES
+
+
+@WITH_BLCR_TRUE@force:
+
+@WITH_BLCR_TRUE@$(checkpoint_blcr_LDADD) : force
+@WITH_BLCR_TRUE@	@cd `dirname $@` && $(MAKE) `basename $@`
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/src/plugins/checkpoint/blcr/checkpoint_blcr.c b/src/plugins/checkpoint/blcr/checkpoint_blcr.c
new file mode 100644
index 0000000000000000000000000000000000000000..ba66d0f7fab36e27a6aa5f93eab87807fce8f060
--- /dev/null
+++ b/src/plugins/checkpoint/blcr/checkpoint_blcr.c
@@ -0,0 +1,715 @@
+/*****************************************************************************\
+ *  checkpoint_blcr.c - BLCR slurm checkpoint plugin.
+ *  $Id: checkpoint_blcr.c 0001 2008-12-29 16:50:11Z hjcao $
+ *****************************************************************************
+ *  Derived from checkpoint_aix.c
+ *  Copyright (C) 2007-2009 National University of Defense Technology, China.
+ *  Written by Hongjia Cao.
+ *  CODE-OCEC-09-009. All rights reserved.
+ *  
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
+ *  
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission 
+ *  to link the code of portions of this program with the OpenSSL library under 
+ *  certain conditions as described in each individual source file, and 
+ *  distribute linked combinations including the two. You must obey the GNU 
+ *  General Public License in all respects for all of the code used other than 
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this 
+ *  exception to your version of the file(s), but you are not obligated to do 
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in 
+ *  the program, then also delete it here.
+ *  
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *  
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#ifdef HAVE_CONFIG_H
+#  include "config.h"
+#endif
+
+#if HAVE_STDINT_H
+#  include <stdint.h>
+#endif
+#if HAVE_INTTYPES_H
+#  include <inttypes.h>
+#endif
+#ifdef WITH_PTHREADS
+#  include <pthread.h>
+#endif
+
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <time.h>
+#include <unistd.h>
+#include <libgen.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <slurm/slurm.h>
+#include <slurm/slurm_errno.h>
+
+#include "src/common/list.h"
+#include "src/common/log.h"
+#include "src/common/pack.h"
+#include "src/common/xassert.h"
+#include "src/common/xstring.h"
+#include "src/common/xmalloc.h"
+#include "src/slurmctld/agent.h"
+#include "src/slurmctld/slurmctld.h"
+#include "src/slurmctld/locks.h"
+#include "src/slurmd/slurmstepd/slurmstepd_job.h"
+
+#define MAX_PATH_LEN 1024
+
+struct check_job_info {
+	uint16_t disabled;	/* counter, checkpointable only if zero */
+	time_t   time_stamp;	/* begin or end checkpoint time */
+	uint32_t error_code;
+	char    *error_msg;
+};
+
+struct ckpt_req {
+	uint32_t gid;
+	uint32_t uid;
+	uint32_t job_id;
+	uint32_t step_id;
+	time_t begin_time;
+	uint16_t wait;
+	char *image_dir;
+	char *nodelist;
+	uint16_t sig_done;
+};
+
+static void _send_sig(uint32_t job_id, uint32_t step_id, uint16_t signal,
+		      char *nodelist);
+static void *_ckpt_agent_thr(void *arg);
+static void _ckpt_req_free(void *ptr);
+static int _on_ckpt_complete(uint32_t group_id, uint32_t user_id,
+			     uint32_t job_id, uint32_t step_id,
+			     char *image_dir, uint32_t error_code);
+
+
+/* path to shell scripts */
+static char *scch_path = SLURM_PREFIX "/sbin/scch";
+static char *cr_checkpoint_path = SLURM_PREFIX "/bin/cr_checkpoint.sh";
+static char *cr_restart_path = SLURM_PREFIX "/bin/cr_restart.sh";
+
+static uint32_t ckpt_agent_jobid = 0;
+static uint16_t ckpt_agent_count = 0;
+static pthread_mutex_t ckpt_agent_mutex = PTHREAD_MUTEX_INITIALIZER;
+static pthread_cond_t ckpt_agent_cond = PTHREAD_COND_INITIALIZER;
+
+/*
+ * These variables are required by the generic plugin interface.  If they
+ * are not found in the plugin, the plugin loader will ignore it.
+ *
+ * plugin_name - a string giving a human-readable description of the
+ * plugin.  There is no maximum length, but the symbol must refer to
+ * a valid string.
+ *
+ * plugin_type - a string suggesting the type of the plugin or its
+ * applicability to a particular form of data or method of data handling.
+ * If the low-level plugin API is used, the contents of this string are
+ * unimportant and may be anything.  SLURM uses the higher-level plugin
+ * interface which requires this string to be of the form
+ *
+ *	<application>/<method>
+ *
+ * where <application> is a description of the intended application of
+ * the plugin (e.g., "checkpoint" for SLURM checkpoint) and <method>
+ * is a description of how this plugin satisfies that application.  SLURM will
+ * only load checkpoint plugins if the plugin_type string has a 
+ * prefix of "checkpoint/".
+ *
+ * plugin_version - an unsigned 32-bit integer giving the version number
+ * of the plugin.  If major and minor revisions are desired, the major
+ * version number may be multiplied by a suitable magnitude constant such
+ * as 100 or 1000.  Various SLURM versions will likely require a certain
+ * minimum version for their plugins as the checkpoint API matures.
+ */
+const char plugin_name[]       	= "BLCR checkpoint plugin";
+const char plugin_type[]       	= "checkpoint/blcr";
+const uint32_t plugin_version	= 100;
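The comment block above describes the symbol contract; from the loader's side these are plain exported data symbols resolved with dlsym(). A hypothetical loader-side check (not SLURM's actual src/common/plugin.c code):

```c
#include <dlfcn.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static int check_ckpt_plugin(const char *path)
{
	void *h = dlopen(path, RTLD_LAZY);
	const char *type;
	const uint32_t *ver;

	if (!h) {
		fprintf(stderr, "dlopen: %s\n", dlerror());
		return -1;
	}
	type = (const char *) dlsym(h, "plugin_type");
	ver  = (const uint32_t *) dlsym(h, "plugin_version");
	if (!type || !ver || strncmp(type, "checkpoint/", 11) != 0) {
		dlclose(h);
		return -1;	/* wrong kind of plugin */
	}
	printf("loaded %s, version %u\n", type, (unsigned) *ver);
	return 0;		/* handle kept open for later dlsym() calls */
}
```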
+
+/*
+ * init() is called when the plugin is loaded, before any other functions
+ * are called.  Put global initialization here.
+ */
+extern int init ( void )
+{
+	info("checkpoint/blcr init");
+	return SLURM_SUCCESS;
+}
+
+
+extern int fini ( void )
+{
+	info("checkpoint/blcr fini");
+	return SLURM_SUCCESS;
+}
+
+/*
+ * The remainder of this file implements the standard SLURM checkpoint API.
+ */
+extern int slurm_ckpt_op (uint32_t job_id, uint32_t step_id, 
+			  struct step_record *step_ptr, uint16_t op,
+			  uint16_t data, char *image_dir, time_t * event_time, 
+			  uint32_t *error_code, char **error_msg )
+{
+	int rc = SLURM_SUCCESS;
+	struct check_job_info *check_ptr;
+	uint16_t done_sig = 0;
+	struct job_record *job_ptr;
+	struct node_record *node_ptr;
+	pthread_attr_t attr;
+	pthread_t ckpt_agent_tid = 0;
+	char *nodelist;
+	struct ckpt_req *req_ptr;
+
+	/* job/step checked already */
+	job_ptr = find_job_record(job_id);
+	if (!job_ptr)
+		return ESLURM_INVALID_JOB_ID;
+	if (step_id == SLURM_BATCH_SCRIPT) {
+		check_ptr = (struct check_job_info *)job_ptr->check_job;
+		node_ptr = find_first_node_record(job_ptr->node_bitmap);
+		nodelist = node_ptr->name;
+	} else {
+		step_ptr = find_step_record(job_ptr, step_id);
+		if (!step_ptr)
+			return ESLURM_INVALID_JOB_ID;
+		check_ptr = (struct check_job_info *)step_ptr->check_job;
+		nodelist = step_ptr->step_layout->node_list;
+	}
+	xassert(check_ptr);
+
+	switch (op) {
+	case CHECK_ABLE:
+		if (check_ptr->disabled)
+			rc = ESLURM_DISABLED;
+		else {
+			*event_time = check_ptr->time_stamp;
+			rc = SLURM_SUCCESS;
+		}
+		break;
+	case CHECK_DISABLE:
+		check_ptr->disabled++;
+		break;
+	case CHECK_ENABLE:
+		check_ptr->disabled--;
+		break;
+	case CHECK_VACATE:
+		done_sig = SIGTERM;
+		/* no break */
+	case CHECK_CREATE:
+		if (check_ptr->disabled) {
+			rc = ESLURM_DISABLED;
+			break;
+		}
+		if (check_ptr->time_stamp != 0) {
+			rc = EALREADY;
+			break;
+		}
+			
+		check_ptr->time_stamp = time(NULL);
+		check_ptr->error_code = 0;
+		xfree(check_ptr->error_msg);
+
+		req_ptr = xmalloc(sizeof(struct ckpt_req));
+		if (!req_ptr) {
+			rc = ENOMEM;
+			break;
+		}
+		req_ptr->gid = job_ptr->group_id;
+		req_ptr->uid = job_ptr->user_id;
+		req_ptr->job_id = job_id;
+		req_ptr->step_id = step_id;
+		req_ptr->begin_time = check_ptr->time_stamp;
+		req_ptr->wait = data;
+		req_ptr->image_dir = xstrdup(image_dir);
+		req_ptr->nodelist = xstrdup(nodelist);
+		req_ptr->sig_done = done_sig;
+
+		slurm_attr_init(&attr);
+		if (pthread_attr_setdetachstate(&attr,
+						PTHREAD_CREATE_DETACHED)) {
+			error("pthread_attr_setdetachstate: %m");
+			rc = errno;
+			slurm_attr_destroy(&attr);
+			_ckpt_req_free(req_ptr);
+			break;
+		}
+		if (pthread_create(&ckpt_agent_tid, &attr, _ckpt_agent_thr,
+				   req_ptr)) {
+			error("pthread_create: %m");
+			rc = errno;
+			slurm_attr_destroy(&attr);
+			_ckpt_req_free(req_ptr);
+			break;
+		}
+		slurm_attr_destroy(&attr);
+
+		break;
+			
+	case CHECK_RESTART:
+		if (step_id != SLURM_BATCH_SCRIPT) {
+			rc = ESLURM_NOT_SUPPORTED;
+			break;
+		}
+		/* create a batch job from saved desc */
+		rc = ESLURM_NOT_SUPPORTED;
+		/* TODO: save job script */
+		break;
+			
+	case CHECK_ERROR:
+		xassert(error_code);
+		xassert(error_msg);
+		*error_code = check_ptr->error_code;
+		xfree(*error_msg);
+		*error_msg = xstrdup(check_ptr->error_msg);
+		break;
+	default:
+		error("Invalid checkpoint operation: %d", op);
+		rc = EINVAL;
+	}
+
+	return rc;
+}
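CHECK_CREATE/CHECK_VACATE push the slow work (running cr_checkpoint.sh on the nodes, waiting, optionally signaling the job) into a detached thread so the controller RPC returns immediately. Reduced to its essentials, with a hypothetical worker standing in for _ckpt_agent_thr():

```c
#include <pthread.h>
#include <stdlib.h>

struct work {
	unsigned job_id;	/* stand-in for struct ckpt_req */
};

static void *worker(void *arg)
{
	struct work *w = arg;
	/* ... run the checkpoint, record completion ... */
	free(w);		/* detached worker owns its argument */
	return NULL;
}

static int spawn_detached(unsigned job_id)
{
	pthread_t tid;
	pthread_attr_t attr;
	struct work *w = malloc(sizeof(*w));
	int rc;

	if (!w)
		return -1;
	w->job_id = job_id;
	pthread_attr_init(&attr);
	/* detached: no pthread_join(); resources freed at thread exit */
	pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
	rc = pthread_create(&tid, &attr, worker, w);
	pthread_attr_destroy(&attr);	/* destroy on every path */
	if (rc)
		free(w);		/* worker never ran; we still own w */
	return rc ? -1 : 0;
}
```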
+
+extern int slurm_ckpt_comp ( struct step_record * step_ptr, time_t event_time,
+		uint32_t error_code, char *error_msg )
+{
+	error("checkpoint/blcr: slurm_ckpt_comp not implemented");
+	return SLURM_FAILURE; 
+}
+
+extern int slurm_ckpt_task_comp ( struct step_record * step_ptr, 
+				  uint32_t task_id, time_t event_time, 
+				  uint32_t error_code, char *error_msg )
+{
+	error("checkpoint/blcr: slurm_ckpt_task_comp not implemented");
+	return SLURM_FAILURE; 
+}
+
+extern int slurm_ckpt_alloc_job(check_jobinfo_t *jobinfo)
+{
+	struct check_job_info *check_ptr;
+	check_ptr = xmalloc(sizeof(struct check_job_info));
+	*jobinfo = (check_jobinfo_t) check_ptr;
+	return SLURM_SUCCESS;
+}
+
+extern int slurm_ckpt_free_job(check_jobinfo_t jobinfo)
+{
+	struct check_job_info *check_ptr = (struct check_job_info *)jobinfo;
+	if (check_ptr) {
+		xfree(check_ptr->error_msg);
+		xfree(check_ptr);
+	}
+	return SLURM_SUCCESS;
+}
+
+extern int slurm_ckpt_pack_job(check_jobinfo_t jobinfo, Buf buffer)
+{
+	struct check_job_info *check_ptr = 
+		(struct check_job_info *)jobinfo;
+ 
+	pack16(check_ptr->disabled, buffer);
+	pack_time(check_ptr->time_stamp, buffer);
+	pack32(check_ptr->error_code, buffer);
+	packstr(check_ptr->error_msg, buffer);
+
+	return SLURM_SUCCESS;
+}
+
+extern int slurm_ckpt_unpack_job(check_jobinfo_t jobinfo, Buf buffer)
+{
+	uint32_t uint32_tmp;
+	struct check_job_info *check_ptr =
+		(struct check_job_info *)jobinfo;
+
+	safe_unpack16(&check_ptr->disabled, buffer);
+	safe_unpack_time(&check_ptr->time_stamp, buffer);
+	safe_unpack32(&check_ptr->error_code, buffer);
+	safe_unpackstr_xmalloc(&check_ptr->error_msg, &uint32_tmp, buffer);
+	
+	return SLURM_SUCCESS; 
+
+    unpack_error:
+	xfree(check_ptr->error_msg);
+	return SLURM_ERROR;
+}
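slurm_ckpt_pack_job()/slurm_ckpt_unpack_job() must stay mirror images: same field order, same widths, and the unpack side must free any partially-unpacked strings when a safe_unpack* macro jumps to unpack_error. The invariant, shown on a hypothetical two-field struct using the same pack.h API:

```c
#include <slurm/slurm_errno.h>
#include "src/common/pack.h"
#include "src/common/xmalloc.h"

struct demo {
	uint16_t flags;
	char    *label;
};

static void demo_pack(struct demo *d, Buf buffer)
{
	pack16(d->flags, buffer);
	packstr(d->label, buffer);
}

static int demo_unpack(struct demo *d, Buf buffer)
{
	uint32_t tmp;

	safe_unpack16(&d->flags, buffer);	/* same order as demo_pack() */
	safe_unpackstr_xmalloc(&d->label, &tmp, buffer);
	return SLURM_SUCCESS;

unpack_error:			/* target of the safe_* macros */
	xfree(d->label);
	return SLURM_ERROR;
}
```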
+
+extern int slurm_ckpt_stepd_prefork(slurmd_job_t *job)
+{
+	char *old_env = NULL, *new_env = NULL, *ptr = NULL, *save_ptr = NULL;
+	
+	/*
+	 * A thread could be created here to communicate with the tasks
+	 * via sockets/pipes.  That may not be needed, since modifying
+	 * MVAPICH2 instead could suffice.
+	 */
+
+	/* Set LD_PRELOAD so the tasks start under BLCR control.  The
+	 * batch-only guard below is commented out, so this currently
+	 * applies to every task. */
+	//if (job->batch) {
+		old_env = getenvp(job->env, "LD_PRELOAD");
+		if (old_env) {
+			/* Filter out all libcr_run and libcr_omit
+			 * entries.  strtok_r() clobbers the old value
+			 * in place, but it is replaced below anyway. */
+			while ((ptr = strtok_r(old_env, " :", &save_ptr))) {
+				old_env = NULL;
+				if (!strncmp(ptr, "libcr_run.so", 12) ||
+				    !strncmp(ptr, "libcr_omit.so", 13))
+					continue;
+				xstrcat(new_env, ptr);
+				xstrcat(new_env, ":");
+			}
+		}
+		ptr = xstrdup("libcr_run.so");
+		if (new_env)
+			xstrfmtcat(ptr, ":%s", new_env);
+		setenvf(&job->env, "LD_PRELOAD", ptr);
+		xfree(new_env);
+		xfree(ptr);
+		//}
+	return SLURM_SUCCESS;
+}
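+
+/*
+ * Example of the LD_PRELOAD rewriting above: an inherited value of
+ * "libcr_run.so:libfoo.so" becomes "libcr_run.so:libfoo.so:".  Any
+ * existing libcr_run.so/libcr_omit.so entries are dropped and a single
+ * libcr_run.so is prepended; the trailing ":" leaves an empty list
+ * entry, which the dynamic linker ignores.
+ */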
+
+extern int slurm_ckpt_signal_tasks(slurmd_job_t *job, char *image_dir)
+{
+	char *argv[4];
+	char context_file[MAX_PATH_LEN];
+	char pid[16];
+	int status;
+	pid_t *children = NULL;
+	int *fd = NULL;
+	int rc = SLURM_SUCCESS;
+	int i;
+	char c;
+
+	debug3("checkpoint/blcr: slurm_ckpt_signal_tasks: image_dir=%s", 
+	       image_dir);
+	/*
+	 * the tasks must be checkpointed concurrently.
+	 */
+	children = xmalloc(sizeof(pid_t) * job->ntasks);
+	fd = xmalloc(sizeof(int) * 2 * job->ntasks);
+	if (!children || !fd) {
+		error("slurm_ckpt_signal_tasks: memory exhausted");
+		rc = SLURM_FAILURE;
+		goto out;
+	}
+	for (i = 0; i < job->ntasks; i ++) {
+		fd[i*2] = -1;
+		fd[i*2+1] = -1;
+	}
+
+	for (i = 0; i < job->ntasks; i ++) {
+		if (job->batch) {
+			snprintf(context_file, sizeof(context_file),
+				 "%s/script.ckpt", image_dir);
+		} else {
+			snprintf(context_file, sizeof(context_file),
+				 "%s/task.%d.ckpt",
+				 image_dir, job->task[i]->gtid);
+		}
+		snprintf(pid, sizeof(pid), "%u",
+			 (unsigned int)job->task[i]->pid);
+
+		if (pipe(&fd[i*2]) < 0) {
+			error("failed to create pipes: %m");
+			rc = SLURM_ERROR;
+			goto out_wait;
+		}
+
+		children[i] = fork();
+		if (children[i] < 0) {
+			error("error forking cr_checkpoint");
+			rc = SLURM_ERROR;
+			goto out_wait;
+		} else if (children[i] == 0) {
+			close(fd[i*2+1]);
+
+			/* wait for the parent's go (0) / abort (1) byte */
+			c = 1;	/* abort if the pipe closes early */
+			while (read(fd[i*2], &c, 1) < 0 && errno == EINTR);
+			if (c)
+				exit(-1);
+
+			/* change cred to job owner */
+			if (setgid(job->gid) < 0) {
+				error ("checkpoint/blcr: "
+				       "slurm_ckpt_signal_tasks: "
+				       "failed to setgid: %m");
+				exit(errno);
+			}
+			if (setuid(job->uid) < 0) {
+				error ("checkpoint/blcr: "
+				       "slurm_ckpt_signal_tasks: "
+				       "failed to setuid: %m");
+				exit(errno);
+			}
+			if (chdir(job->cwd) < 0) {
+				error ("checkpoint/blcr: "
+				       "slurm_ckpt_signal_tasks: "
+				       "failed to chdir: %m");
+				exit(errno);
+			}
+			
+			argv[0] = cr_checkpoint_path;
+			argv[1] = pid;
+			argv[2] = context_file;
+			argv[3] = NULL;
+
+			execv(argv[0], argv);
+			exit(errno);
+		}
+		close(fd[i*2]);
+	}
+
+ out_wait:
+	c = (rc == SLURM_SUCCESS) ? 0 : 1;
+	for (i = 0; i < job->ntasks; i ++) {
+		if (fd[i*2+1] >= 0) {
+			while(write(fd[i*2+1], &c, 1) < 0 && errno == EINTR);
+		}
+	}
+	/* waiting for the children sequentially is fine here */
+	for (i = 0; i < job->ntasks; i ++) {
+		if (children[i] == 0)
+			continue;
+		while(waitpid(children[i], &status, 0) < 0 && errno == EINTR);
+		if (!WIFEXITED(status) || (WEXITSTATUS(status) != 0))
+			rc = SLURM_ERROR;
+	}
+ out:
+	xfree(children);
+	xfree(fd);
+
+	return rc;
+}
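+
+/*
+ * The pipes above implement a simple go/abort barrier: one
+ * cr_checkpoint child is forked per task and each blocks reading one
+ * byte from its pipe.  Once every fork has succeeded the parent writes
+ * 0 ("go") to all pipes so the checkpoints start concurrently; on any
+ * setup failure it writes 1 ("abort") instead.
+ */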
+
+extern int slurm_ckpt_restart_task(slurmd_job_t *job, char *image_dir, int gtid)
+{
+	char *argv[3];
+	char context_file[MAX_PATH_LEN];
+
+	/* The job id and step id must NOT appear in the file name,
+	 * since the restarted tasks run as a new job/step */
+	if (job->batch) {
+		snprintf(context_file, sizeof(context_file),
+			 "%s/script.ckpt", image_dir);
+	} else {
+		snprintf(context_file, sizeof(context_file),
+			 "%s/task.%d.ckpt", image_dir, gtid);
+	}
+
+	argv[0] = cr_restart_path;
+	argv[1] = context_file;
+	argv[2] = NULL;
+
+	execv(argv[0], argv);
+
+	error("execv failure: %m");
+	return SLURM_ERROR;
+}
+
+
+/* Send a signal RPC to a list of nodes */
+static void _send_sig(uint32_t job_id, uint32_t step_id, uint16_t signal, 
+		      char *nodelist)
+{
+	agent_arg_t *agent_args;
+	kill_tasks_msg_t *kill_tasks_msg;
+
+	kill_tasks_msg = xmalloc(sizeof(kill_tasks_msg_t));
+	kill_tasks_msg->job_id		= job_id;
+	kill_tasks_msg->job_step_id	= step_id;
+	kill_tasks_msg->signal		= signal;
+
+	agent_args = xmalloc(sizeof(agent_arg_t));
+	agent_args->msg_type		= REQUEST_SIGNAL_TASKS;
+	agent_args->retry		= 1;
+	agent_args->msg_args		= kill_tasks_msg;
+	agent_args->hostlist		= hostlist_create(nodelist);
+	agent_args->node_count		= hostlist_count(agent_args->hostlist);
+
+	agent_queue_request(agent_args);
+}
+
+
+/* Checkpoint processing thread (detached): performs one checkpoint
+ * request, records the result, then frees its argument and exits */
+static void *_ckpt_agent_thr(void *arg)
+{
+	struct ckpt_req *req = (struct ckpt_req *)arg;
+	int rc;
+	/* Locks: write job */
+	slurmctld_lock_t job_write_lock = { 
+		NO_LOCK, WRITE_LOCK, NO_LOCK, NO_LOCK };
+	struct job_record *job_ptr;
+	struct step_record *step_ptr;
+	struct check_job_info *check_ptr;
+
+	/* perform checkpoint operations for only ONE job at a time */
+	slurm_mutex_lock(&ckpt_agent_mutex);
+	while (ckpt_agent_jobid && ckpt_agent_jobid != req->job_id) {
+		pthread_cond_wait(&ckpt_agent_cond, &ckpt_agent_mutex);
+	}
+	ckpt_agent_jobid = req->job_id;
+	ckpt_agent_count ++;
+	slurm_mutex_unlock(&ckpt_agent_mutex);
+
+	debug3("checkpoint/blcr: sending checkpoint tasks request to %u.%u",
+	       req->job_id, req->step_id);
+	
+	rc = checkpoint_tasks(req->job_id, req->step_id, req->begin_time,
+			      req->image_dir, req->wait, req->nodelist);
+
+	lock_slurmctld(job_write_lock);
+	
+	job_ptr = find_job_record(req->job_id);
+	if (!job_ptr) {
+		error("_ckpt_agent_thr: job finished");
+		goto out;
+	}
+	if (req->step_id == SLURM_BATCH_SCRIPT) {	/* batch job */
+		check_ptr = (struct check_job_info *)job_ptr->check_job;
+	} else {
+		step_ptr = find_step_record(job_ptr, req->step_id);
+		if (! step_ptr) {
+			error("_ckpt_agent_thr: step finished");
+			goto out;
+		}
+		check_ptr = (struct check_job_info *)step_ptr->check_job;
+	}
+	check_ptr->time_stamp = 0;
+	check_ptr->error_code = rc;
+	if (check_ptr->error_code != SLURM_SUCCESS)
+		check_ptr->error_msg = xstrdup(slurm_strerror(rc));
+
+ out:
+	unlock_slurmctld(job_write_lock);
+		
+	if (req->sig_done) {
+		_send_sig(req->job_id, req->step_id, req->sig_done, 
+			  req->nodelist);
+	}
+
+	_on_ckpt_complete(req->gid, req->uid, req->job_id, req->step_id, 
+			  req->image_dir, rc);
+
+	slurm_mutex_lock(&ckpt_agent_mutex);
+	ckpt_agent_count --;
+	if (ckpt_agent_count == 0) {
+		ckpt_agent_jobid = 0;
+		pthread_cond_broadcast(&ckpt_agent_cond);
+	}
+	slurm_mutex_unlock(&ckpt_agent_mutex);
+	_ckpt_req_free(req);
+	return NULL;
+}
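+
+/*
+ * Serialization above: ckpt_agent_jobid and ckpt_agent_count gate the
+ * agent threads so that only requests for a single job proceed at a
+ * time; threads working on other jobs block on ckpt_agent_cond until
+ * the count drops to zero and the job id is cleared.
+ */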
+
+
+static void _ckpt_req_free(void *ptr)
+{
+	struct ckpt_req *req = (struct ckpt_req *)ptr;
+	
+	if (req) {
+		xfree(req->image_dir);
+		xfree(req->nodelist);
+		xfree(req);
+	}
+}
+
+
+/* A checkpoint has completed; process the image files */
+static int _on_ckpt_complete(uint32_t group_id, uint32_t user_id,
+			     uint32_t job_id, uint32_t step_id,
+			     char *image_dir, uint32_t error_code)
+{
+	int status;
+	pid_t cpid;
+
+	if (access(scch_path, R_OK | X_OK) < 0) {
+		info("Access denied for %s: %m", scch_path);
+		return SLURM_ERROR;
+	}
+
+	if ((cpid = fork()) < 0) {
+		error ("_on_ckpt_complete: fork: %m");
+		return SLURM_ERROR;
+	}
+	
+	if (cpid == 0) {
+		/*
+		 * We do not wait for SCCH itself because the job read
+		 * lock is held and deleting/moving the checkpoint image
+		 * files could take minutes.  As a consequence there is
+		 * a race condition if the user requests another
+		 * checkpoint before SCCH finishes.
+		 */
+		/* fork twice to avoid zombies */
+		if ((cpid = fork()) < 0) {
+			error("_on_ckpt_complete: second fork: %m");
+			exit(127);
+		}
+		/* grand child execs */
+		if (cpid == 0) {
+			char *args[6];
+			char str_job[11];
+			char str_step[11];
+			char str_err[11];
+		
+			/*
+			 * XXX: if slurmctld is running as root, we must setuid here.
+			 * But what if slurmctld is running as SlurmUser?
+			 * How about we make scch setuid and pass the user/group to it?
+			 */
+			if (geteuid() == 0) { /* root */
+				if (setgid(group_id) < 0) {
+					error("_on_ckpt_complete: failed to "
+					      "setgid: %m");
+					exit(127);
+				}
+				if (setuid(user_id) < 0) {
+					error("_on_ckpt_complete: failed to "
+					      "setuid: %m");
+					exit(127);
+				}
+			}
+			snprintf(str_job,  sizeof(str_job),  "%u", job_id);
+			snprintf(str_step, sizeof(str_step), "%u", step_id);
+			snprintf(str_err,  sizeof(str_err),  "%u", error_code);
+
+			args[0] = scch_path;
+			args[1] = str_job;
+			args[2] = str_step;
+			args[3] = str_err;
+			args[4] = image_dir;
+			args[5] = NULL;
+
+			execv(scch_path, args);
+			error("execv failure: %m");
+			exit(127);
+		}
+		/* child just exits */
+		exit(0);
+	}
+
+	while(1) {
+		if (waitpid(cpid, &status, 0) < 0 && errno == EINTR)
+			continue;
+		break;
+	}
+
+	return SLURM_SUCCESS;
+}
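+
+/*
+ * Note on the double fork above: the grandchild (which execs scch and
+ * may run for minutes) is reparented to init when the first child
+ * exits, so the parent only waits for the short-lived first child and
+ * no zombie is left behind.
+ */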
diff --git a/src/plugins/checkpoint/blcr/cr_checkpoint.sh.in b/src/plugins/checkpoint/blcr/cr_checkpoint.sh.in
new file mode 100644
index 0000000000000000000000000000000000000000..23181dd379c691396caa0c8abb6fbcfe2bb49e3b
--- /dev/null
+++ b/src/plugins/checkpoint/blcr/cr_checkpoint.sh.in
@@ -0,0 +1,4 @@
+#!/bin/sh
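+# Invoked by the checkpoint/blcr plugin as: <script> <pid> <file>, with
+# $1 the PID of the task and $2 the checkpoint context file; -T appears
+# to request a checkpoint of the whole process tree rooted at $1.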
+image_dir=`dirname "$2"`
+mkdir -p "$image_dir"
+exec @BLCR_HOME@/bin/cr_checkpoint -T "$1" -f "$2"
diff --git a/src/plugins/checkpoint/blcr/cr_restart.sh.in b/src/plugins/checkpoint/blcr/cr_restart.sh.in
new file mode 100644
index 0000000000000000000000000000000000000000..2975987747660d25b0d4b2e54691d3df266448d9
--- /dev/null
+++ b/src/plugins/checkpoint/blcr/cr_restart.sh.in
@@ -0,0 +1,2 @@
+#!/bin/sh
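+# Invoked by the checkpoint/blcr plugin with $1 = the checkpoint
+# context file previously written by cr_checkpoint.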
+exec @BLCR_HOME@/bin/cr_restart "$1"
diff --git a/src/plugins/checkpoint/none/Makefile.in b/src/plugins/checkpoint/none/Makefile.in
index 06685cc82bc28444022e23b351040bc9b130a749..b3a6ea2b3058ba63d4ace1067131aa2217171479 100644
--- a/src/plugins/checkpoint/none/Makefile.in
+++ b/src/plugins/checkpoint/none/Makefile.in
@@ -43,14 +43,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -109,6 +113,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
diff --git a/src/plugins/checkpoint/none/checkpoint_none.c b/src/plugins/checkpoint/none/checkpoint_none.c
index 78a441782275b6127c83816361123ff3da347ae0..b05b00dd27f7e9a8a18dd983c61c9783a49a0590 100644
--- a/src/plugins/checkpoint/none/checkpoint_none.c
+++ b/src/plugins/checkpoint/none/checkpoint_none.c
@@ -1,13 +1,15 @@
 /*****************************************************************************\
  *  checkpoint_none.c - NO-OP slurm checkpoint plugin.
  *****************************************************************************
- *  Copyright (C) 2004 The Regents of the University of California.
+ *  Copyright (C) 2004-2007 The Regents of the University of California.
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -82,7 +84,7 @@
  */
 const char plugin_name[]       	= "Checkpoint NONE plugin";
 const char plugin_type[]       	= "checkpoint/none";
-const uint32_t plugin_version	= 90;
+const uint32_t plugin_version	= 100;
 
 /*
  * init() is called when the plugin is loaded, before any other functions
@@ -102,14 +104,15 @@ extern int fini ( void )
  * The remainder of this file implements the standard SLURM checkpoint API.
  */
 
-extern int slurm_ckpt_op ( uint16_t op, uint16_t data,
-		struct step_record * step_ptr, time_t * event_time,
-		uint32_t *error_code, char **error_msg )
+extern int slurm_ckpt_op (uint32_t job_id, uint32_t step_id, 
+			  struct step_record *step_ptr, uint16_t op,
+			  uint16_t data, char *image_dir, time_t * event_time, 
+			  uint32_t *error_code, char **error_msg )
 {
 	return ESLURM_NOT_SUPPORTED;
 }
 
-extern int slurm_ckpt_comp ( struct step_record * step_ptr, time_t event_time,
+extern int slurm_ckpt_comp (struct step_record * step_ptr, time_t event_time,
 		uint32_t error_code, char *error_msg)
 {
 	return ESLURM_NOT_SUPPORTED;
@@ -135,9 +138,24 @@ extern int slurm_ckpt_unpack_job(check_jobinfo_t jobinfo, Buf buffer)
 	return SLURM_SUCCESS;
 }
 
-extern int slurm_ckpt_task_comp ( struct step_record * step_ptr, uint32_t task_id,
-				  time_t event_time, uint32_t error_code, char *error_msg )
+extern int slurm_ckpt_task_comp (struct step_record * step_ptr, 
+				 uint32_t task_id, time_t event_time, 
+				 uint32_t error_code, char *error_msg )
 {
 	return SLURM_SUCCESS;
 }
 
+extern int slurm_ckpt_stepd_prefork(void *slurmd_job)
+{
+	return SLURM_SUCCESS;
+}
+
+extern int slurm_ckpt_signal_tasks(void *slurmd_job)
+{
+	return ESLURM_NOT_SUPPORTED;
+}
+
+extern int slurm_ckpt_restart_task(void *slurmd_job, char *image_dir, int gtid)
+{
+	return ESLURM_NOT_SUPPORTED;
+}
diff --git a/src/plugins/checkpoint/ompi/Makefile.in b/src/plugins/checkpoint/ompi/Makefile.in
index 82ca1614294572c1c73732d4474c8a014852fe9b..bfa81bac3ae606f2ed90a19d6f295ab398ec7410 100644
--- a/src/plugins/checkpoint/ompi/Makefile.in
+++ b/src/plugins/checkpoint/ompi/Makefile.in
@@ -43,14 +43,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -109,6 +113,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
diff --git a/src/plugins/checkpoint/ompi/checkpoint_ompi.c b/src/plugins/checkpoint/ompi/checkpoint_ompi.c
index 37a60f63a926a9cfa1f9a34c761c63f182815651..513fc93f289c2199f69dc3007a28dc43cd6db370 100644
--- a/src/plugins/checkpoint/ompi/checkpoint_ompi.c
+++ b/src/plugins/checkpoint/ompi/checkpoint_ompi.c
@@ -2,12 +2,14 @@
  *  checkpoint_ompi.c - OpenMPI slurm checkpoint plugin.
  *****************************************************************************
  *  Copyright (C) 2007 The Regents of the University of California.
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -98,7 +100,7 @@ static int _ckpt_step(struct step_record * step_ptr, uint16_t wait, int vacate);
  */
 const char plugin_name[]       	= "OpenMPI checkpoint plugin";
 const char plugin_type[]       	= "checkpoint/ompi";
-const uint32_t plugin_version	= 90;
+const uint32_t plugin_version	= 100;
 
 /*
  * init() is called when the plugin is loaded, before any other functions
@@ -121,15 +123,17 @@ extern int fini ( void )
  * The remainder of this file implements the standard SLURM checkpoint API.
  */
 
-extern int slurm_ckpt_op ( uint16_t op, uint16_t data,
-		struct step_record * step_ptr, time_t * event_time,
-		uint32_t *error_code, char **error_msg )
+extern int slurm_ckpt_op (uint32_t job_id, uint32_t step_id, 
+			  struct step_record *step_ptr, uint16_t op,
+			  uint16_t data, char *image_dir, time_t * event_time, 
+			  uint32_t *error_code, char **error_msg )
 {
 	int rc = SLURM_SUCCESS;
 	struct check_job_info *check_ptr;
 
-	xassert(step_ptr);
-	check_ptr = (struct check_job_info *) step_ptr->check_job;
+	if (!step_ptr)	/* batch job restore */
+		return ESLURM_NOT_SUPPORTED;
+	check_ptr = (struct check_job_info *)step_ptr->check_job;
 	xassert(check_ptr);
 
 	switch (op) {
@@ -308,9 +312,24 @@ static int _ckpt_step(struct step_record * step_ptr, uint16_t wait, int vacate)
 	return SLURM_SUCCESS;
 }
 
-extern int slurm_ckpt_task_comp ( struct step_record * step_ptr, uint32_t task_id,
-				  time_t event_time, uint32_t error_code, char *error_msg )
+extern int slurm_ckpt_task_comp ( struct step_record * step_ptr, 
+				  uint32_t task_id, time_t event_time,
+				  uint32_t error_code, char *error_msg )
+{
+	return SLURM_SUCCESS;
+}
+
+extern int slurm_ckpt_stepd_prefork(void *slurmd_job)
 {
 	return SLURM_SUCCESS;
 }
 
+extern int slurm_ckpt_signal_tasks(void *slurmd_job)
+{
+	return ESLURM_NOT_SUPPORTED;
+}
+
+extern int slurm_ckpt_restart_task(void *slurmd_job, char *image_dir, int gtid)
+{
+	return ESLURM_NOT_SUPPORTED;
+}
diff --git a/src/plugins/checkpoint/xlch/Makefile.am b/src/plugins/checkpoint/xlch/Makefile.am
index 93d4975109f2daf6906ed4ec26941e9c2a4d8f09..67c1f5c6439093d747003766e26b15988db307dd 100644
--- a/src/plugins/checkpoint/xlch/Makefile.am
+++ b/src/plugins/checkpoint/xlch/Makefile.am
@@ -7,12 +7,9 @@ PLUGIN_FLAGS = -module -avoid-version --export-dynamic
 INCLUDES = -I$(top_srcdir) -I$(top_srcdir)/src/common
 
 pkglib_LTLIBRARIES = checkpoint_xlch.la
-checkpoint_xlch_la_SOURCES = checkpoint_xlch.c config.c
+checkpoint_xlch_la_SOURCES = checkpoint_xlch.c
 checkpoint_xlch_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS)
 
-config.c: Makefile
-	@( echo "char *scch_path = \"$(prefix)/sbin/scch\";"\
-         ) > config.c
 
 force:
 
diff --git a/src/plugins/checkpoint/xlch/Makefile.in b/src/plugins/checkpoint/xlch/Makefile.in
index 8ce983abc6dcc340a4a50fb62b5959ee5cb00460..e6ac9386a794e0129d021905b8cde5da520a1aec 100644
--- a/src/plugins/checkpoint/xlch/Makefile.in
+++ b/src/plugins/checkpoint/xlch/Makefile.in
@@ -43,14 +43,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -77,7 +81,7 @@ am__installdirs = "$(DESTDIR)$(pkglibdir)"
 pkglibLTLIBRARIES_INSTALL = $(INSTALL)
 LTLIBRARIES = $(pkglib_LTLIBRARIES)
 checkpoint_xlch_la_LIBADD =
-am_checkpoint_xlch_la_OBJECTS = checkpoint_xlch.lo config.lo
+am_checkpoint_xlch_la_OBJECTS = checkpoint_xlch.lo
 checkpoint_xlch_la_OBJECTS = $(am_checkpoint_xlch_la_OBJECTS)
 checkpoint_xlch_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \
 	$(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \
@@ -109,6 +113,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
@@ -272,7 +280,7 @@ AUTOMAKE_OPTIONS = foreign
 PLUGIN_FLAGS = -module -avoid-version --export-dynamic 
 INCLUDES = -I$(top_srcdir) -I$(top_srcdir)/src/common
 pkglib_LTLIBRARIES = checkpoint_xlch.la
-checkpoint_xlch_la_SOURCES = checkpoint_xlch.c config.c
+checkpoint_xlch_la_SOURCES = checkpoint_xlch.c
 checkpoint_xlch_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS)
 all: all-am
 
@@ -344,7 +352,6 @@ distclean-compile:
 	-rm -f *.tab.c
 
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/checkpoint_xlch.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/config.Plo@am__quote@
 
 .c.o:
 @am__fastdepCC_TRUE@	$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
@@ -553,10 +560,6 @@ uninstall-am: uninstall-pkglibLTLIBRARIES
 	tags uninstall uninstall-am uninstall-pkglibLTLIBRARIES
 
 
-config.c: Makefile
-	@( echo "char *scch_path = \"$(prefix)/sbin/scch\";"\
-         ) > config.c
-
 force:
 
 $(checkpoint_xlch_LDADD) : force
diff --git a/src/plugins/checkpoint/xlch/checkpoint_xlch.c b/src/plugins/checkpoint/xlch/checkpoint_xlch.c
index 6089588cb435612a9997e97c00f071603b004584..f2afc85287e798fb352d7340a8256ddf4073e705 100644
--- a/src/plugins/checkpoint/xlch/checkpoint_xlch.c
+++ b/src/plugins/checkpoint/xlch/checkpoint_xlch.c
@@ -2,15 +2,14 @@
  *  checkpoint_xlch.c - XLCH slurm checkpoint plugin.
  *  $Id: checkpoint_xlch.c 0001 2006-10-31 10:55:11Z hjcao $
  *****************************************************************************
- *  Copied from checkpoint_aix.c
- *  
- *  Copyright (C) 2004 The Regents of the University of California.
- *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
- *  Written by Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  Derived from checkpoint_aix.c
+ *  Copyright (C) 2007-2009 National University of Defense Technology, China.
+ *  Written by Hongjia Cao.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -68,8 +67,9 @@
 #include "src/common/xmalloc.h"
 #include "src/slurmctld/agent.h"
 #include "src/slurmctld/slurmctld.h"
+#include "src/slurmd/slurmstepd/slurmstepd_job.h"
 
-#define SIGCKPT 20
+#define SIGCKPT SIGUSR2
 
 struct check_job_info {
 	uint16_t disabled;	/* counter, checkpointable only if zero */
@@ -87,10 +87,9 @@ struct check_job_info {
 
 static void _send_sig(uint32_t job_id, uint32_t step_id, uint16_t signal, 
 		      char *nodelist);
-static void _send_ckpt(uint32_t job_id, uint32_t step_id, uint16_t signal, 
-		       time_t timestamp, char *nodelist);
+
 static int _step_ckpt(struct step_record * step_ptr, uint16_t wait, 
-		      uint16_t signal, uint16_t sig_timeout);
+		      char *image_dir, uint16_t sig_timeout);
 
 /* checkpoint request timeout processing */
 static pthread_t	ckpt_agent_tid = 0;
@@ -115,7 +114,7 @@ static void  _ckpt_signal_step(struct ckpt_timeout_info *rec);
 
 static int _on_ckpt_complete(struct step_record *step_ptr, uint32_t error_code);
 
-extern char *scch_path;
+static char *scch_path = SLURM_PREFIX "/sbin/scch";
 
 /*
  * These variables are required by the generic plugin interface.  If they
@@ -147,7 +146,7 @@ extern char *scch_path;
  */
 const char plugin_name[]       	= "XLCH checkpoint plugin";
 const char plugin_type[]       	= "checkpoint/xlch";
-const uint32_t plugin_version	= 10;
+const uint32_t plugin_version	= 100;
 
 /*
  * init() is called when the plugin is loaded, before any other functions
@@ -192,13 +191,18 @@ extern int fini ( void )
  * The remainder of this file implements the standard SLURM checkpoint API.
  */
 
-extern int slurm_ckpt_op ( uint16_t op, uint16_t data,
-			   struct step_record * step_ptr, time_t * event_time, 
-			   uint32_t *error_code, char **error_msg )
+extern int slurm_ckpt_op (uint32_t job_id, uint32_t step_id, 
+			  struct step_record *step_ptr, uint16_t op,
+			  uint16_t data, char *image_dir, time_t * event_time, 
+			  uint32_t *error_code, char **error_msg )
 {
 	int rc = SLURM_SUCCESS;
 	struct check_job_info *check_ptr;
 
+	/* checkpoint/xlch does not support checkpointing batch jobs */
+	if (step_id == SLURM_BATCH_SCRIPT)
+		return ESLURM_NOT_SUPPORTED;
+	
 	xassert(step_ptr);
 	check_ptr = (struct check_job_info *) step_ptr->check_job;
 	check_ptr->task_cnt = step_ptr->step_layout->task_cnt; /* set it early */
@@ -233,7 +237,7 @@ extern int slurm_ckpt_op ( uint16_t op, uint16_t data,
 			check_ptr->error_code = 0;
 			check_ptr->sig_done = 0;
 			xfree(check_ptr->error_msg);
-			rc = _step_ckpt(step_ptr, data, SIGCKPT, SIGKILL);
+			rc = _step_ckpt(step_ptr, data, image_dir, SIGKILL);
 			break;
 		case CHECK_VACATE:
 			if (check_ptr->time_stamp != 0) {
@@ -246,7 +250,7 @@ extern int slurm_ckpt_op ( uint16_t op, uint16_t data,
 			check_ptr->error_code = 0;
 			check_ptr->sig_done = SIGTERM; /* exit elegantly */
 			xfree(check_ptr->error_msg);
-			rc = _step_ckpt(step_ptr, data, SIGCKPT, SIGKILL);
+			rc = _step_ckpt(step_ptr, data, image_dir, SIGKILL);
 			break;
 		case CHECK_RESTART:
 			rc = ESLURM_NOT_SUPPORTED;
@@ -273,7 +277,7 @@ extern int slurm_ckpt_comp ( struct step_record * step_ptr, time_t event_time,
 		uint32_t error_code, char *error_msg )
 {
 	error("checkpoint/xlch: slurm_ckpt_comp not implemented");
-	return SLURM_FAILURE; 
+	return ESLURM_NOT_SUPPORTED; 
 }
 
 extern int slurm_ckpt_task_comp ( struct step_record * step_ptr, uint32_t task_id,
@@ -427,29 +431,6 @@ extern int slurm_ckpt_unpack_job(check_jobinfo_t jobinfo, Buf buffer)
 	return SLURM_ERROR;
 }
 
-/* Send a checkpoint RPC to a specific job step */
-static void _send_ckpt(uint32_t job_id, uint32_t step_id, uint16_t signal, 
-		       time_t timestamp, char *nodelist)
-{
-	agent_arg_t *agent_args;
-	checkpoint_tasks_msg_t *ckpt_tasks_msg;
-
-	ckpt_tasks_msg = xmalloc(sizeof(checkpoint_tasks_msg_t));
-	ckpt_tasks_msg->job_id		= job_id;
-	ckpt_tasks_msg->job_step_id	= step_id;
-	ckpt_tasks_msg->signal		= signal;
-	ckpt_tasks_msg->timestamp       = timestamp;
-
-	agent_args = xmalloc(sizeof(agent_arg_t));
-	agent_args->msg_type		= REQUEST_CHECKPOINT_TASKS;
-	agent_args->retry		= 1; /* keep retrying until all nodes receives the request */
-	agent_args->msg_args		= ckpt_tasks_msg;
-	agent_args->hostlist 		= hostlist_create(nodelist);
-	agent_args->node_count		= hostlist_count(agent_args->hostlist);
-
-	agent_queue_request(agent_args);
-}
-
 /* Send a signal RPC to a list of nodes */
 static void _send_sig(uint32_t job_id, uint32_t step_id, uint16_t signal, 
 		      char *nodelist)
@@ -474,8 +455,8 @@ static void _send_sig(uint32_t job_id, uint32_t step_id, uint16_t signal,
 
 /* Send checkpoint request to the processes of a job step.
  * If the request times out, send sig_timeout. */
-static int _step_ckpt(struct step_record * step_ptr, uint16_t wait, 
-		      uint16_t signal, uint16_t sig_timeout)
+static int _step_ckpt(struct step_record * step_ptr, uint16_t wait,
+		      char *image_dir, uint16_t sig_timeout)
 {
 	struct check_job_info *check_ptr;
 	struct job_record *job_ptr;
@@ -501,9 +482,9 @@ static int _step_ckpt(struct step_record * step_ptr, uint16_t wait,
 	char* nodelist = xstrdup (step_ptr->step_layout->node_list);
 	check_ptr->wait_time  = wait; /* TODO: how about change wait_time according to task_cnt? */
 
-	_send_ckpt(step_ptr->job_ptr->job_id, step_ptr->step_id,
-		   signal, check_ptr->time_stamp, nodelist);
-
+	checkpoint_tasks(step_ptr->job_ptr->job_id, step_ptr->step_id,
+			 check_ptr->time_stamp, image_dir, wait, nodelist);
+	
 	_ckpt_enqueue_timeout(step_ptr->job_ptr->job_id, 
 			      step_ptr->step_id, check_ptr->time_stamp, 
 			      sig_timeout, check_ptr->wait_time, nodelist);  
@@ -675,7 +656,7 @@ static int _on_ckpt_complete(struct step_record *step_ptr, uint32_t error_code)
 			args[1] = str_job;
 			args[2] = str_step;
 			args[3] = str_err;
-			args[4] = step_ptr->ckpt_path;
+			args[4] = step_ptr->ckpt_dir;
 			args[5] = NULL;
 
 			execv(scch_path, args);
@@ -694,3 +675,31 @@ static int _on_ckpt_complete(struct step_record *step_ptr, uint32_t error_code)
 
 	return SLURM_SUCCESS;
 }
+
+extern int slurm_ckpt_stepd_prefork(void *slurmd_job)
+{
+	return SLURM_SUCCESS;
+}
+
+extern int slurm_ckpt_signal_tasks(void *slurmd_job)
+{
+	/* send SIGCKPT to all tasks */
+	return killpg(((slurmd_job_t *)slurmd_job)->pgid, SIGCKPT);
+}
+
+extern int slurm_ckpt_restart_task(void *slurmd_job, char *image_dir, int gtid)
+{
+	char buf[256];
+	
+	if (snprintf(buf, sizeof(buf), "%s/task.%d.ckpt",
+		     image_dir, gtid) >= sizeof(buf)) {
+		error("checkpoint file path too long: %s/task.%d.ckpt",
+		      image_dir, gtid);
+		return SLURM_FAILURE;
+	}
+	/* Restarting the task and updating its environment is not yet
+	 * implemented, so this always fails */
+#if 0
+	restart(buf, ((slurmd_job_t *)slurmd_job)->env);
+#endif
+
+	error("checkpoint/xlch: restart not implemented: rank=%d, file=%s",
+	      gtid, buf);
+	return SLURM_FAILURE;
+}
diff --git a/src/plugins/crypto/Makefile.in b/src/plugins/crypto/Makefile.in
index 9145fcfaa8b21c022e8ac80cc42927f7c5527ccb..6b37a5fd3d3a0deda4a4b1b085c52e67c5a3c214 100644
--- a/src/plugins/crypto/Makefile.in
+++ b/src/plugins/crypto/Makefile.in
@@ -42,14 +42,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -91,6 +95,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
diff --git a/src/plugins/crypto/munge/Makefile.in b/src/plugins/crypto/munge/Makefile.in
index 9ab86121201fe3d3c672d44b3d6bb2fe54b91779..443a7afd643080bfc1c68f1e86e68c89528db437 100644
--- a/src/plugins/crypto/munge/Makefile.in
+++ b/src/plugins/crypto/munge/Makefile.in
@@ -43,14 +43,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -111,6 +115,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
diff --git a/src/plugins/crypto/munge/crypto_munge.c b/src/plugins/crypto/munge/crypto_munge.c
index 1f9e0df5cdb7561e1cda3cc12035616f5b9a4f3c..2021d6cd53ec200f7c7ab696137733d7f5467cfe 100644
--- a/src/plugins/crypto/munge/crypto_munge.c
+++ b/src/plugins/crypto/munge/crypto_munge.c
@@ -2,12 +2,14 @@
  *  crypto_munge.c - Munge based cryptographic signature plugin
  *****************************************************************************
  *  Copyright (C) 2007 The Regents of the University of California.
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Mark A. Grondona <mgrondona@llnl.gov>.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -94,7 +96,17 @@ const char plugin_name[]        = "Munge cryptographic signature plugin";
 const char plugin_type[]        = "crypto/munge";
 const uint32_t plugin_version   = 90;
 
-static munge_err_t munge_err;
+
+/*
+ *  Error codes local to this plugin:
+ */
+enum local_error_code {
+	ESIG_BUF_DATA_MISMATCH = 5000,
+	ESIG_BUF_SIZE_MISMATCH,
+	ESIG_BAD_USERID,
+};
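+
+/* (The base value 5000 presumably sits above any munge_err_t value,
+ * letting crypto_str_error() below distinguish plugin-local codes
+ * from munge error codes.) */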
+
+static uid_t slurm_user = 0;
 
 /*
  * init() is called when the plugin is loaded, before any other functions
@@ -126,102 +138,122 @@ crypto_destroy_key(void *key)
 extern void *
 crypto_read_private_key(const char *path)
 {
-	return (void *) munge_ctx_create();
-}
+	munge_ctx_t ctx;
+	munge_err_t err;
+
+	if ((ctx = munge_ctx_create()) == NULL) {
+		error ("crypto_read_private_key: munge_ctx_create failed");
+		return (NULL);
+	}
+
+	/*
+	 *   Only allow slurmd_user (usually root) to decode job
+	 *   credentials created by slurmctld.  This provides a slight
+	 *   layer of extra security, as non-privileged users cannot
+	 *   get at the contents of job credentials.
+	 */
+	err = munge_ctx_set(ctx, MUNGE_OPT_UID_RESTRICTION, 
+			    slurm_get_slurmd_user_id());
+
+	if (err != EMUNGE_SUCCESS) {
+		error("Unable to set uid restriction on munge credentials: %s",
+		      munge_ctx_strerror (ctx));
+		munge_ctx_destroy(ctx);
+		return(NULL);
+	}
 
+	return ((void *) ctx);
+}
 
 extern void *
 crypto_read_public_key(const char *path)
 {
+	/*
+	 * Get slurm user id once. We use it later to verify credentials.
+	 */
+	slurm_user = slurm_get_slurm_user_id();
+
 	return (void *) munge_ctx_create();
 }
 
-extern char *
-crypto_str_error(void)
+extern const char *
+crypto_str_error(int errnum)
 {
-	return (char *) munge_strerror(munge_err); 
+	if (errnum == ESIG_BUF_DATA_MISMATCH)
+		return "Credential data mismatch";
+	else if (errnum == ESIG_BUF_SIZE_MISMATCH)
+		return "Credential data size mismatch";
+	else if (errnum == ESIG_BAD_USERID)
+		return "Credential created by invalid user";
+	else
+		return munge_strerror ((munge_err_t) errnum);
 }
 
 /* NOTE: Caller must xfree the signature returned by sig_pp */
 extern int
 crypto_sign(void * key, char *buffer, int buf_size, char **sig_pp, 
-		unsigned int *sig_size_p) 
+	    unsigned int *sig_size_p) 
 {
 	char *cred;
+	munge_err_t err;
 
-	munge_err = munge_encode(&cred, (munge_ctx_t) key,
-				 buffer, buf_size);
+	err = munge_encode(&cred, (munge_ctx_t) key,
+			   buffer, buf_size);
 
-	if (munge_err != EMUNGE_SUCCESS)
-		return SLURM_ERROR;
+	if (err != EMUNGE_SUCCESS)
+		return err;
 
 	*sig_size_p = strlen(cred) + 1;
 	*sig_pp = xstrdup(cred);
 	free(cred); 
-	return SLURM_SUCCESS;
+	return 0;
 }
 
 extern int
 crypto_verify_sign(void * key, char *buffer, unsigned int buf_size, 
-		char *signature, unsigned int sig_size)
+		   char *signature, unsigned int sig_size)
 {
-	static uid_t slurm_user = 0;
-	static int got_slurm_user = 0;
 	uid_t uid;
 	gid_t gid;
 	void *buf_out;
 	int   buf_out_size;
+	int   rc = 0;
+	munge_err_t err;
 
-	munge_err = munge_decode(signature, (munge_ctx_t) key,
-				 &buf_out, &buf_out_size, 
-				 &uid, &gid);
+	err = munge_decode(signature, (munge_ctx_t) key,
+			   &buf_out, &buf_out_size, 
+			   &uid, &gid);
 
-	if (munge_err != EMUNGE_SUCCESS) {
+	if (err != EMUNGE_SUCCESS) {
 #ifdef MULTIPLE_SLURMD
 		/* In multiple slurmd mode this will happen all the
 		 * time since we are authenticating with the same
 		 * munged.
 		 */
-		if (munge_err != EMUNGE_CRED_REPLAYED) {
-			return SLURM_ERROR;
+		if (err != EMUNGE_CRED_REPLAYED) {
+			return err;
 		} else {
 			debug2("We had a replayed crypto, "
 			       "but this is expected in multiple "
 			       "slurmd mode.");
-			munge_err = 0;
 		}
 #else
-		return SLURM_ERROR;
+		return err;
 #endif
 	}
 
-	if (!got_slurm_user) {
-		slurm_user = slurm_get_slurm_user_id();
-		got_slurm_user = 1;
-	}
 
 	if ((uid != slurm_user) && (uid != 0)) {
-		error("crypto/munge: bad user id (%d != %d)", 
-			(int) slurm_user, (int) uid);
-		munge_err = EMUNGE_CRED_UNAUTHORIZED;
-		free(buf_out);
-		return SLURM_ERROR;
-	}
-
-	if (buf_size != buf_out_size) {
-		error("crypto/munge: buf_size bad");
-		munge_err = EMUNGE_CRED_INVALID;
-		free(buf_out);
-		return SLURM_ERROR;
-	}
-
-	if (memcmp(buffer, buf_out, buf_size)) {
-		error("crypto/munge: buffers different");
-		munge_err = EMUNGE_CRED_INVALID;
-		free(buf_out);
-		return SLURM_ERROR;
+		error("crypto/munge: Unexpected uid (%d) != SLURM uid (%d)",
+		      (int) uid, (int) slurm_user);
+		rc = ESIG_BAD_USERID;
 	}
+	else if (buf_size != buf_out_size)
+		rc = ESIG_BUF_SIZE_MISMATCH;
+	else if (memcmp(buffer, buf_out, buf_size))
+		rc = ESIG_BUF_DATA_MISMATCH;
 
 	free(buf_out);
-	return SLURM_SUCCESS;
+	return rc;
 }
diff --git a/src/plugins/crypto/openssl/Makefile.in b/src/plugins/crypto/openssl/Makefile.in
index 957c0746fe5dc786f9089937d58e246e6b9ef693..27290006edc810ea59f02584b5177f121bd6965f 100644
--- a/src/plugins/crypto/openssl/Makefile.in
+++ b/src/plugins/crypto/openssl/Makefile.in
@@ -43,14 +43,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -116,6 +120,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
diff --git a/src/plugins/crypto/openssl/crypto_openssl.c b/src/plugins/crypto/openssl/crypto_openssl.c
index a6ab7c8de6121b1a040e8af8193a6f34e86a2a7d..6d0e5c33ea01c4e0b6457766406243a68a026de3 100644
--- a/src/plugins/crypto/openssl/crypto_openssl.c
+++ b/src/plugins/crypto/openssl/crypto_openssl.c
@@ -5,10 +5,11 @@
  *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Mark A. Grondona <mgrondona@llnl.gov>.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -162,8 +163,8 @@ crypto_read_public_key(const char *path)
 	return (void *) pk;
 }
 
-extern char *
-crypto_str_error(void)
+extern const char *
+crypto_str_error(int errnum)
 {
 	static int loaded = 0;
 
diff --git a/src/plugins/jobacct_gather/Makefile.in b/src/plugins/jobacct_gather/Makefile.in
index 575ea7ad654918fe40892f2f38cb7cc1468d7222..0cc8a1f868db6681916589f22be401f1b1801da6 100644
--- a/src/plugins/jobacct_gather/Makefile.in
+++ b/src/plugins/jobacct_gather/Makefile.in
@@ -42,14 +42,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -91,6 +95,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
diff --git a/src/plugins/jobacct_gather/aix/Makefile.in b/src/plugins/jobacct_gather/aix/Makefile.in
index 2618af22a0bd21ee918fcc8b396e8f3681954203..7a9cb5ce98b3f845b0cb572ae73b4138c04ada21 100644
--- a/src/plugins/jobacct_gather/aix/Makefile.in
+++ b/src/plugins/jobacct_gather/aix/Makefile.in
@@ -43,14 +43,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -109,6 +113,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
diff --git a/src/plugins/jobacct_gather/aix/jobacct_gather_aix.c b/src/plugins/jobacct_gather/aix/jobacct_gather_aix.c
index 2abb0afd2fa6f88ef46583d33bf6d14ded1f1bf0..ab461450d700c5cb1eb6a66b424015ab79b434d5 100644
--- a/src/plugins/jobacct_gather/aix/jobacct_gather_aix.c
+++ b/src/plugins/jobacct_gather/aix/jobacct_gather_aix.c
@@ -5,10 +5,11 @@
  *  Copyright (C) 2005 Hewlett-Packard Development Company, L.P.
  *  Written by Andy Riebs, <andy.riebs@hp.com>, who borrowed heavily
  *  from other parts of SLURM, and Danny Auble, <da@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/plugins/jobacct_gather/linux/Makefile.in b/src/plugins/jobacct_gather/linux/Makefile.in
index 88e4b0cdc1ed8c67b021e48417e92d70001668cd..420acc4c2226f2520bd000c31c86f5570fc9c4a9 100644
--- a/src/plugins/jobacct_gather/linux/Makefile.in
+++ b/src/plugins/jobacct_gather/linux/Makefile.in
@@ -43,14 +43,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -111,6 +115,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
diff --git a/src/plugins/jobacct_gather/linux/jobacct_gather_linux.c b/src/plugins/jobacct_gather/linux/jobacct_gather_linux.c
index ba5abdc5080e2641007f71e317ca9a6f5f68b6c5..bfd0a22c79d9fcc4e059a46fd94902c5bd5d3913 100644
--- a/src/plugins/jobacct_gather/linux/jobacct_gather_linux.c
+++ b/src/plugins/jobacct_gather/linux/jobacct_gather_linux.c
@@ -4,10 +4,11 @@
  *  Copyright (C) 2005 Hewlett-Packard Development Company, L.P.
  *  Written by Andy Riebs, <andy.riebs@hp.com>, who borrowed heavily
  *  from other parts of SLURM, and Danny Auble, <da@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/plugins/jobacct_gather/none/Makefile.in b/src/plugins/jobacct_gather/none/Makefile.in
index fb0ecbc42a37dc7e99d505fd68e4b7d968809469..1f301a82c34c2349c868fa95696b4ecfde4b7751 100644
--- a/src/plugins/jobacct_gather/none/Makefile.in
+++ b/src/plugins/jobacct_gather/none/Makefile.in
@@ -43,14 +43,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -109,6 +113,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
diff --git a/src/plugins/jobacct_gather/none/jobacct_gather_none.c b/src/plugins/jobacct_gather/none/jobacct_gather_none.c
index 48df0bf2d23481485837767a1684b18123f41453..16afda5a08df03b2bd6e48adffba7e2fd247d72f 100644
--- a/src/plugins/jobacct_gather/none/jobacct_gather_none.c
+++ b/src/plugins/jobacct_gather/none/jobacct_gather_none.c
@@ -4,10 +4,11 @@
  *
  *  Copyright (C) 2005 Hewlett-Packard Development Company, L.P.
  *  Written by Andy Riebs, <andy.riebs@hp.com>.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/plugins/jobcomp/Makefile.in b/src/plugins/jobcomp/Makefile.in
index 35743b775c94d5deffa54531a030540259d36dbb..48317d2b45bac3ba774f5ce94c749e66af31972e 100644
--- a/src/plugins/jobcomp/Makefile.in
+++ b/src/plugins/jobcomp/Makefile.in
@@ -42,14 +42,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -91,6 +95,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
diff --git a/src/plugins/jobcomp/filetxt/Makefile.in b/src/plugins/jobcomp/filetxt/Makefile.in
index 1615d81b0f24c368d6118d55ae5ee211ff6349b0..9c9e409035b28404dc7024cd07ffc55827032c30 100644
--- a/src/plugins/jobcomp/filetxt/Makefile.in
+++ b/src/plugins/jobcomp/filetxt/Makefile.in
@@ -43,14 +43,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -110,6 +114,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
diff --git a/src/plugins/jobcomp/filetxt/filetxt_jobcomp_process.c b/src/plugins/jobcomp/filetxt/filetxt_jobcomp_process.c
index 1a5048f1ed81afe0076d26ad491c9e8628635ca9..3a4b17450b67b5f91a1a6fb754f9777e05b2c005 100644
--- a/src/plugins/jobcomp/filetxt/filetxt_jobcomp_process.c
+++ b/src/plugins/jobcomp/filetxt/filetxt_jobcomp_process.c
@@ -9,7 +9,8 @@
  *  Written by Danny Auble <da@llnl.gov>
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/plugins/jobcomp/filetxt/filetxt_jobcomp_process.h b/src/plugins/jobcomp/filetxt/filetxt_jobcomp_process.h
index 178e85efbd6dad380693c5f1a3c9af41c678ea2c..3d2c54380fc55133b87e715a879a07bc609cd580 100644
--- a/src/plugins/jobcomp/filetxt/filetxt_jobcomp_process.h
+++ b/src/plugins/jobcomp/filetxt/filetxt_jobcomp_process.h
@@ -9,7 +9,8 @@
  *  Written by Danny Auble <da@llnl.gov>
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/plugins/jobcomp/filetxt/jobcomp_filetxt.c b/src/plugins/jobcomp/filetxt/jobcomp_filetxt.c
index 56b145c4df0f0a8234150f38b6449e1219e526d6..b1cde9af7f335b73707a876704c6c18b1e482722 100644
--- a/src/plugins/jobcomp/filetxt/jobcomp_filetxt.c
+++ b/src/plugins/jobcomp/filetxt/jobcomp_filetxt.c
@@ -4,10 +4,11 @@
  *  Copyright (C) 2003 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov> et. al.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -92,7 +93,8 @@ const char plugin_type[]       	= "jobcomp/filetxt";
 const uint32_t plugin_version	= 100;
 
 #define JOB_FORMAT "JobId=%lu UserId=%s(%lu) GroupId=%s(%lu) Name=%s JobState=%s Partition=%s "\
-		"TimeLimit=%s StartTime=%s EndTime=%s NodeList=%s NodeCnt=%u ProcCnt=%u %s\n"
+		"TimeLimit=%s StartTime=%s EndTime=%s NodeList=%s NodeCnt=%u ProcCnt=%u "\
+		"WorkDir=%s %s\n"
  
 /* Type for error string table entries */
 typedef struct {
@@ -245,7 +247,7 @@ extern int slurm_jobcomp_log_record ( struct job_record *job_ptr )
 	int rc = SLURM_SUCCESS;
 	char job_rec[1024];
 	char usr_str[32], grp_str[32], start_str[32], end_str[32], lim_str[32];
-	char select_buf[128];
+	char select_buf[128], *work_dir;
 	size_t offset = 0, tot_size, wrote;
 	enum job_states job_state;
 
@@ -271,6 +273,11 @@ extern int slurm_jobcomp_log_record ( struct job_record *job_ptr )
 	_make_time_str(&(job_ptr->start_time), start_str, sizeof(start_str));
 	_make_time_str(&(job_ptr->end_time), end_str, sizeof(end_str));
 
+	if (job_ptr->details && job_ptr->details->work_dir)
+		work_dir = job_ptr->details->work_dir;
+	else
+		work_dir = "unknown";
+
 	select_g_sprint_jobinfo(job_ptr->select_jobinfo,
 		select_buf, sizeof(select_buf), SELECT_PRINT_MIXED);
 
@@ -281,7 +288,7 @@ extern int slurm_jobcomp_log_record ( struct job_record *job_ptr )
 		 job_state_string(job_state), 
 		 job_ptr->partition, lim_str, start_str, 
 		 end_str, job_ptr->nodes, job_ptr->node_cnt,
-		 job_ptr->total_procs,
+		 job_ptr->total_procs, work_dir,
 		 select_buf);
 	tot_size = strlen(job_rec);
 
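With the JOB_FORMAT extension above, each filetxt record now carries the job's
working directory (or the literal "unknown" when the job details are gone). A
sample record, one line in the log file but wrapped here, with every field
value invented for illustration:

    JobId=1234 UserId=alice(1001) GroupId=users(100) Name=bench
    JobState=COMPLETED Partition=debug TimeLimit=30
    StartTime=2009-04-09T10:00:00 EndTime=2009-04-09T10:12:34
    NodeList=tux[0-3] NodeCnt=4 ProcCnt=16 WorkDir=/home/alice/bench

The exact timestamp format depends on how _make_time_str renders times in a
given build; the values shown are only a sketch.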
diff --git a/src/plugins/jobcomp/mysql/Makefile.am b/src/plugins/jobcomp/mysql/Makefile.am
index 44da93ece2005b4765cb8faf5fe158e404b3047a..5b01a39a9180e272de58f1756fb61d16dc9a3579 100644
--- a/src/plugins/jobcomp/mysql/Makefile.am
+++ b/src/plugins/jobcomp/mysql/Makefile.am
@@ -6,6 +6,7 @@ PLUGIN_FLAGS = -module -avoid-version --export-dynamic
 
 INCLUDES = -I$(top_srcdir) -I$(top_srcdir)/src/common
 
+if WITH_MYSQL
 pkglib_LTLIBRARIES = jobcomp_mysql.la
 
 # Mysql storage plugin.
@@ -16,4 +17,7 @@ jobcomp_mysql_la_CFLAGS = $(MYSQL_CFLAGS)
 jobcomp_mysql_la_LIBADD  = $(top_builddir)/src/database/libslurm_mysql.la \
 	$(MYSQL_LIBS)
 jobcomp_mysql_la_DEPENDENCIES = $(top_builddir)/src/database/libslurm_mysql.la
-
+else
+EXTRA_jobcomp_mysql_la_SOURCES = jobcomp_mysql.c \
+		mysql_jobcomp_process.c mysql_jobcomp_process.h
+endif
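The new "if WITH_MYSQL" guard means jobcomp_mysql.la is built only when
configure actually found MySQL; the EXTRA_jobcomp_mysql_la_SOURCES branch
keeps the source files in "make dist" tarballs even when the library is
skipped. This replaces the old approach of always compiling the plugin and
stubbing it out with #ifdef HAVE_MYSQL (those stubs are deleted from
jobcomp_mysql.c below), and the same pattern is applied to the pgsql plugin
later in this patch. A sketch of how such an Automake conditional is
typically declared at configure time (the shell variable name here is an
assumption, not taken from this patch):

    # in configure.ac or an aclocal macro such as x_ac_databases.m4 (sketch)
    AM_CONDITIONAL(WITH_MYSQL, test "x$ac_have_mysql" = "xyes")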
diff --git a/src/plugins/jobcomp/mysql/Makefile.in b/src/plugins/jobcomp/mysql/Makefile.in
index d60d778c9c9c14d9d48fa0d8e319e1b808064dd3..ae76368d28fcaaf97b829909f412db88e60b4fac 100644
--- a/src/plugins/jobcomp/mysql/Makefile.in
+++ b/src/plugins/jobcomp/mysql/Makefile.in
@@ -43,14 +43,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -77,12 +81,18 @@ am__installdirs = "$(DESTDIR)$(pkglibdir)"
 pkglibLTLIBRARIES_INSTALL = $(INSTALL)
 LTLIBRARIES = $(pkglib_LTLIBRARIES)
 am__DEPENDENCIES_1 =
-am_jobcomp_mysql_la_OBJECTS = jobcomp_mysql_la-jobcomp_mysql.lo \
-	jobcomp_mysql_la-mysql_jobcomp_process.lo
+am__jobcomp_mysql_la_SOURCES_DIST = jobcomp_mysql.c \
+	mysql_jobcomp_process.c mysql_jobcomp_process.h
+@WITH_MYSQL_TRUE@am_jobcomp_mysql_la_OBJECTS =  \
+@WITH_MYSQL_TRUE@	jobcomp_mysql_la-jobcomp_mysql.lo \
+@WITH_MYSQL_TRUE@	jobcomp_mysql_la-mysql_jobcomp_process.lo
+am__EXTRA_jobcomp_mysql_la_SOURCES_DIST = jobcomp_mysql.c \
+	mysql_jobcomp_process.c mysql_jobcomp_process.h
 jobcomp_mysql_la_OBJECTS = $(am_jobcomp_mysql_la_OBJECTS)
 jobcomp_mysql_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \
 	$(LIBTOOLFLAGS) --mode=link $(CCLD) $(jobcomp_mysql_la_CFLAGS) \
 	$(CFLAGS) $(jobcomp_mysql_la_LDFLAGS) $(LDFLAGS) -o $@
+@WITH_MYSQL_TRUE@am_jobcomp_mysql_la_rpath = -rpath $(pkglibdir)
 DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm
 depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp
 am__depfiles_maybe = depfiles
@@ -95,8 +105,10 @@ CCLD = $(CC)
 LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
 	--mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \
 	$(LDFLAGS) -o $@
-SOURCES = $(jobcomp_mysql_la_SOURCES)
-DIST_SOURCES = $(jobcomp_mysql_la_SOURCES)
+SOURCES = $(jobcomp_mysql_la_SOURCES) \
+	$(EXTRA_jobcomp_mysql_la_SOURCES)
+DIST_SOURCES = $(am__jobcomp_mysql_la_SOURCES_DIST) \
+	$(am__EXTRA_jobcomp_mysql_la_SOURCES_DIST)
 ETAGS = etags
 CTAGS = ctags
 DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
@@ -110,6 +122,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
@@ -272,18 +288,21 @@ top_srcdir = @top_srcdir@
 AUTOMAKE_OPTIONS = foreign
 PLUGIN_FLAGS = -module -avoid-version --export-dynamic
 INCLUDES = -I$(top_srcdir) -I$(top_srcdir)/src/common
-pkglib_LTLIBRARIES = jobcomp_mysql.la
+@WITH_MYSQL_TRUE@pkglib_LTLIBRARIES = jobcomp_mysql.la
 
 # Mysql storage plugin.
-jobcomp_mysql_la_SOURCES = jobcomp_mysql.c \
-			mysql_jobcomp_process.c mysql_jobcomp_process.h
+@WITH_MYSQL_TRUE@jobcomp_mysql_la_SOURCES = jobcomp_mysql.c \
+@WITH_MYSQL_TRUE@			mysql_jobcomp_process.c mysql_jobcomp_process.h
 
-jobcomp_mysql_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS)
-jobcomp_mysql_la_CFLAGS = $(MYSQL_CFLAGS)
-jobcomp_mysql_la_LIBADD = $(top_builddir)/src/database/libslurm_mysql.la \
-	$(MYSQL_LIBS)
+@WITH_MYSQL_TRUE@jobcomp_mysql_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS)
+@WITH_MYSQL_TRUE@jobcomp_mysql_la_CFLAGS = $(MYSQL_CFLAGS)
+@WITH_MYSQL_TRUE@jobcomp_mysql_la_LIBADD = $(top_builddir)/src/database/libslurm_mysql.la \
+@WITH_MYSQL_TRUE@	$(MYSQL_LIBS)
+
+@WITH_MYSQL_TRUE@jobcomp_mysql_la_DEPENDENCIES = $(top_builddir)/src/database/libslurm_mysql.la
+@WITH_MYSQL_FALSE@EXTRA_jobcomp_mysql_la_SOURCES = jobcomp_mysql.c \
+@WITH_MYSQL_FALSE@		mysql_jobcomp_process.c mysql_jobcomp_process.h
 
-jobcomp_mysql_la_DEPENDENCIES = $(top_builddir)/src/database/libslurm_mysql.la
 all: all-am
 
 .SUFFIXES:
@@ -345,7 +364,7 @@ clean-pkglibLTLIBRARIES:
 	  rm -f "$${dir}/so_locations"; \
 	done
 jobcomp_mysql.la: $(jobcomp_mysql_la_OBJECTS) $(jobcomp_mysql_la_DEPENDENCIES) 
-	$(jobcomp_mysql_la_LINK) -rpath $(pkglibdir) $(jobcomp_mysql_la_OBJECTS) $(jobcomp_mysql_la_LIBADD) $(LIBS)
+	$(jobcomp_mysql_la_LINK) $(am_jobcomp_mysql_la_rpath) $(jobcomp_mysql_la_OBJECTS) $(jobcomp_mysql_la_LIBADD) $(LIBS)
 
 mostlyclean-compile:
 	-rm -f *.$(OBJEXT)
diff --git a/src/plugins/jobcomp/mysql/jobcomp_mysql.c b/src/plugins/jobcomp/mysql/jobcomp_mysql.c
index bb61175eed63e354ba050ccfe4ffbcc2a5be988a..753255c9d7442a29ba8782e2e029569119dbe541 100644
--- a/src/plugins/jobcomp/mysql/jobcomp_mysql.c
+++ b/src/plugins/jobcomp/mysql/jobcomp_mysql.c
@@ -4,11 +4,13 @@
  *  $Id: storage_mysql.c 10893 2007-01-29 21:53:48Z da $
  *****************************************************************************
  *  Copyright (C) 2004-2007 The Regents of the University of California.
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Danny Auble <da@llnl.gov>
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -77,9 +79,6 @@ const char plugin_name[] = "Job completion MYSQL plugin";
 const char plugin_type[] = "jobcomp/mysql";
 const uint32_t plugin_version = 100;
 
-#ifdef HAVE_MYSQL
-
-#define DEFAULT_JOBCOMP_DB "slurm_jobcomp_db"
 
 MYSQL *jobcomp_mysql_db = NULL;
 
@@ -97,12 +96,12 @@ storage_field_t jobcomp_table_fields[] = {
 	{ "starttime", "int unsigned default 0 not null" }, 
 	{ "endtime", "int unsigned default 0 not null" },
 	{ "nodelist", "text" }, 
-	{ "nodecnt", "mediumint unsigned not null" },
-	{ "proc_cnt", "mediumint unsigned not null" },
+	{ "nodecnt", "int unsigned not null" },
+	{ "proc_cnt", "int unsigned not null" },
 	{ "connect_type", "tinytext" },
 	{ "reboot", "tinytext" },
 	{ "rotate", "tinytext" },
-	{ "maxprocs", "mediumint unsigned default 0 not null" },
+	{ "maxprocs", "int unsigned default 0 not null" },
 	{ "geometry", "tinytext" },
 	{ "start", "tinytext" },
 	{ "blockid", "tinytext" },
@@ -133,7 +132,7 @@ static mysql_db_info_t *_mysql_jobcomp_create_db_info()
 	mysql_db_info_t *db_info = xmalloc(sizeof(mysql_db_info_t));
 	db_info->port = slurm_get_jobcomp_port();
 	if(!db_info->port) {
-		db_info->port = 3306;
+		db_info->port = DEFAULT_MYSQL_PORT;
 		slurm_set_jobcomp_port(db_info->port);
 	}
 	db_info->host = slurm_get_jobcomp_host();	
@@ -209,7 +208,6 @@ static char *_lookup_slurm_api_errtab(int errnum)
 	}
 	return res;
 }
-#endif
 
 /*
  * init() is called when the plugin is loaded, before any other functions
@@ -218,11 +216,7 @@ static char *_lookup_slurm_api_errtab(int errnum)
 extern int init ( void )
 {
 	static int first = 1;
-#ifndef HAVE_MYSQL
-	fatal("No MySQL storage was found on the machine. "
-	      "Please check the config.log from the run of configure "
-	      "and run again.");	
-#endif
+
 	if(first) {
 		/* since this can be loaded from many different places
 		   only tell us once. */
@@ -237,20 +231,15 @@ extern int init ( void )
 
 extern int fini ( void )
 {
-#ifdef HAVE_MYSQL
 	if (jobcomp_mysql_db) {
 		mysql_close(jobcomp_mysql_db);
 		jobcomp_mysql_db = NULL;
 	}
 	return SLURM_SUCCESS;
-#else
-	return SLURM_ERROR;
-#endif 
 }
 
 extern int slurm_jobcomp_set_location(char *location)
 {
-#ifdef HAVE_MYSQL
 	mysql_db_info_t *db_info = _mysql_jobcomp_create_db_info();
 	int rc = SLURM_SUCCESS;
 	char *db_name = NULL;
@@ -260,19 +249,19 @@ extern int slurm_jobcomp_set_location(char *location)
 		return SLURM_SUCCESS;
 	
 	if(!location)
-		db_name = DEFAULT_JOBCOMP_DB;
+		db_name = DEFAULT_JOB_COMP_DB;
 	else {
 		while(location[i]) {
 			if(location[i] == '.' || location[i] == '/') {
 				debug("%s doesn't look like a database "
 				      "name, using %s",
-				      location, DEFAULT_JOBCOMP_DB);
+				      location, DEFAULT_JOB_COMP_DB);
 				break;
 			}
 			i++;
 		}
 		if(location[i]) 
-			db_name = DEFAULT_JOBCOMP_DB;
+			db_name = DEFAULT_JOB_COMP_DB;
 		else
 			db_name = location;
 	}
@@ -290,14 +279,10 @@ extern int slurm_jobcomp_set_location(char *location)
 	else
 		debug("Jobcomp database init failed");
 	return rc;
-#else
-	return SLURM_ERROR;
-#endif 
 }
 
 extern int slurm_jobcomp_log_record(struct job_record *job_ptr)
 {
-#ifdef HAVE_MYSQL
 	int rc = SLURM_SUCCESS;
 	char *usr_str = NULL, *grp_str = NULL, lim_str[32];
 	char *connect_type = NULL, *reboot = NULL, *rotate = NULL,
@@ -340,9 +325,13 @@ extern int slurm_jobcomp_log_record(struct job_record *job_ptr)
 					    SELECT_PRINT_GEOMETRY);
 	start = select_g_xstrdup_jobinfo(job_ptr->select_jobinfo,
 					 SELECT_PRINT_START);
+#ifdef HAVE_BG
 	blockid = select_g_xstrdup_jobinfo(job_ptr->select_jobinfo,
 					   SELECT_PRINT_BG_ID);
-
+#else
+	blockid = select_g_xstrdup_jobinfo(job_ptr->select_jobinfo,
+					   SELECT_PRINT_RESV_ID);
+#endif
 	query = xstrdup_printf(
 		"insert into %s (jobid, uid, user_name, gid, group_name, "
 		"name, state, proc_cnt, partition, timelimit, "
@@ -411,28 +400,17 @@ extern int slurm_jobcomp_log_record(struct job_record *job_ptr)
 	xfree(grp_str);
 
 	return rc;
-#else
-	return SLURM_ERROR;
-#endif 
 }
 
 extern int slurm_jobcomp_get_errno(void)
 {
-#ifdef HAVE_MYSQL
 	return plugin_errno;
-#else
-	return SLURM_ERROR;
-#endif 
 }
 
 extern char *slurm_jobcomp_strerror(int errnum)
 {
-#ifdef HAVE_MYSQL
 	char *res = _lookup_slurm_api_errtab(errnum);
 	return (res ? res : strerror(errnum));
-#else
-	return NULL;
-#endif 
 }
 
 /* 
@@ -444,7 +422,6 @@ extern List slurm_jobcomp_get_jobs(acct_job_cond_t *job_cond)
 {
 	List job_list = NULL;
 				   
-#ifdef HAVE_MYSQL
 	if(!jobcomp_mysql_db || mysql_ping(jobcomp_mysql_db) != 0) {
 		char *loc = slurm_get_jobcomp_loc();
 		if(slurm_jobcomp_set_location(loc) == SLURM_ERROR) {
@@ -455,7 +432,7 @@ extern List slurm_jobcomp_get_jobs(acct_job_cond_t *job_cond)
 	}
 
 	job_list = mysql_jobcomp_process_get_jobs(job_cond);	
-#endif 
+
 	return job_list;
 }
 
@@ -464,7 +441,6 @@ extern List slurm_jobcomp_get_jobs(acct_job_cond_t *job_cond)
  */
 extern int slurm_jobcomp_archive(acct_archive_cond_t *arch_cond)
 {
-#ifdef HAVE_MYSQL
 	if(!jobcomp_mysql_db || mysql_ping(jobcomp_mysql_db) != 0) {
 		char *loc = slurm_get_jobcomp_loc();
 		if(slurm_jobcomp_set_location(loc) == SLURM_ERROR) {
@@ -475,6 +451,4 @@ extern int slurm_jobcomp_archive(acct_archive_cond_t *arch_cond)
 	}
 
 	return mysql_jobcomp_process_archive(arch_cond);
-#endif 
-	return SLURM_ERROR;
 }
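Two details in jobcomp_mysql.c are easy to miss. First, the nodecnt, proc_cnt
and maxprocs columns are widened from mediumint unsigned (maximum 16,777,215)
to int unsigned (maximum 4,294,967,295), matching the 32-bit counters SLURM
uses internally:

    { "proc_cnt", "int unsigned not null" },   /* was: mediumint unsigned */

Second, the magic number 3306 gives way to the shared DEFAULT_MYSQL_PORT
constant, and on non-BlueGene builds the blockid column now stores the
reservation ID (SELECT_PRINT_RESV_ID) rather than a BlueGene block ID that
cannot exist there. The #ifdef HAVE_MYSQL guards can all go because the
plugin is no longer compiled at all without MySQL (see the Makefile.am change
above); jobcomp_pgsql.c receives the analogous cleanup below.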
diff --git a/src/plugins/jobcomp/mysql/mysql_jobcomp_process.c b/src/plugins/jobcomp/mysql/mysql_jobcomp_process.c
index 19ca5945d00482d21e4f2b615bab55411c7dab99..37b6ce03e8152e8d6ef6312ddd76bec7599fe1cb 100644
--- a/src/plugins/jobcomp/mysql/mysql_jobcomp_process.c
+++ b/src/plugins/jobcomp/mysql/mysql_jobcomp_process.c
@@ -9,7 +9,8 @@
  *  Written by Danny Auble <da@llnl.gov>
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -45,7 +46,6 @@
 #include "src/common/xstring.h"
 #include "mysql_jobcomp_process.h"
 
-#ifdef HAVE_MYSQL
 static void _do_fdump(MYSQL_ROW row, int lc)
 {	
 	int i = 0;
@@ -209,5 +209,3 @@ extern int mysql_jobcomp_process_archive(acct_archive_cond_t *arch_cond)
 {
 	return SLURM_SUCCESS;
 }
-
-#endif	
diff --git a/src/plugins/jobcomp/mysql/mysql_jobcomp_process.h b/src/plugins/jobcomp/mysql/mysql_jobcomp_process.h
index d5d578edd0ee74a6243dd789ff0f3065741f8686..dcc017a046ca3eec9ed982c78dee6a1d063015d6 100644
--- a/src/plugins/jobcomp/mysql/mysql_jobcomp_process.h
+++ b/src/plugins/jobcomp/mysql/mysql_jobcomp_process.h
@@ -9,7 +9,8 @@
  *  Written by Danny Auble <da@llnl.gov>
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -48,7 +49,6 @@
 #include "src/common/jobacct_common.h"
 #include "src/common/slurm_accounting_storage.h"
 
-#ifdef HAVE_MYSQL
 extern MYSQL *jobcomp_mysql_db;
 extern int jobcomp_db_init;
 
@@ -85,6 +85,5 @@ enum {
 extern List mysql_jobcomp_process_get_jobs(acct_job_cond_t *job_cond);
 
 extern int mysql_jobcomp_process_archive(acct_archive_cond_t *arch_cond);
-#endif
 
 #endif
diff --git a/src/plugins/jobcomp/none/Makefile.in b/src/plugins/jobcomp/none/Makefile.in
index 96986c544cd96738570f49bccf6d57917882d64c..914ccca0a6db8435528a8a32b5934f10cc3cb52e 100644
--- a/src/plugins/jobcomp/none/Makefile.in
+++ b/src/plugins/jobcomp/none/Makefile.in
@@ -43,14 +43,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -109,6 +113,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
diff --git a/src/plugins/jobcomp/none/jobcomp_none.c b/src/plugins/jobcomp/none/jobcomp_none.c
index 9864098f6e7eaae80017eacc851c1875765f4f6a..304539726c7274e45df67b93d27c2cef9e322d61 100644
--- a/src/plugins/jobcomp/none/jobcomp_none.c
+++ b/src/plugins/jobcomp/none/jobcomp_none.c
@@ -4,10 +4,11 @@
  *  Copyright (C) 2002 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/plugins/jobcomp/pgsql/Makefile.am b/src/plugins/jobcomp/pgsql/Makefile.am
index ae90b1625a7588d03f02c1c399e8d48a32b04f3c..6c78796b9f997304e7a4146f4a9039897aad9683 100644
--- a/src/plugins/jobcomp/pgsql/Makefile.am
+++ b/src/plugins/jobcomp/pgsql/Makefile.am
@@ -7,6 +7,7 @@ PLUGIN_FLAGS = -module -avoid-version --export-dynamic
 
 INCLUDES = -I$(top_srcdir) -I$(top_srcdir)/src/common
 
+if WITH_PGSQL
 pkglib_LTLIBRARIES = jobcomp_pgsql.la
 
 # Pgsql storage plugin.
@@ -17,3 +18,7 @@ jobcomp_pgsql_la_CFLAGS = $(PGSQL_CFLAGS)
 jobcomp_pgsql_la_LIBADD  = $(top_builddir)/src/database/libslurm_pgsql.la \
 	$(PGSQL_LIBS)
 jobcomp_pgsql_la_DEPENDENCIES = $(top_builddir)/src/database/libslurm_pgsql.la
+else
+EXTRA_jobcomp_pgsql_la_SOURCES = jobcomp_pgsql.c \
+		pgsql_jobcomp_process.c pgsql_jobcomp_process.h
+endif
diff --git a/src/plugins/jobcomp/pgsql/Makefile.in b/src/plugins/jobcomp/pgsql/Makefile.in
index 5e80c261277e0b11d9799e104e717f86340a766a..22e3ac79a045dc84c632853412d97780521703e5 100644
--- a/src/plugins/jobcomp/pgsql/Makefile.in
+++ b/src/plugins/jobcomp/pgsql/Makefile.in
@@ -43,14 +43,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -77,12 +81,18 @@ am__installdirs = "$(DESTDIR)$(pkglibdir)"
 pkglibLTLIBRARIES_INSTALL = $(INSTALL)
 LTLIBRARIES = $(pkglib_LTLIBRARIES)
 am__DEPENDENCIES_1 =
-am_jobcomp_pgsql_la_OBJECTS = jobcomp_pgsql_la-jobcomp_pgsql.lo \
-	jobcomp_pgsql_la-pgsql_jobcomp_process.lo
+am__jobcomp_pgsql_la_SOURCES_DIST = jobcomp_pgsql.c \
+	pgsql_jobcomp_process.c pgsql_jobcomp_process.h
+@WITH_PGSQL_TRUE@am_jobcomp_pgsql_la_OBJECTS =  \
+@WITH_PGSQL_TRUE@	jobcomp_pgsql_la-jobcomp_pgsql.lo \
+@WITH_PGSQL_TRUE@	jobcomp_pgsql_la-pgsql_jobcomp_process.lo
+am__EXTRA_jobcomp_pgsql_la_SOURCES_DIST = jobcomp_pgsql.c \
+	pgsql_jobcomp_process.c pgsql_jobcomp_process.h
 jobcomp_pgsql_la_OBJECTS = $(am_jobcomp_pgsql_la_OBJECTS)
 jobcomp_pgsql_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \
 	$(LIBTOOLFLAGS) --mode=link $(CCLD) $(jobcomp_pgsql_la_CFLAGS) \
 	$(CFLAGS) $(jobcomp_pgsql_la_LDFLAGS) $(LDFLAGS) -o $@
+@WITH_PGSQL_TRUE@am_jobcomp_pgsql_la_rpath = -rpath $(pkglibdir)
 DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm
 depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp
 am__depfiles_maybe = depfiles
@@ -95,8 +105,10 @@ CCLD = $(CC)
 LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
 	--mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \
 	$(LDFLAGS) -o $@
-SOURCES = $(jobcomp_pgsql_la_SOURCES)
-DIST_SOURCES = $(jobcomp_pgsql_la_SOURCES)
+SOURCES = $(jobcomp_pgsql_la_SOURCES) \
+	$(EXTRA_jobcomp_pgsql_la_SOURCES)
+DIST_SOURCES = $(am__jobcomp_pgsql_la_SOURCES_DIST) \
+	$(am__EXTRA_jobcomp_pgsql_la_SOURCES_DIST)
 ETAGS = etags
 CTAGS = ctags
 DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
@@ -110,6 +122,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
@@ -272,18 +288,21 @@ top_srcdir = @top_srcdir@
 AUTOMAKE_OPTIONS = foreign
 PLUGIN_FLAGS = -module -avoid-version --export-dynamic
 INCLUDES = -I$(top_srcdir) -I$(top_srcdir)/src/common
-pkglib_LTLIBRARIES = jobcomp_pgsql.la
+@WITH_PGSQL_TRUE@pkglib_LTLIBRARIES = jobcomp_pgsql.la
 
 # Pgsql storage plugin.
-jobcomp_pgsql_la_SOURCES = jobcomp_pgsql.c \
-			pgsql_jobcomp_process.c pgsql_jobcomp_process.h
+@WITH_PGSQL_TRUE@jobcomp_pgsql_la_SOURCES = jobcomp_pgsql.c \
+@WITH_PGSQL_TRUE@			pgsql_jobcomp_process.c pgsql_jobcomp_process.h
 
-jobcomp_pgsql_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS)
-jobcomp_pgsql_la_CFLAGS = $(PGSQL_CFLAGS)
-jobcomp_pgsql_la_LIBADD = $(top_builddir)/src/database/libslurm_pgsql.la \
-	$(PGSQL_LIBS)
+@WITH_PGSQL_TRUE@jobcomp_pgsql_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS)
+@WITH_PGSQL_TRUE@jobcomp_pgsql_la_CFLAGS = $(PGSQL_CFLAGS)
+@WITH_PGSQL_TRUE@jobcomp_pgsql_la_LIBADD = $(top_builddir)/src/database/libslurm_pgsql.la \
+@WITH_PGSQL_TRUE@	$(PGSQL_LIBS)
+
+@WITH_PGSQL_TRUE@jobcomp_pgsql_la_DEPENDENCIES = $(top_builddir)/src/database/libslurm_pgsql.la
+@WITH_PGSQL_FALSE@EXTRA_jobcomp_pgsql_la_SOURCES = jobcomp_pgsql.c \
+@WITH_PGSQL_FALSE@		pgsql_jobcomp_process.c pgsql_jobcomp_process.h
 
-jobcomp_pgsql_la_DEPENDENCIES = $(top_builddir)/src/database/libslurm_pgsql.la
 all: all-am
 
 .SUFFIXES:
@@ -345,7 +364,7 @@ clean-pkglibLTLIBRARIES:
 	  rm -f "$${dir}/so_locations"; \
 	done
 jobcomp_pgsql.la: $(jobcomp_pgsql_la_OBJECTS) $(jobcomp_pgsql_la_DEPENDENCIES) 
-	$(jobcomp_pgsql_la_LINK) -rpath $(pkglibdir) $(jobcomp_pgsql_la_OBJECTS) $(jobcomp_pgsql_la_LIBADD) $(LIBS)
+	$(jobcomp_pgsql_la_LINK) $(am_jobcomp_pgsql_la_rpath) $(jobcomp_pgsql_la_OBJECTS) $(jobcomp_pgsql_la_LIBADD) $(LIBS)
 
 mostlyclean-compile:
 	-rm -f *.$(OBJEXT)
diff --git a/src/plugins/jobcomp/pgsql/jobcomp_pgsql.c b/src/plugins/jobcomp/pgsql/jobcomp_pgsql.c
index 979e75cf58ed42c2ea51414642ab0238746ad4f6..6ae020c28b9961b38442c7c7b9de048d6667515b 100644
--- a/src/plugins/jobcomp/pgsql/jobcomp_pgsql.c
+++ b/src/plugins/jobcomp/pgsql/jobcomp_pgsql.c
@@ -4,11 +4,13 @@
  *  $Id: storage_pgsql.c 10893 2007-01-29 21:53:48Z da $
  *****************************************************************************
  *  Copyright (C) 2004-2007 The Regents of the University of California.
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Danny Auble <da@llnl.gov>
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -77,10 +79,6 @@ const char plugin_name[] = "Job completion POSTGRESQL plugin";
 const char plugin_type[] = "jobcomp/pgsql";
 const uint32_t plugin_version = 100;
 
-#ifdef HAVE_PGSQL
-
-#define DEFAULT_JOBCOMP_DB "slurm_jobcomp_db"
-
 PGconn *jobcomp_pgsql_db = NULL;
 
 char *jobcomp_table = "jobcomp_table";
@@ -133,7 +131,7 @@ static pgsql_db_info_t *_pgsql_jobcomp_create_db_info()
 	/* it turns out it is better, when using defaults, to let postgres
 	   handle them on its own terms */
 	if(!db_info->port) {
-		db_info->port = 5432;
+		db_info->port = DEFAULT_PGSQL_PORT;
 		slurm_set_jobcomp_port(db_info->port);
 	}
 	db_info->host = slurm_get_jobcomp_host();
@@ -232,7 +230,6 @@ static char *_lookup_slurm_api_errtab(int errnum)
 	}
 	return res;
 }
-#endif
 
 /*
  * init() is called when the plugin is loaded, before any other functions
@@ -241,11 +238,7 @@ static char *_lookup_slurm_api_errtab(int errnum)
 extern int init ( void )
 {
 	static int first = 1;
-#ifndef HAVE_PGSQL
-	fatal("No Postgresql storage was found on the machine. "
-	      "Please check the config.log from the run of configure "
-	      "and run again.");	
-#endif
+
 	if(first) {
 		/* since this can be loaded from many different places
 		   only tell us once. */
@@ -260,20 +253,15 @@ extern int init ( void )
 
 extern int fini ( void )
 {
-#ifdef HAVE_PGSQL
 	if (jobcomp_pgsql_db) {
 		PQfinish(jobcomp_pgsql_db);
 		jobcomp_pgsql_db = NULL;
 	}
 	return SLURM_SUCCESS;
-#else
-	return SLURM_ERROR;
-#endif
 }
 
 extern int slurm_jobcomp_set_location(char *location)
 {
-#ifdef HAVE_PGSQL
 	pgsql_db_info_t *db_info = _pgsql_jobcomp_create_db_info();
 	int rc = SLURM_SUCCESS;
 	char *db_name = NULL;
@@ -283,19 +271,19 @@ extern int slurm_jobcomp_set_location(char *location)
 		return SLURM_SUCCESS;
 	
 	if(!location)
-		db_name = DEFAULT_JOBCOMP_DB;
+		db_name = DEFAULT_JOB_COMP_DB;
 	else {
 		while(location[i]) {
 			if(location[i] == '.' || location[i] == '/') {
 				debug("%s doesn't look like a database "
 				      "name, using %s",
-				      location, DEFAULT_JOBCOMP_DB);
+				      location, DEFAULT_JOB_COMP_DB);
 				break;
 			}
 			i++;
 		}
 		if(location[i]) 
-			db_name = DEFAULT_JOBCOMP_DB;
+			db_name = DEFAULT_JOB_COMP_DB;
 		else
 			db_name = location;
 	}
@@ -313,14 +301,10 @@ extern int slurm_jobcomp_set_location(char *location)
 	else
 		debug("Jobcomp database init failed");
 	return rc;
-#else
-	return SLURM_ERROR;
-#endif
 }
 
 extern int slurm_jobcomp_log_record(struct job_record *job_ptr)
 {
-#ifdef HAVE_PGSQL
 	int rc = SLURM_SUCCESS;
 	char *usr_str = NULL, *grp_str = NULL, lim_str[32];
 	char *connect_type = NULL, *reboot = NULL, *rotate = NULL,
@@ -363,9 +347,13 @@ extern int slurm_jobcomp_log_record(struct job_record *job_ptr)
 					    SELECT_PRINT_GEOMETRY);
 	start = select_g_xstrdup_jobinfo(job_ptr->select_jobinfo,
 					 SELECT_PRINT_START);
+#ifdef HAVE_BG
 	blockid = select_g_xstrdup_jobinfo(job_ptr->select_jobinfo,
 					   SELECT_PRINT_BG_ID);
-
+#else
+	blockid = select_g_xstrdup_jobinfo(job_ptr->select_jobinfo,
+					   SELECT_PRINT_RESV_ID);
+#endif
 	query = xstrdup_printf(
 		"insert into %s (jobid, uid, user_name, gid, group_name, "
 		"name, state, proc_cnt, partition, timelimit, "
@@ -435,28 +423,17 @@ extern int slurm_jobcomp_log_record(struct job_record *job_ptr)
 	xfree(usr_str);
 
 	return rc;
-#else
-	return SLURM_ERROR;
-#endif 
 }
 
 extern int slurm_jobcomp_get_errno()
 {
-#ifdef HAVE_PGSQL
 	return plugin_errno;
-#else
-	return SLURM_ERROR;
-#endif 
 }
 
 extern char *slurm_jobcomp_strerror(int errnum)
 {
-#ifdef HAVE_PGSQL
 	char *res = _lookup_slurm_api_errtab(errnum);
 	return (res ? res : strerror(errnum));
-#else
-	return NULL;
-#endif 
 }
 
 /* 
@@ -468,7 +445,6 @@ extern List slurm_jobcomp_get_jobs(acct_job_cond_t *job_cond)
 {
 	List job_list = NULL;
 
-#ifdef HAVE_PGSQL
 	if(!jobcomp_pgsql_db || PQstatus(jobcomp_pgsql_db) != CONNECTION_OK) {
 		char *loc = slurm_get_jobcomp_loc();
 		if(slurm_jobcomp_set_location(loc) == SLURM_ERROR) {
@@ -479,7 +455,7 @@ extern List slurm_jobcomp_get_jobs(acct_job_cond_t *job_cond)
 	}
 
 	job_list = pgsql_jobcomp_process_get_jobs(job_cond);	
-#endif 
+
 	return job_list;
 }
 
@@ -488,7 +464,6 @@ extern List slurm_jobcomp_get_jobs(acct_job_cond_t *job_cond)
  */
 extern int slurm_jobcomp_archive(acct_archive_cond_t *arch_cond)
 {
-#ifdef HAVE_PGSQL
 	if(!jobcomp_pgsql_db || PQstatus(jobcomp_pgsql_db) != CONNECTION_OK) {
 		char *loc = slurm_get_jobcomp_loc();
 		if(slurm_jobcomp_set_location(loc) == SLURM_ERROR) {
@@ -499,6 +474,4 @@ extern int slurm_jobcomp_archive(acct_archive_cond_t *arch_cond)
 	}
 
 	return pgsql_jobcomp_process_archive(arch_cond);
-#endif 
-	return SLURM_ERROR;
 }
diff --git a/src/plugins/jobcomp/pgsql/pgsql_jobcomp_process.c b/src/plugins/jobcomp/pgsql/pgsql_jobcomp_process.c
index babb42c9811f73913b13c0134a3abddcda1a8f17..ef93a4f51db8c8e03a511ef3dec5d809357154e6 100644
--- a/src/plugins/jobcomp/pgsql/pgsql_jobcomp_process.c
+++ b/src/plugins/jobcomp/pgsql/pgsql_jobcomp_process.c
@@ -9,7 +9,8 @@
  *  Written by Danny Auble <da@llnl.gov>
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -45,7 +46,6 @@
 #include "src/common/xstring.h"
 #include "pgsql_jobcomp_process.h"
 
-#ifdef HAVE_PGSQL
 static void _do_fdump(PGresult *result, int lc)
 {
 	int i = 0;
@@ -224,5 +224,3 @@ extern int pgsql_jobcomp_process_archive(acct_archive_cond_t *arch_cond)
 {
 	return SLURM_SUCCESS;
 }
-
-#endif	
diff --git a/src/plugins/jobcomp/pgsql/pgsql_jobcomp_process.h b/src/plugins/jobcomp/pgsql/pgsql_jobcomp_process.h
index ffa8b4c7002f199f2159617db06f2568e23ea6fc..124d32339b641e1ef707db4f7a3ccc3c6ab55e8f 100644
--- a/src/plugins/jobcomp/pgsql/pgsql_jobcomp_process.h
+++ b/src/plugins/jobcomp/pgsql/pgsql_jobcomp_process.h
@@ -9,7 +9,8 @@
  *  Written by Danny Auble <da@llnl.gov>
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -48,7 +49,6 @@
 #include "src/common/jobacct_common.h"
 #include "src/common/slurm_accounting_storage.h"
 
-#ifdef HAVE_PGSQL
 extern PGconn *jobcomp_pgsql_db;
 extern int jobcomp_db_init;
 
@@ -85,6 +85,5 @@ enum {
 extern List pgsql_jobcomp_process_get_jobs(acct_job_cond_t *job_cond);
 
 extern int pgsql_jobcomp_process_archive(acct_archive_cond_t *arch_cond);
-#endif
 
 #endif
diff --git a/src/plugins/jobcomp/script/Makefile.in b/src/plugins/jobcomp/script/Makefile.in
index abab766da32a8218d6ef594346e2bbed66d9309b..05c7e5f84d85b9e63fd4770ebfb4abf80645235a 100644
--- a/src/plugins/jobcomp/script/Makefile.in
+++ b/src/plugins/jobcomp/script/Makefile.in
@@ -43,14 +43,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -109,6 +113,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
diff --git a/src/plugins/jobcomp/script/jobcomp_script.c b/src/plugins/jobcomp/script/jobcomp_script.c
index 3767c62738b71edab0258261bfdce85e9b104878..bf1fa21a1319792fd98fec612ea7a85df6fea374 100644
--- a/src/plugins/jobcomp/script/jobcomp_script.c
+++ b/src/plugins/jobcomp/script/jobcomp_script.c
@@ -1,14 +1,15 @@
 /*****************************************************************************\
  *  jobcomp_script.c - Script running slurm job completion logging plugin.
- *  $Id: jobcomp_script.c 16035 2008-12-22 21:46:26Z da $
+ *  $Id: jobcomp_script.c 17214 2009-04-09 23:31:28Z jette $
  *****************************************************************************
  *  Produced at Center for High Performance Computing, North Dakota State
  *  University
  *  Written by Nathan Huff <nhuff@acm.org>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -34,6 +35,30 @@
  *  You should have received a copy of the GNU General Public License along
  *  with SLURM; if not, write to the Free Software Foundation, Inc.,
  *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+ *****************************************************************************
+ *  Here is a list of the environment variables set for the completion script
+ *
+ *  ACCOUNT		Account name
+ *  BATCH		"yes" if submitted via sbatch, "no" otherwise
+ *  END			Time of job termination, UTS
+ *  GID			Group ID of job owner
+ *  JOBID		SLURM Job ID
+ *  JOBNAME		Job name
+ *  JOBSTATE		Termination state of the job (e.g. COMPLETED or FAILED)
+ *  NODECNT		Count of allocated nodes
+ *  NODES		List of allocated nodes
+ *  PARTITION		Partition name used to run job
+ *  PROCS		Count of allocated CPUs
+ *  START		Time of job start, UTS
+ *  SUBMIT		Time of job submission, UTS
+ *  UID			User ID of job owner
+ *  WORK_DIR		Job's working directory
+ *
+ *  BlueGene specific environment variables:
+ *  BLOCKID		Name of Block ID
+ *  CONNECT_TYPE	Connection type: small, torus or mesh
+ *  GEOMETRY		Requested geometry of the job, "#x#x#" where "#" 
+ *			represents the X, Y and Z dimension sizes
 \*****************************************************************************/
 
 #ifdef HAVE_CONFIG_H
@@ -159,13 +184,10 @@ struct jobcomp_info {
 	char *partition;
 	char *jobstate;
 	char *account;
+	char *work_dir;
 #ifdef HAVE_BG
 	char *connect_type;
-	char *reboot;
-	char *rotate;
-	char *maxprocs;
 	char *geometry;
-	char *block_start;
 	char *blockid;
 #endif
 };
@@ -198,19 +220,15 @@ static struct jobcomp_info * _jobcomp_info_create (struct job_record *job)
 	j->nprocs = job->total_procs;
 	j->nnodes = job->node_cnt;
 	j->account = job->account ? xstrdup (job->account) : NULL;
+	if (job->details && job->details->work_dir)
+		j->work_dir = xstrdup(job->details->work_dir);
+	else
+		j->work_dir = xstrdup("unknown");
 #ifdef HAVE_BG
 	j->connect_type = select_g_xstrdup_jobinfo(job->select_jobinfo,
 						   SELECT_PRINT_CONNECTION);
-	j->reboot = select_g_xstrdup_jobinfo(job->select_jobinfo,
-					     SELECT_PRINT_REBOOT);
-	j->rotate = select_g_xstrdup_jobinfo(job->select_jobinfo,
-					     SELECT_PRINT_ROTATE);
-	j->maxprocs = select_g_xstrdup_jobinfo(job->select_jobinfo,
-					       SELECT_PRINT_MAX_PROCS);
 	j->geometry = select_g_xstrdup_jobinfo(job->select_jobinfo,
 					       SELECT_PRINT_GEOMETRY);
-	j->block_start = select_g_xstrdup_jobinfo(job->select_jobinfo,
-						  SELECT_PRINT_START);
 	j->blockid = select_g_xstrdup_jobinfo(job->select_jobinfo,
 					      SELECT_PRINT_BG_ID);
 #endif
@@ -226,13 +244,10 @@ static void _jobcomp_info_destroy (struct jobcomp_info *j)
 	xfree (j->nodes);
 	xfree (j->jobstate);
 	xfree (j->account);
+	xfree (j->work_dir);
 #ifdef HAVE_BG
 	xfree (j->connect_type);
-	xfree (j->reboot);
-	xfree (j->rotate);
-	xfree (j->maxprocs);
 	xfree (j->geometry);
-	xfree (j->block_start);
 	xfree (j->blockid);
 #endif
 	xfree (j);
@@ -338,15 +353,12 @@ static char ** _create_environment (struct jobcomp_info *job)
 	_env_append (&env, "JOBNAME",   job->name);
 	_env_append (&env, "JOBSTATE",  job->jobstate);
 	_env_append (&env, "PARTITION", job->partition);
-	
+	_env_append (&env, "WORK_DIR",  job->work_dir);
+
 #ifdef HAVE_BG
+	_env_append (&env, "BLOCKID",      job->blockid);
 	_env_append (&env, "CONNECT_TYPE", job->connect_type);
-	_env_append (&env, "REBOOT",       job->reboot);
-	_env_append (&env, "ROTATE",       job->rotate);
-	_env_append (&env, "MAXPROCS",     job->maxprocs);
 	_env_append (&env, "GEOMETRY",     job->geometry);
-	_env_append (&env, "BLOCK_START",  job->block_start);
-	_env_append (&env, "BLOCKID",      job->blockid);
 #endif
 
 	if (job->limit == INFINITE)
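With WORK_DIR now exported alongside the variables listed in the header
comment above, a completion script can log per-job data without parsing
anything. A minimal sketch of such a script (the log path is hypothetical; in
slurm.conf this would be wired up via JobCompType=jobcomp/script with the
script path in JobCompLoc):

    #!/bin/sh
    # Hypothetical job-completion script: one summary line per finished job.
    # Every value arrives through the environment set by jobcomp_script.c.
    echo "jobid=$JOBID name=$JOBNAME state=$JOBSTATE nodes=$NODES" \
         "procs=$PROCS workdir=$WORK_DIR" >> /var/log/slurm/jobcomp.log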
diff --git a/src/plugins/mpi/Makefile.in b/src/plugins/mpi/Makefile.in
index e6e7b6a91fc137543872937f952451f515e63520..742968be4744d7d4219901a2a6034ed55178d03e 100644
--- a/src/plugins/mpi/Makefile.in
+++ b/src/plugins/mpi/Makefile.in
@@ -42,14 +42,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -91,6 +95,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
diff --git a/src/plugins/mpi/lam/Makefile.in b/src/plugins/mpi/lam/Makefile.in
index 6fcaab3c657b58f99859ab0c343aebf4db0acd3e..968515f0982d002cafad5d9cf283623d97d693a6 100644
--- a/src/plugins/mpi/lam/Makefile.in
+++ b/src/plugins/mpi/lam/Makefile.in
@@ -43,14 +43,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -109,6 +113,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
diff --git a/src/plugins/mpi/lam/lam.h b/src/plugins/mpi/lam/lam.h
index b09c0cbe8289c161971d602c6f0a92cfc8b28718..1c20e4af79095fa14245e3069f900b320939f28c 100644
--- a/src/plugins/mpi/lam/lam.h
+++ b/src/plugins/mpi/lam/lam.h
@@ -5,10 +5,11 @@
  *  Copyright (C) 2004 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Danny Auble <da@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/plugins/mpi/lam/mpi_lam.c b/src/plugins/mpi/lam/mpi_lam.c
index c1d90bbcf5fb6b4d769aaede5219a5afebd63b53..61d87ef329036f2f52cc050caf64b8efa4ca6dba 100644
--- a/src/plugins/mpi/lam/mpi_lam.c
+++ b/src/plugins/mpi/lam/mpi_lam.c
@@ -5,10 +5,11 @@
  *  Copyright (C) 2004 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Danny Auble <da@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/plugins/mpi/mpich1_p4/Makefile.in b/src/plugins/mpi/mpich1_p4/Makefile.in
index c4e36b27cecac64d0b56888f902f90c7391247b6..b4fd87090170c27e6c17526555d651540f92c86c 100644
--- a/src/plugins/mpi/mpich1_p4/Makefile.in
+++ b/src/plugins/mpi/mpich1_p4/Makefile.in
@@ -43,14 +43,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -109,6 +113,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
diff --git a/src/plugins/mpi/mpich1_p4/mpich1_p4.c b/src/plugins/mpi/mpich1_p4/mpich1_p4.c
index 66303a9a2ebbbe1d6321438c4bd82f12ec5469ee..c730112a1672f98d4b58992465192b3f89db4bc8 100644
--- a/src/plugins/mpi/mpich1_p4/mpich1_p4.c
+++ b/src/plugins/mpi/mpich1_p4/mpich1_p4.c
@@ -4,10 +4,11 @@
  *  Copyright (C) 2004-2007 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/plugins/mpi/mpich1_shmem/Makefile.in b/src/plugins/mpi/mpich1_shmem/Makefile.in
index d4d9c52a866d2eb761bd8d33d9b4fed28d2ac70b..b797b202be42ec85df4e98f2667841b5919795c4 100644
--- a/src/plugins/mpi/mpich1_shmem/Makefile.in
+++ b/src/plugins/mpi/mpich1_shmem/Makefile.in
@@ -43,14 +43,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -109,6 +113,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
diff --git a/src/plugins/mpi/mpich1_shmem/mpich1_shmem.c b/src/plugins/mpi/mpich1_shmem/mpich1_shmem.c
index 2a848feacecef3e005b12fc7eb5c457972b77496..e853b946b6b7c2d7133266112cc937acdb082463 100644
--- a/src/plugins/mpi/mpich1_shmem/mpich1_shmem.c
+++ b/src/plugins/mpi/mpich1_shmem/mpich1_shmem.c
@@ -5,10 +5,11 @@
  *  Copyright (C) 2004-2007 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Danny Auble <da@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/plugins/mpi/mpichgm/Makefile.in b/src/plugins/mpi/mpichgm/Makefile.in
index cc2234e212e5e84cc134dc17d1b97f23ec94681d..6d618f3764bf95cad260f6aa0cb85c9e922cdfc7 100644
--- a/src/plugins/mpi/mpichgm/Makefile.in
+++ b/src/plugins/mpi/mpichgm/Makefile.in
@@ -43,14 +43,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -109,6 +113,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
diff --git a/src/plugins/mpi/mpichgm/mpi_mpichgm.c b/src/plugins/mpi/mpichgm/mpi_mpichgm.c
index 3524f351b9aa73465b6d9e9135a1ddeb8f8d9b5d..581da3578941e0f4fd8bdff328721262a84944d6 100644
--- a/src/plugins/mpi/mpichgm/mpi_mpichgm.c
+++ b/src/plugins/mpi/mpichgm/mpi_mpichgm.c
@@ -5,10 +5,11 @@
  *  Copyright (C) 2004 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Danny Auble <da@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -97,14 +98,14 @@ int p_mpi_hook_slurmstepd_task(const mpi_plugin_task_info_t *job,
 	env_array_overwrite_fmt(env, "GMPI_SLAVE",  "%s", addrbuf);
 	env_array_overwrite_fmt(env, "GMPI_ID",  "%u", job->gtaskid);
 	if (!getenv("GMPI_RECV")) {
-		env_array_overwrite_fmt(env, "GMPI_RECV",  "%u", "hybrid");
+		env_array_overwrite_fmt(env, "GMPI_RECV",  "%s", "hybrid");
 	}
 
 	env_array_overwrite_fmt(env, "MXMPI_MASTER", "%s", addr);
 	env_array_overwrite_fmt(env, "MXMPI_ID", "%u", job->gtaskid);
 	env_array_overwrite_fmt(env, "MXMPI_SLAVE", "%s", addrbuf);
 	if (!getenv("MXMPI_RECV")) {
-		env_array_overwrite_fmt(env, "MXMPI_RECV",  "%u", "hybrid");
+		env_array_overwrite_fmt(env, "MXMPI_RECV",  "%s", "hybrid");
 	}
 	debug2("init for mpi rank %u\n", job->gtaskid);
 	
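The GMPI_RECV and MXMPI_RECV changes above are real bug fixes, not cosmetic:
passing the string "hybrid" where the "%u" conversion expects an unsigned int
is undefined behavior in C, even if it happened to print something plausible
on some ABIs. The corrected calls use the matching "%s" conversion:

    env_array_overwrite_fmt(env, "GMPI_RECV", "%s", "hybrid");

The identical fix is applied to the mpichmx plugin below.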
diff --git a/src/plugins/mpi/mpichgm/mpichgm.c b/src/plugins/mpi/mpichgm/mpichgm.c
index c42295e7c95d5a43d18d0571a588e6fc803f7630..05295c74e583540f6aa080d545ec024f8f8e7c50 100644
--- a/src/plugins/mpi/mpichgm/mpichgm.c
+++ b/src/plugins/mpi/mpichgm/mpichgm.c
@@ -5,10 +5,11 @@
  *  Copyright (C) 2004 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Takao Hatazaki <takao.hatazaki@hp.com>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -226,7 +227,7 @@ static int _gmpi_establish_map(gmpi_state_t *st)
 		if (setsockopt(newfd, SOL_SOCKET, SO_REUSEADDR,
 			       (void *)&j, sizeof(j)))
 			error("setsockopt in GMPI master: %m");
-		bzero(&addr, sizeof(addr));
+		memset(&addr, 0, sizeof(addr));
 		addr.sin_family = AF_INET;
 		addr.sin_addr.s_addr = htonl(iaddrs[i]);
 		addr.sin_port = htons(dp->remote_port);
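Replacing bzero() with memset() here is a portability cleanup: bzero(3) was
marked legacy in POSIX.1-2001 and removed entirely in POSIX.1-2008, while
memset() is standard C:

    memset(&addr, 0, sizeof(addr));   /* portable zeroing of the sockaddr */

The same substitution appears in mpichmx.c below.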
diff --git a/src/plugins/mpi/mpichgm/mpichgm.h b/src/plugins/mpi/mpichgm/mpichgm.h
index f7180de8dc6bb2af2a45b4338f9cdaf54ffa2304..f6a1d2e72652df912898e1b9f8d85a9be9a0a8b1 100644
--- a/src/plugins/mpi/mpichgm/mpichgm.h
+++ b/src/plugins/mpi/mpichgm/mpichgm.h
@@ -5,10 +5,11 @@
  *  Copyright (C) 2004 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Danny Auble <da@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/plugins/mpi/mpichmx/Makefile.in b/src/plugins/mpi/mpichmx/Makefile.in
index 944c62ff413eaffd631f14915078ddcc42f3ebc9..2f465710e28ce834593dc4004017aa94fd9931bb 100644
--- a/src/plugins/mpi/mpichmx/Makefile.in
+++ b/src/plugins/mpi/mpichmx/Makefile.in
@@ -43,14 +43,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -109,6 +113,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
diff --git a/src/plugins/mpi/mpichmx/mpi_mpichmx.c b/src/plugins/mpi/mpichmx/mpi_mpichmx.c
index 77b67a303e7783b90b724c97f1bc9f9cb3a71ce8..780f25a2817e13f8747578a2786435e604383e0d 100644
--- a/src/plugins/mpi/mpichmx/mpi_mpichmx.c
+++ b/src/plugins/mpi/mpichmx/mpi_mpichmx.c
@@ -4,10 +4,11 @@
  *  Copyright (C) 2004 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Danny Auble <da@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -96,14 +97,14 @@ int p_mpi_hook_slurmstepd_task(const mpi_plugin_task_info_t *job,
 	env_array_overwrite_fmt(env, "GMPI_SLAVE",  "%s", addrbuf);
 	env_array_overwrite_fmt(env, "GMPI_ID",  "%u", job->gtaskid);
 	if (!getenv("GMPI_RECV")) {
-		env_array_overwrite_fmt(env, "GMPI_RECV",  "%u", "hybrid");
+		env_array_overwrite_fmt(env, "GMPI_RECV",  "%s", "hybrid");
 	}
 
 	env_array_overwrite_fmt(env, "MXMPI_MASTER", "%s", addr);
 	env_array_overwrite_fmt(env, "MXMPI_ID", "%u", job->gtaskid);
 	env_array_overwrite_fmt(env, "MXMPI_SLAVE", "%s", addrbuf);
 	if (!getenv("MXMPI_RECV")) {
-		env_array_overwrite_fmt(env, "MXMPI_RECV",  "%u", "hybrid");
+		env_array_overwrite_fmt(env, "MXMPI_RECV",  "%s", "hybrid");
 	}
 	debug2("init for mpi rank %u\n", job->gtaskid);
 	
diff --git a/src/plugins/mpi/mpichmx/mpichmx.c b/src/plugins/mpi/mpichmx/mpichmx.c
index 19998ab745e1647227f37596a32948b8d4d02c89..17c3c89c3ceb77e4c8ed6dcd11766fcfc052c933 100644
--- a/src/plugins/mpi/mpichmx/mpichmx.c
+++ b/src/plugins/mpi/mpichmx/mpichmx.c
@@ -4,10 +4,11 @@
  *  Copyright (C) 2004 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Takao Hatazaki <takao.hatazaki@hp.com>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -229,7 +230,7 @@ static int _gmpi_establish_map(gmpi_state_t *st)
 		if (setsockopt(newfd, SOL_SOCKET, SO_REUSEADDR,
 			       (void *)&j, sizeof(j)))
 			error("setsockopt in GMPI master: %m");
-		bzero(&addr, sizeof(addr));
+		memset(&addr, 0, sizeof(addr));
 		addr.sin_family = AF_INET;
 		addr.sin_addr.s_addr = htonl(iaddrs[i]);
 		addr.sin_port = htons(dp->remote_port);
diff --git a/src/plugins/mpi/mpichmx/mpichmx.h b/src/plugins/mpi/mpichmx/mpichmx.h
index 6f71bb29b2181d3eec20f468b0b4d80d34e78738..8cb2b879a775de0ce7f182cb98d69196d2db2862 100644
--- a/src/plugins/mpi/mpichmx/mpichmx.h
+++ b/src/plugins/mpi/mpichmx/mpichmx.h
@@ -5,10 +5,11 @@
  *  Copyright (C) 2004 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Danny Auble <da@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/plugins/mpi/mvapich/Makefile.in b/src/plugins/mpi/mvapich/Makefile.in
index 8da11d77fd5ebbbf0bdbe191daa67c0b325e1c1f..8dcb0113584c7904ce786da07d6fd0ae98bacf50 100644
--- a/src/plugins/mpi/mvapich/Makefile.in
+++ b/src/plugins/mpi/mvapich/Makefile.in
@@ -43,14 +43,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -109,6 +113,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
diff --git a/src/plugins/mpi/mvapich/mpi_mvapich.c b/src/plugins/mpi/mvapich/mpi_mvapich.c
index 01e51635440c8187bf3b0694d3293e9bd8ffc454..0807052c825bbd04ab0216e5dd6b6b46bad5abea 100644
--- a/src/plugins/mpi/mvapich/mpi_mvapich.c
+++ b/src/plugins/mpi/mvapich/mpi_mvapich.c
@@ -3,12 +3,14 @@
  **  type mpi. 
  *****************************************************************************
  *  Copyright (C) 2004-2007 The Regents of the University of California.
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Danny Auble <da@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/plugins/mpi/mvapich/mvapich.c b/src/plugins/mpi/mvapich/mvapich.c
index 5e9c50a92d3e0c21c8b45464a750c17385df5a62..394b86369e3ae4e2d3b9a2b6bb195200765719e9 100644
--- a/src/plugins/mpi/mvapich/mvapich.c
+++ b/src/plugins/mpi/mvapich/mvapich.c
@@ -2,12 +2,13 @@
  *  mvapich.c - srun support for MPICH-IB (MVAPICH 0.9.4 and 0.9.5,7,8)
  *****************************************************************************
  *  Copyright (C) 2004-2007 The Regents of the University of California.
- *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).  
- *
- *  LLNL-CODE-402394.
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
+ *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/plugins/mpi/mvapich/mvapich.h b/src/plugins/mpi/mvapich/mvapich.h
index c4708a1762d44f044c682fd3a7153ba50e1c919c..5caf3891ee30d274fbed88e42135f4b87e70a06d 100644
--- a/src/plugins/mpi/mvapich/mvapich.h
+++ b/src/plugins/mpi/mvapich/mvapich.h
@@ -5,10 +5,11 @@
  *  Copyright (C) 2004-2007 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Danny Auble <da@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/plugins/mpi/none/Makefile.in b/src/plugins/mpi/none/Makefile.in
index 20c19cf46c1ac8e1b3185b5de8e4352fa8dc1210..7614ed50761e182c9eb0e5f1e65db7f7c2756f8d 100644
--- a/src/plugins/mpi/none/Makefile.in
+++ b/src/plugins/mpi/none/Makefile.in
@@ -43,14 +43,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -109,6 +113,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
diff --git a/src/plugins/mpi/none/mpi_none.c b/src/plugins/mpi/none/mpi_none.c
index b331e5f77a5334b446d35bd0fadd1a36d0fe42d3..8ed26b3670a9f26facbebfccc17d536f854407d5 100644
--- a/src/plugins/mpi/none/mpi_none.c
+++ b/src/plugins/mpi/none/mpi_none.c
@@ -5,10 +5,11 @@
  *  Copyright (C) 2004 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Danny Auble <da@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/plugins/mpi/openmpi/Makefile.in b/src/plugins/mpi/openmpi/Makefile.in
index 4069688ade46e3bbef44e7034453c6d6976099b3..6c9d7efb88d02471304c28dfa2cb04c456e5210f 100644
--- a/src/plugins/mpi/openmpi/Makefile.in
+++ b/src/plugins/mpi/openmpi/Makefile.in
@@ -43,14 +43,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -109,6 +113,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
diff --git a/src/plugins/mpi/openmpi/mpi_openmpi.c b/src/plugins/mpi/openmpi/mpi_openmpi.c
index 543b39a1e6cb86684d0717782c854aa96c1bcb2a..4ac55561b70af2f879c80bcf7d4d3858cfac5de5 100644
--- a/src/plugins/mpi/openmpi/mpi_openmpi.c
+++ b/src/plugins/mpi/openmpi/mpi_openmpi.c
@@ -5,10 +5,11 @@
  *  Copyright (C) 2004 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Christopher J. Morrone <morrone2@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/plugins/priority/Makefile.am b/src/plugins/priority/Makefile.am
new file mode 100644
index 0000000000000000000000000000000000000000..0b4761ffbaad3e2f0d99a6a11b53a75498015b9f
--- /dev/null
+++ b/src/plugins/priority/Makefile.am
@@ -0,0 +1 @@
+SUBDIRS = basic multifactor
diff --git a/src/plugins/priority/Makefile.in b/src/plugins/priority/Makefile.in
new file mode 100644
index 0000000000000000000000000000000000000000..11dfc0071876b2e8486276c4ed46e7b255abd88e
--- /dev/null
+++ b/src/plugins/priority/Makefile.in
@@ -0,0 +1,574 @@
+# Makefile.in generated by automake 1.10.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
+# 2003, 2004, 2005, 2006, 2007, 2008  Free Software Foundation, Inc.
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+VPATH = @srcdir@
+pkgdatadir = $(datadir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+target_triplet = @target@
+subdir = src/plugins/priority
+DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
+	$(top_srcdir)/auxdir/slurm.m4 \
+	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
+	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
+	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
+	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
+	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_databases.m4 \
+	$(top_srcdir)/auxdir/x_ac_debug.m4 \
+	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
+	$(top_srcdir)/auxdir/x_ac_federation.m4 \
+	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
+	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_munge.m4 \
+	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_pam.m4 \
+	$(top_srcdir)/auxdir/x_ac_ptrace.m4 \
+	$(top_srcdir)/auxdir/x_ac_readline.m4 \
+	$(top_srcdir)/auxdir/x_ac_setpgrp.m4 \
+	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
+	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
+	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
+	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+	$(ACLOCAL_M4)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h $(top_builddir)/slurm/slurm.h
+CONFIG_CLEAN_FILES =
+SOURCES =
+DIST_SOURCES =
+RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \
+	html-recursive info-recursive install-data-recursive \
+	install-dvi-recursive install-exec-recursive \
+	install-html-recursive install-info-recursive \
+	install-pdf-recursive install-ps-recursive install-recursive \
+	installcheck-recursive installdirs-recursive pdf-recursive \
+	ps-recursive uninstall-recursive
+RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive	\
+  distclean-recursive maintainer-clean-recursive
+ETAGS = etags
+CTAGS = ctags
+DIST_SUBDIRS = $(SUBDIRS)
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AR = @AR@
+AUTHD_CFLAGS = @AUTHD_CFLAGS@
+AUTHD_LIBS = @AUTHD_LIBS@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
+BLUEGENE_LOADED = @BLUEGENE_LOADED@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CMD_LDFLAGS = @CMD_LDFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CXX = @CXX@
+CXXCPP = @CXXCPP@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+DSYMUTIL = @DSYMUTIL@
+ECHO = @ECHO@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+ELAN_LIBS = @ELAN_LIBS@
+EXEEXT = @EXEEXT@
+F77 = @F77@
+FEDERATION_LDFLAGS = @FEDERATION_LDFLAGS@
+FFLAGS = @FFLAGS@
+GREP = @GREP@
+GTK2_CFLAGS = @GTK2_CFLAGS@
+GTK2_LIBS = @GTK2_LIBS@
+HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@
+HAVEPGCONFIG = @HAVEPGCONFIG@
+HAVEPKGCONFIG = @HAVEPKGCONFIG@
+HAVE_AIX = @HAVE_AIX@
+HAVE_ELAN = @HAVE_ELAN@
+HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_OPENSSL = @HAVE_OPENSSL@
+HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+LDFLAGS = @LDFLAGS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBTOOL = @LIBTOOL@
+LIB_LDFLAGS = @LIB_LDFLAGS@
+LN_S = @LN_S@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MKDIR_P = @MKDIR_P@
+MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@
+MUNGE_LDFLAGS = @MUNGE_LDFLAGS@
+MUNGE_LIBS = @MUNGE_LIBS@
+MYSQL_CFLAGS = @MYSQL_CFLAGS@
+MYSQL_LIBS = @MYSQL_LIBS@
+NCURSES = @NCURSES@
+NMEDIT = @NMEDIT@
+NUMA_LIBS = @NUMA_LIBS@
+OBJEXT = @OBJEXT@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PAM_LIBS = @PAM_LIBS@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PGSQL_CFLAGS = @PGSQL_CFLAGS@
+PGSQL_LIBS = @PGSQL_LIBS@
+PLPA_LIBS = @PLPA_LIBS@
+PROCTRACKDIR = @PROCTRACKDIR@
+PROJECT = @PROJECT@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+RANLIB = @RANLIB@
+READLINE_LIBS = @READLINE_LIBS@
+RELEASE = @RELEASE@
+SED = @SED@
+SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
+SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SLURMCTLD_PORT = @SLURMCTLD_PORT@
+SLURMDBD_PORT = @SLURMDBD_PORT@
+SLURMD_PORT = @SLURMD_PORT@
+SLURM_API_AGE = @SLURM_API_AGE@
+SLURM_API_CURRENT = @SLURM_API_CURRENT@
+SLURM_API_MAJOR = @SLURM_API_MAJOR@
+SLURM_API_REVISION = @SLURM_API_REVISION@
+SLURM_API_VERSION = @SLURM_API_VERSION@
+SLURM_MAJOR = @SLURM_MAJOR@
+SLURM_MICRO = @SLURM_MICRO@
+SLURM_MINOR = @SLURM_MINOR@
+SLURM_PREFIX = @SLURM_PREFIX@
+SLURM_VERSION = @SLURM_VERSION@
+SO_LDFLAGS = @SO_LDFLAGS@
+SSL_CPPFLAGS = @SSL_CPPFLAGS@
+SSL_LDFLAGS = @SSL_LDFLAGS@
+SSL_LIBS = @SSL_LIBS@
+STRIP = @STRIP@
+UTIL_LIBS = @UTIL_LIBS@
+VERSION = @VERSION@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+ac_ct_F77 = @ac_ct_F77@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_vendor = @build_vendor@
+builddir = @builddir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target = @target@
+target_alias = @target_alias@
+target_cpu = @target_cpu@
+target_os = @target_os@
+target_vendor = @target_vendor@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+SUBDIRS = basic multifactor
+all: all-recursive
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am  $(am__configure_deps)
+	@for dep in $?; do \
+	  case '$(am__configure_deps)' in \
+	    *$$dep*) \
+	      cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh \
+		&& exit 0; \
+	      exit 1;; \
+	  esac; \
+	done; \
+	echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu  src/plugins/priority/Makefile'; \
+	cd $(top_srcdir) && \
+	  $(AUTOMAKE) --gnu  src/plugins/priority/Makefile
+.PRECIOUS: Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+	@case '$?' in \
+	  *config.status*) \
+	    cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+	  *) \
+	    echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+	    cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+	esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+mostlyclean-libtool:
+	-rm -f *.lo
+
+clean-libtool:
+	-rm -rf .libs _libs
+
+# This directory's subdirectories are mostly independent; you can cd
+# into them and run `make' without going through this Makefile.
+# To change the values of `make' variables: instead of editing Makefiles,
+# (1) if the variable is set in `config.status', edit `config.status'
+#     (which will cause the Makefiles to be regenerated when you run `make');
+# (2) otherwise, pass the desired values on the `make' command line.
+$(RECURSIVE_TARGETS):
+	@failcom='exit 1'; \
+	for f in x $$MAKEFLAGS; do \
+	  case $$f in \
+	    *=* | --[!k]*);; \
+	    *k*) failcom='fail=yes';; \
+	  esac; \
+	done; \
+	dot_seen=no; \
+	target=`echo $@ | sed s/-recursive//`; \
+	list='$(SUBDIRS)'; for subdir in $$list; do \
+	  echo "Making $$target in $$subdir"; \
+	  if test "$$subdir" = "."; then \
+	    dot_seen=yes; \
+	    local_target="$$target-am"; \
+	  else \
+	    local_target="$$target"; \
+	  fi; \
+	  (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \
+	  || eval $$failcom; \
+	done; \
+	if test "$$dot_seen" = "no"; then \
+	  $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \
+	fi; test -z "$$fail"
+
+$(RECURSIVE_CLEAN_TARGETS):
+	@failcom='exit 1'; \
+	for f in x $$MAKEFLAGS; do \
+	  case $$f in \
+	    *=* | --[!k]*);; \
+	    *k*) failcom='fail=yes';; \
+	  esac; \
+	done; \
+	dot_seen=no; \
+	case "$@" in \
+	  distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \
+	  *) list='$(SUBDIRS)' ;; \
+	esac; \
+	rev=''; for subdir in $$list; do \
+	  if test "$$subdir" = "."; then :; else \
+	    rev="$$subdir $$rev"; \
+	  fi; \
+	done; \
+	rev="$$rev ."; \
+	target=`echo $@ | sed s/-recursive//`; \
+	for subdir in $$rev; do \
+	  echo "Making $$target in $$subdir"; \
+	  if test "$$subdir" = "."; then \
+	    local_target="$$target-am"; \
+	  else \
+	    local_target="$$target"; \
+	  fi; \
+	  (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \
+	  || eval $$failcom; \
+	done && test -z "$$fail"
+tags-recursive:
+	list='$(SUBDIRS)'; for subdir in $$list; do \
+	  test "$$subdir" = . || (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags); \
+	done
+ctags-recursive:
+	list='$(SUBDIRS)'; for subdir in $$list; do \
+	  test "$$subdir" = . || (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) ctags); \
+	done
+
+ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES)
+	list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
+	unique=`for i in $$list; do \
+	    if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+	  done | \
+	  $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
+	      END { if (nonempty) { for (i in files) print i; }; }'`; \
+	mkid -fID $$unique
+tags: TAGS
+
+TAGS: tags-recursive $(HEADERS) $(SOURCES)  $(TAGS_DEPENDENCIES) \
+		$(TAGS_FILES) $(LISP)
+	tags=; \
+	here=`pwd`; \
+	if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \
+	  include_option=--etags-include; \
+	  empty_fix=.; \
+	else \
+	  include_option=--include; \
+	  empty_fix=; \
+	fi; \
+	list='$(SUBDIRS)'; for subdir in $$list; do \
+	  if test "$$subdir" = .; then :; else \
+	    test ! -f $$subdir/TAGS || \
+	      tags="$$tags $$include_option=$$here/$$subdir/TAGS"; \
+	  fi; \
+	done; \
+	list='$(SOURCES) $(HEADERS)  $(LISP) $(TAGS_FILES)'; \
+	unique=`for i in $$list; do \
+	    if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+	  done | \
+	  $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
+	      END { if (nonempty) { for (i in files) print i; }; }'`; \
+	if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \
+	  test -n "$$unique" || unique=$$empty_fix; \
+	  $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+	    $$tags $$unique; \
+	fi
+ctags: CTAGS
+CTAGS: ctags-recursive $(HEADERS) $(SOURCES)  $(TAGS_DEPENDENCIES) \
+		$(TAGS_FILES) $(LISP)
+	tags=; \
+	list='$(SOURCES) $(HEADERS)  $(LISP) $(TAGS_FILES)'; \
+	unique=`for i in $$list; do \
+	    if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+	  done | \
+	  $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
+	      END { if (nonempty) { for (i in files) print i; }; }'`; \
+	test -z "$(CTAGS_ARGS)$$tags$$unique" \
+	  || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
+	     $$tags $$unique
+
+GTAGS:
+	here=`$(am__cd) $(top_builddir) && pwd` \
+	  && cd $(top_srcdir) \
+	  && gtags -i $(GTAGS_ARGS) $$here
+
+distclean-tags:
+	-rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
+
+distdir: $(DISTFILES)
+	@srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+	topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+	list='$(DISTFILES)'; \
+	  dist_files=`for file in $$list; do echo $$file; done | \
+	  sed -e "s|^$$srcdirstrip/||;t" \
+	      -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+	case $$dist_files in \
+	  */*) $(MKDIR_P) `echo "$$dist_files" | \
+			   sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+			   sort -u` ;; \
+	esac; \
+	for file in $$dist_files; do \
+	  if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+	  if test -d $$d/$$file; then \
+	    dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+	    if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+	      cp -pR $(srcdir)/$$file $(distdir)$$dir || exit 1; \
+	    fi; \
+	    cp -pR $$d/$$file $(distdir)$$dir || exit 1; \
+	  else \
+	    test -f $(distdir)/$$file \
+	    || cp -p $$d/$$file $(distdir)/$$file \
+	    || exit 1; \
+	  fi; \
+	done
+	list='$(DIST_SUBDIRS)'; for subdir in $$list; do \
+	  if test "$$subdir" = .; then :; else \
+	    test -d "$(distdir)/$$subdir" \
+	    || $(MKDIR_P) "$(distdir)/$$subdir" \
+	    || exit 1; \
+	    distdir=`$(am__cd) $(distdir) && pwd`; \
+	    top_distdir=`$(am__cd) $(top_distdir) && pwd`; \
+	    (cd $$subdir && \
+	      $(MAKE) $(AM_MAKEFLAGS) \
+	        top_distdir="$$top_distdir" \
+	        distdir="$$distdir/$$subdir" \
+		am__remove_distdir=: \
+		am__skip_length_check=: \
+	        distdir) \
+	      || exit 1; \
+	  fi; \
+	done
+check-am: all-am
+check: check-recursive
+all-am: Makefile
+installdirs: installdirs-recursive
+installdirs-am:
+install: install-recursive
+install-exec: install-exec-recursive
+install-data: install-data-recursive
+uninstall: uninstall-recursive
+
+install-am: all-am
+	@$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-recursive
+install-strip:
+	$(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+	  install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+	  `test -z '$(STRIP)' || \
+	    echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+	-test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+
+maintainer-clean-generic:
+	@echo "This command is intended for maintainers to use"
+	@echo "it deletes files that may require special tools to rebuild."
+clean: clean-recursive
+
+clean-am: clean-generic clean-libtool mostlyclean-am
+
+distclean: distclean-recursive
+	-rm -f Makefile
+distclean-am: clean-am distclean-generic distclean-tags
+
+dvi: dvi-recursive
+
+dvi-am:
+
+html: html-recursive
+
+info: info-recursive
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-recursive
+
+install-exec-am:
+
+install-html: install-html-recursive
+
+install-info: install-info-recursive
+
+install-man:
+
+install-pdf: install-pdf-recursive
+
+install-ps: install-ps-recursive
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-recursive
+	-rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-recursive
+
+mostlyclean-am: mostlyclean-generic mostlyclean-libtool
+
+pdf: pdf-recursive
+
+pdf-am:
+
+ps: ps-recursive
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) install-am \
+	install-strip
+
+.PHONY: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) CTAGS GTAGS \
+	all all-am check check-am clean clean-generic clean-libtool \
+	ctags ctags-recursive distclean distclean-generic \
+	distclean-libtool distclean-tags distdir dvi dvi-am html \
+	html-am info info-am install install-am install-data \
+	install-data-am install-dvi install-dvi-am install-exec \
+	install-exec-am install-html install-html-am install-info \
+	install-info-am install-man install-pdf install-pdf-am \
+	install-ps install-ps-am install-strip installcheck \
+	installcheck-am installdirs installdirs-am maintainer-clean \
+	maintainer-clean-generic mostlyclean mostlyclean-generic \
+	mostlyclean-libtool pdf pdf-am ps ps-am tags tags-recursive \
+	uninstall uninstall-am
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/src/plugins/priority/basic/Makefile.am b/src/plugins/priority/basic/Makefile.am
new file mode 100644
index 0000000000000000000000000000000000000000..48a2a7b58644e3c61a6791d9a8f53e356c98d86b
--- /dev/null
+++ b/src/plugins/priority/basic/Makefile.am
@@ -0,0 +1,13 @@
+# Makefile for priority/basic plugin
+
+AUTOMAKE_OPTIONS = foreign
+
+PLUGIN_FLAGS = -module -avoid-version --export-dynamic 
+
+INCLUDES = -I$(top_srcdir) -I$(top_srcdir)/src/common
+
+pkglib_LTLIBRARIES = priority_basic.la
+
+# Basic priority plugin.
+priority_basic_la_SOURCES = priority_basic.c
+priority_basic_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS)
diff --git a/src/plugins/priority/basic/Makefile.in b/src/plugins/priority/basic/Makefile.in
new file mode 100644
index 0000000000000000000000000000000000000000..1390e1682cde5d4d662d59b7a26f11886b4100fe
--- /dev/null
+++ b/src/plugins/priority/basic/Makefile.in
@@ -0,0 +1,566 @@
+# Makefile.in generated by automake 1.10.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
+# 2003, 2004, 2005, 2006, 2007, 2008  Free Software Foundation, Inc.
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# Makefile for priority/basic plugin
+
+VPATH = @srcdir@
+pkgdatadir = $(datadir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+target_triplet = @target@
+subdir = src/plugins/priority/basic
+DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
+	$(top_srcdir)/auxdir/slurm.m4 \
+	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
+	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
+	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
+	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
+	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_databases.m4 \
+	$(top_srcdir)/auxdir/x_ac_debug.m4 \
+	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
+	$(top_srcdir)/auxdir/x_ac_federation.m4 \
+	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
+	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_munge.m4 \
+	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_pam.m4 \
+	$(top_srcdir)/auxdir/x_ac_ptrace.m4 \
+	$(top_srcdir)/auxdir/x_ac_readline.m4 \
+	$(top_srcdir)/auxdir/x_ac_setpgrp.m4 \
+	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
+	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
+	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
+	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+	$(ACLOCAL_M4)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h $(top_builddir)/slurm/slurm.h
+CONFIG_CLEAN_FILES =
+am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
+am__vpath_adj = case $$p in \
+    $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
+    *) f=$$p;; \
+  esac;
+am__strip_dir = `echo $$p | sed -e 's|^.*/||'`;
+am__installdirs = "$(DESTDIR)$(pkglibdir)"
+pkglibLTLIBRARIES_INSTALL = $(INSTALL)
+LTLIBRARIES = $(pkglib_LTLIBRARIES)
+priority_basic_la_LIBADD =
+am_priority_basic_la_OBJECTS = priority_basic.lo
+priority_basic_la_OBJECTS = $(am_priority_basic_la_OBJECTS)
+priority_basic_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \
+	$(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \
+	$(priority_basic_la_LDFLAGS) $(LDFLAGS) -o $@
+DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm
+depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp
+am__depfiles_maybe = depfiles
+COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \
+	$(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
+LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
+	--mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \
+	$(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
+CCLD = $(CC)
+LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
+	--mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \
+	$(LDFLAGS) -o $@
+SOURCES = $(priority_basic_la_SOURCES)
+DIST_SOURCES = $(priority_basic_la_SOURCES)
+ETAGS = etags
+CTAGS = ctags
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AR = @AR@
+AUTHD_CFLAGS = @AUTHD_CFLAGS@
+AUTHD_LIBS = @AUTHD_LIBS@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
+BLUEGENE_LOADED = @BLUEGENE_LOADED@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CMD_LDFLAGS = @CMD_LDFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CXX = @CXX@
+CXXCPP = @CXXCPP@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+DSYMUTIL = @DSYMUTIL@
+ECHO = @ECHO@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+ELAN_LIBS = @ELAN_LIBS@
+EXEEXT = @EXEEXT@
+F77 = @F77@
+FEDERATION_LDFLAGS = @FEDERATION_LDFLAGS@
+FFLAGS = @FFLAGS@
+GREP = @GREP@
+GTK2_CFLAGS = @GTK2_CFLAGS@
+GTK2_LIBS = @GTK2_LIBS@
+HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@
+HAVEPGCONFIG = @HAVEPGCONFIG@
+HAVEPKGCONFIG = @HAVEPKGCONFIG@
+HAVE_AIX = @HAVE_AIX@
+HAVE_ELAN = @HAVE_ELAN@
+HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_OPENSSL = @HAVE_OPENSSL@
+HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+LDFLAGS = @LDFLAGS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBTOOL = @LIBTOOL@
+LIB_LDFLAGS = @LIB_LDFLAGS@
+LN_S = @LN_S@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MKDIR_P = @MKDIR_P@
+MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@
+MUNGE_LDFLAGS = @MUNGE_LDFLAGS@
+MUNGE_LIBS = @MUNGE_LIBS@
+MYSQL_CFLAGS = @MYSQL_CFLAGS@
+MYSQL_LIBS = @MYSQL_LIBS@
+NCURSES = @NCURSES@
+NMEDIT = @NMEDIT@
+NUMA_LIBS = @NUMA_LIBS@
+OBJEXT = @OBJEXT@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PAM_LIBS = @PAM_LIBS@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PGSQL_CFLAGS = @PGSQL_CFLAGS@
+PGSQL_LIBS = @PGSQL_LIBS@
+PLPA_LIBS = @PLPA_LIBS@
+PROCTRACKDIR = @PROCTRACKDIR@
+PROJECT = @PROJECT@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+RANLIB = @RANLIB@
+READLINE_LIBS = @READLINE_LIBS@
+RELEASE = @RELEASE@
+SED = @SED@
+SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
+SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SLURMCTLD_PORT = @SLURMCTLD_PORT@
+SLURMDBD_PORT = @SLURMDBD_PORT@
+SLURMD_PORT = @SLURMD_PORT@
+SLURM_API_AGE = @SLURM_API_AGE@
+SLURM_API_CURRENT = @SLURM_API_CURRENT@
+SLURM_API_MAJOR = @SLURM_API_MAJOR@
+SLURM_API_REVISION = @SLURM_API_REVISION@
+SLURM_API_VERSION = @SLURM_API_VERSION@
+SLURM_MAJOR = @SLURM_MAJOR@
+SLURM_MICRO = @SLURM_MICRO@
+SLURM_MINOR = @SLURM_MINOR@
+SLURM_PREFIX = @SLURM_PREFIX@
+SLURM_VERSION = @SLURM_VERSION@
+SO_LDFLAGS = @SO_LDFLAGS@
+SSL_CPPFLAGS = @SSL_CPPFLAGS@
+SSL_LDFLAGS = @SSL_LDFLAGS@
+SSL_LIBS = @SSL_LIBS@
+STRIP = @STRIP@
+UTIL_LIBS = @UTIL_LIBS@
+VERSION = @VERSION@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+ac_ct_F77 = @ac_ct_F77@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_vendor = @build_vendor@
+builddir = @builddir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target = @target@
+target_alias = @target_alias@
+target_cpu = @target_cpu@
+target_os = @target_os@
+target_vendor = @target_vendor@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+AUTOMAKE_OPTIONS = foreign
+PLUGIN_FLAGS = -module -avoid-version --export-dynamic 
+INCLUDES = -I$(top_srcdir) -I$(top_srcdir)/src/common
+pkglib_LTLIBRARIES = priority_basic.la
+
+# Basic priority plugin.
+priority_basic_la_SOURCES = priority_basic.c
+priority_basic_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS)
+all: all-am
+
+.SUFFIXES:
+.SUFFIXES: .c .lo .o .obj
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am  $(am__configure_deps)
+	@for dep in $?; do \
+	  case '$(am__configure_deps)' in \
+	    *$$dep*) \
+	      cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh \
+		&& exit 0; \
+	      exit 1;; \
+	  esac; \
+	done; \
+	echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign  src/plugins/priority/basic/Makefile'; \
+	cd $(top_srcdir) && \
+	  $(AUTOMAKE) --foreign  src/plugins/priority/basic/Makefile
+.PRECIOUS: Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+	@case '$?' in \
+	  *config.status*) \
+	    cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+	  *) \
+	    echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+	    cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+	esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES)
+	@$(NORMAL_INSTALL)
+	test -z "$(pkglibdir)" || $(MKDIR_P) "$(DESTDIR)$(pkglibdir)"
+	@list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \
+	  if test -f $$p; then \
+	    f=$(am__strip_dir) \
+	    echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) '$$p' '$(DESTDIR)$(pkglibdir)/$$f'"; \
+	    $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) "$$p" "$(DESTDIR)$(pkglibdir)/$$f"; \
+	  else :; fi; \
+	done
+
+uninstall-pkglibLTLIBRARIES:
+	@$(NORMAL_UNINSTALL)
+	@list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \
+	  p=$(am__strip_dir) \
+	  echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$p'"; \
+	  $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$p"; \
+	done
+
+clean-pkglibLTLIBRARIES:
+	-test -z "$(pkglib_LTLIBRARIES)" || rm -f $(pkglib_LTLIBRARIES)
+	@list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \
+	  dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \
+	  test "$$dir" != "$$p" || dir=.; \
+	  echo "rm -f \"$${dir}/so_locations\""; \
+	  rm -f "$${dir}/so_locations"; \
+	done
+priority_basic.la: $(priority_basic_la_OBJECTS) $(priority_basic_la_DEPENDENCIES) 
+	$(priority_basic_la_LINK) -rpath $(pkglibdir) $(priority_basic_la_OBJECTS) $(priority_basic_la_LIBADD) $(LIBS)
+
+mostlyclean-compile:
+	-rm -f *.$(OBJEXT)
+
+distclean-compile:
+	-rm -f *.tab.c
+
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/priority_basic.Plo@am__quote@
+
+.c.o:
+@am__fastdepCC_TRUE@	$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCC_TRUE@	mv -f $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(COMPILE) -c $<
+
+.c.obj:
+@am__fastdepCC_TRUE@	$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'`
+@am__fastdepCC_TRUE@	mv -f $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(COMPILE) -c `$(CYGPATH_W) '$<'`
+
+.c.lo:
+@am__fastdepCC_TRUE@	$(LTCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCC_TRUE@	mv -f $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LTCOMPILE) -c -o $@ $<
+
+mostlyclean-libtool:
+	-rm -f *.lo
+
+clean-libtool:
+	-rm -rf .libs _libs
+
+ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES)
+	list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
+	unique=`for i in $$list; do \
+	    if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+	  done | \
+	  $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
+	      END { if (nonempty) { for (i in files) print i; }; }'`; \
+	mkid -fID $$unique
+tags: TAGS
+
+TAGS:  $(HEADERS) $(SOURCES)  $(TAGS_DEPENDENCIES) \
+		$(TAGS_FILES) $(LISP)
+	tags=; \
+	here=`pwd`; \
+	list='$(SOURCES) $(HEADERS)  $(LISP) $(TAGS_FILES)'; \
+	unique=`for i in $$list; do \
+	    if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+	  done | \
+	  $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
+	      END { if (nonempty) { for (i in files) print i; }; }'`; \
+	if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \
+	  test -n "$$unique" || unique=$$empty_fix; \
+	  $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+	    $$tags $$unique; \
+	fi
+ctags: CTAGS
+CTAGS:  $(HEADERS) $(SOURCES)  $(TAGS_DEPENDENCIES) \
+		$(TAGS_FILES) $(LISP)
+	tags=; \
+	list='$(SOURCES) $(HEADERS)  $(LISP) $(TAGS_FILES)'; \
+	unique=`for i in $$list; do \
+	    if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+	  done | \
+	  $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
+	      END { if (nonempty) { for (i in files) print i; }; }'`; \
+	test -z "$(CTAGS_ARGS)$$tags$$unique" \
+	  || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
+	     $$tags $$unique
+
+GTAGS:
+	here=`$(am__cd) $(top_builddir) && pwd` \
+	  && cd $(top_srcdir) \
+	  && gtags -i $(GTAGS_ARGS) $$here
+
+distclean-tags:
+	-rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
+
+distdir: $(DISTFILES)
+	@srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+	topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+	list='$(DISTFILES)'; \
+	  dist_files=`for file in $$list; do echo $$file; done | \
+	  sed -e "s|^$$srcdirstrip/||;t" \
+	      -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+	case $$dist_files in \
+	  */*) $(MKDIR_P) `echo "$$dist_files" | \
+			   sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+			   sort -u` ;; \
+	esac; \
+	for file in $$dist_files; do \
+	  if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+	  if test -d $$d/$$file; then \
+	    dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+	    if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+	      cp -pR $(srcdir)/$$file $(distdir)$$dir || exit 1; \
+	    fi; \
+	    cp -pR $$d/$$file $(distdir)$$dir || exit 1; \
+	  else \
+	    test -f $(distdir)/$$file \
+	    || cp -p $$d/$$file $(distdir)/$$file \
+	    || exit 1; \
+	  fi; \
+	done
+check-am: all-am
+check: check-am
+all-am: Makefile $(LTLIBRARIES)
+installdirs:
+	for dir in "$(DESTDIR)$(pkglibdir)"; do \
+	  test -z "$$dir" || $(MKDIR_P) "$$dir"; \
+	done
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+	@$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+	$(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+	  install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+	  `test -z '$(STRIP)' || \
+	    echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+	-test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+
+maintainer-clean-generic:
+	@echo "This command is intended for maintainers to use"
+	@echo "it deletes files that may require special tools to rebuild."
+clean: clean-am
+
+clean-am: clean-generic clean-libtool clean-pkglibLTLIBRARIES \
+	mostlyclean-am
+
+distclean: distclean-am
+	-rm -rf ./$(DEPDIR)
+	-rm -f Makefile
+distclean-am: clean-am distclean-compile distclean-generic \
+	distclean-tags
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-exec-am: install-pkglibLTLIBRARIES
+
+install-html: install-html-am
+
+install-info: install-info-am
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-ps: install-ps-am
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+	-rm -rf ./$(DEPDIR)
+	-rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-compile mostlyclean-generic \
+	mostlyclean-libtool
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am: uninstall-pkglibLTLIBRARIES
+
+.MAKE: install-am install-strip
+
+.PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \
+	clean-libtool clean-pkglibLTLIBRARIES ctags distclean \
+	distclean-compile distclean-generic distclean-libtool \
+	distclean-tags distdir dvi dvi-am html html-am info info-am \
+	install install-am install-data install-data-am install-dvi \
+	install-dvi-am install-exec install-exec-am install-html \
+	install-html-am install-info install-info-am install-man \
+	install-pdf install-pdf-am install-pkglibLTLIBRARIES \
+	install-ps install-ps-am install-strip installcheck \
+	installcheck-am installdirs maintainer-clean \
+	maintainer-clean-generic mostlyclean mostlyclean-compile \
+	mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \
+	tags uninstall uninstall-am uninstall-pkglibLTLIBRARIES
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/src/plugins/priority/basic/priority_basic.c b/src/plugins/priority/basic/priority_basic.c
new file mode 100644
index 0000000000000000000000000000000000000000..93cf56837a47db41b8df1691675dff91d7537908
--- /dev/null
+++ b/src/plugins/priority/basic/priority_basic.c
@@ -0,0 +1,146 @@
+/*****************************************************************************\
+ *  priority_basic.c - NO-OP slurm priority plugin.
+ *****************************************************************************
+ *  Copyright (C) 2008 Lawrence Livermore National Security.
+ *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+ *  Written by Danny Auble <da@llnl.gov>
+ *  CODE-OCEC-09-009. All rights reserved.
+ *  
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
+ *  
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission 
+ *  to link the code of portions of this program with the OpenSSL library under 
+ *  certain conditions as described in each individual source file, and 
+ *  distribute linked combinations including the two. You must obey the GNU 
+ *  General Public License in all respects for all of the code used other than 
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this 
+ *  exception to your version of the file(s), but you are not obligated to do 
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in 
+ *  the program, then also delete it here.
+ *  
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *  
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#ifdef HAVE_CONFIG_H
+#  include "config.h"
+#endif
+
+#if HAVE_STDINT_H
+#  include <stdint.h>
+#endif
+#if HAVE_INTTYPES_H
+#  include <inttypes.h>
+#endif
+
+#include <stdio.h>
+#include <slurm/slurm_errno.h>
+
+#include "src/common/slurm_priority.h"
+
+/*
+ * These variables are required by the generic plugin interface.  If they
+ * are not found in the plugin, the plugin loader will ignore it.
+ *
+ * plugin_name - a string giving a human-readable description of the
+ * plugin.  There is no maximum length, but the symbol must refer to
+ * a valid string.
+ *
+ * plugin_type - a string suggesting the type of the plugin or its
+ * applicability to a particular form of data or method of data handling.
+ * If the low-level plugin API is used, the contents of this string are
+ * unimportant and may be anything.  SLURM uses the higher-level plugin
+ * interface which requires this string to be of the form
+ *
+ *	<application>/<method>
+ *
+ * where <application> is a description of the intended application of
+ * the plugin (e.g., "priority" for SLURM job priority computation) and
+ * <method> is a description of how this plugin satisfies that application.
+ * SLURM will only load priority plugins if the plugin_type string has a
+ * prefix of "priority/".
+ *
+ * plugin_version - an unsigned 32-bit integer giving the version number
+ * of the plugin.  If major and minor revisions are desired, the major
+ * version number may be multiplied by a suitable magnitude constant such
+ * as 100 or 1000.  Various SLURM versions will likely require a certain
+ * minimum version for their plugins as the priority API matures.
+ */
+const char plugin_name[]       	= "Priority BASIC plugin";
+const char plugin_type[]       	= "priority/basic";
+const uint32_t plugin_version	= 100;
+
+/*
+ * init() is called when the plugin is loaded, before any other functions
+ * are called.  Put global initialization here.
+ */
+int init ( void )
+{
+	verbose("%s loaded", plugin_name);
+	return SLURM_SUCCESS;
+}
+
+int fini ( void )
+{
+	return SLURM_SUCCESS;
+}
+
+/*
+ * The remainder of this file implements the standard SLURM priority API.
+ */
+
+extern uint32_t priority_p_set(uint32_t last_prio, struct job_record *job_ptr)
+{
+	uint32_t new_prio = 1;
+
+	if(job_ptr->direct_set_prio)
+		return job_ptr->priority;
+
+	if(last_prio >= 2)
+		new_prio = (last_prio - 1);
+
+	if(job_ptr->details)
+		new_prio -= (job_ptr->details->nice - NICE_OFFSET);
+
+	if(new_prio < 1)
+		new_prio = 1;
+
+	return new_prio;
+}
+
+extern void priority_p_reconfig()
+{
+	return;
+}
+
+extern int priority_p_set_max_cluster_usage(uint32_t procs, uint32_t half_life) 
+{
+	return SLURM_SUCCESS;
+}
+
+extern void priority_p_set_assoc_usage(acct_association_rec_t *assoc)
+{
+	return;
+}
+
+extern List priority_p_get_priority_factors_list(
+	priority_factors_request_msg_t *req_msg)
+{
+	return(list_create(NULL));
+}
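priority_p_set() above implements SLURM's traditional FIFO behavior: each new job receives a priority one below the previous job's, shifted by the job's nice value, with a floor of 1 (a priority of 0 would hold the job). A standalone sketch of the same arithmetic; NICE_OFFSET is assumed here to be 10000, its usual value in SLURM headers of this era:

	#include <stdint.h>
	#include <stdio.h>

	#define NICE_OFFSET 10000	/* assumed; defined in slurm.h */

	/* Mirrors priority_p_set(): one below last_prio, adjusted
	 * by (nice - NICE_OFFSET), floored at 1.  Unsigned wraparound
	 * makes the adjustment work for nice values below the offset. */
	static uint32_t basic_prio(uint32_t last_prio, uint32_t nice)
	{
		uint32_t new_prio = 1;

		if (last_prio >= 2)
			new_prio = last_prio - 1;
		new_prio -= (nice - NICE_OFFSET);
		if (new_prio < 1)
			new_prio = 1;
		return new_prio;
	}

	int main(void)
	{
		printf("%u\n", basic_prio(1000, NICE_OFFSET));       /* 999 */
		printf("%u\n", basic_prio(1000, NICE_OFFSET + 100)); /* 899 */
		return 0;
	}
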
diff --git a/src/plugins/priority/multifactor/Makefile.am b/src/plugins/priority/multifactor/Makefile.am
new file mode 100644
index 0000000000000000000000000000000000000000..1fd4972fe633cfb5816741ea78dbb34a0c6b4003
--- /dev/null
+++ b/src/plugins/priority/multifactor/Makefile.am
@@ -0,0 +1,14 @@
+# Makefile for priority/multifactor plugin
+
+AUTOMAKE_OPTIONS = foreign
+
+PLUGIN_FLAGS = -module -avoid-version --export-dynamic 
+
+INCLUDES = -I$(top_srcdir) -I$(top_srcdir)/src/common
+
+pkglib_LTLIBRARIES = priority_multifactor.la
+
+# Multifactor priority plugin.
+priority_multifactor_la_SOURCES = priority_multifactor.c
+priority_multifactor_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS)
+priority_multifactor_la_LIBADD  = -lm
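Unlike the basic plugin, this one links against libm (the -lm in priority_multifactor_la_LIBADD): weighting jobs by decayed historical usage needs floating-point routines from <math.h>. An illustrative sketch of an exponential half-life decay, the kind of computation that motivates the dependency; this is an assumed model for illustration, not the plugin's actual code (which lives in priority_multifactor.c, outside this hunk):

	#include <math.h>
	#include <stdio.h>

	/* Decay recorded usage by one half every half_life seconds. */
	static double decay_usage(double usage, double dt, double half_life)
	{
		return usage * pow(0.5, dt / half_life);
	}

	int main(void)
	{
		/* After exactly one half-life, usage drops to 50%. */
		printf("%.2f\n", decay_usage(100.0, 3600.0, 3600.0)); /* 50.00 */
		return 0;
	}
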
diff --git a/src/plugins/priority/multifactor/Makefile.in b/src/plugins/priority/multifactor/Makefile.in
new file mode 100644
index 0000000000000000000000000000000000000000..10cb10f68821ef6733f2af3b6a509d714201cc88
--- /dev/null
+++ b/src/plugins/priority/multifactor/Makefile.in
@@ -0,0 +1,568 @@
+# Makefile.in generated by automake 1.10.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
+# 2003, 2004, 2005, 2006, 2007, 2008  Free Software Foundation, Inc.
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# Makefile for priority/multifactor plugin
+
+VPATH = @srcdir@
+pkgdatadir = $(datadir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+target_triplet = @target@
+subdir = src/plugins/priority/multifactor
+DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
+	$(top_srcdir)/auxdir/slurm.m4 \
+	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
+	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
+	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
+	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
+	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_databases.m4 \
+	$(top_srcdir)/auxdir/x_ac_debug.m4 \
+	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
+	$(top_srcdir)/auxdir/x_ac_federation.m4 \
+	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
+	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_munge.m4 \
+	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_pam.m4 \
+	$(top_srcdir)/auxdir/x_ac_ptrace.m4 \
+	$(top_srcdir)/auxdir/x_ac_readline.m4 \
+	$(top_srcdir)/auxdir/x_ac_setpgrp.m4 \
+	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
+	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
+	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
+	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+	$(ACLOCAL_M4)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h $(top_builddir)/slurm/slurm.h
+CONFIG_CLEAN_FILES =
+am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
+am__vpath_adj = case $$p in \
+    $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
+    *) f=$$p;; \
+  esac;
+am__strip_dir = `echo $$p | sed -e 's|^.*/||'`;
+am__installdirs = "$(DESTDIR)$(pkglibdir)"
+pkglibLTLIBRARIES_INSTALL = $(INSTALL)
+LTLIBRARIES = $(pkglib_LTLIBRARIES)
+priority_multifactor_la_DEPENDENCIES =
+am_priority_multifactor_la_OBJECTS = priority_multifactor.lo
+priority_multifactor_la_OBJECTS =  \
+	$(am_priority_multifactor_la_OBJECTS)
+priority_multifactor_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \
+	$(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \
+	$(priority_multifactor_la_LDFLAGS) $(LDFLAGS) -o $@
+DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm
+depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp
+am__depfiles_maybe = depfiles
+COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \
+	$(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
+LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
+	--mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \
+	$(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
+CCLD = $(CC)
+LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
+	--mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \
+	$(LDFLAGS) -o $@
+SOURCES = $(priority_multifactor_la_SOURCES)
+DIST_SOURCES = $(priority_multifactor_la_SOURCES)
+ETAGS = etags
+CTAGS = ctags
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AR = @AR@
+AUTHD_CFLAGS = @AUTHD_CFLAGS@
+AUTHD_LIBS = @AUTHD_LIBS@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
+BLUEGENE_LOADED = @BLUEGENE_LOADED@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CMD_LDFLAGS = @CMD_LDFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CXX = @CXX@
+CXXCPP = @CXXCPP@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+DSYMUTIL = @DSYMUTIL@
+ECHO = @ECHO@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+ELAN_LIBS = @ELAN_LIBS@
+EXEEXT = @EXEEXT@
+F77 = @F77@
+FEDERATION_LDFLAGS = @FEDERATION_LDFLAGS@
+FFLAGS = @FFLAGS@
+GREP = @GREP@
+GTK2_CFLAGS = @GTK2_CFLAGS@
+GTK2_LIBS = @GTK2_LIBS@
+HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@
+HAVEPGCONFIG = @HAVEPGCONFIG@
+HAVEPKGCONFIG = @HAVEPKGCONFIG@
+HAVE_AIX = @HAVE_AIX@
+HAVE_ELAN = @HAVE_ELAN@
+HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_OPENSSL = @HAVE_OPENSSL@
+HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+LDFLAGS = @LDFLAGS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBTOOL = @LIBTOOL@
+LIB_LDFLAGS = @LIB_LDFLAGS@
+LN_S = @LN_S@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MKDIR_P = @MKDIR_P@
+MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@
+MUNGE_LDFLAGS = @MUNGE_LDFLAGS@
+MUNGE_LIBS = @MUNGE_LIBS@
+MYSQL_CFLAGS = @MYSQL_CFLAGS@
+MYSQL_LIBS = @MYSQL_LIBS@
+NCURSES = @NCURSES@
+NMEDIT = @NMEDIT@
+NUMA_LIBS = @NUMA_LIBS@
+OBJEXT = @OBJEXT@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PAM_LIBS = @PAM_LIBS@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PGSQL_CFLAGS = @PGSQL_CFLAGS@
+PGSQL_LIBS = @PGSQL_LIBS@
+PLPA_LIBS = @PLPA_LIBS@
+PROCTRACKDIR = @PROCTRACKDIR@
+PROJECT = @PROJECT@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+RANLIB = @RANLIB@
+READLINE_LIBS = @READLINE_LIBS@
+RELEASE = @RELEASE@
+SED = @SED@
+SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
+SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SLURMCTLD_PORT = @SLURMCTLD_PORT@
+SLURMDBD_PORT = @SLURMDBD_PORT@
+SLURMD_PORT = @SLURMD_PORT@
+SLURM_API_AGE = @SLURM_API_AGE@
+SLURM_API_CURRENT = @SLURM_API_CURRENT@
+SLURM_API_MAJOR = @SLURM_API_MAJOR@
+SLURM_API_REVISION = @SLURM_API_REVISION@
+SLURM_API_VERSION = @SLURM_API_VERSION@
+SLURM_MAJOR = @SLURM_MAJOR@
+SLURM_MICRO = @SLURM_MICRO@
+SLURM_MINOR = @SLURM_MINOR@
+SLURM_PREFIX = @SLURM_PREFIX@
+SLURM_VERSION = @SLURM_VERSION@
+SO_LDFLAGS = @SO_LDFLAGS@
+SSL_CPPFLAGS = @SSL_CPPFLAGS@
+SSL_LDFLAGS = @SSL_LDFLAGS@
+SSL_LIBS = @SSL_LIBS@
+STRIP = @STRIP@
+UTIL_LIBS = @UTIL_LIBS@
+VERSION = @VERSION@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+ac_ct_F77 = @ac_ct_F77@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_vendor = @build_vendor@
+builddir = @builddir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target = @target@
+target_alias = @target_alias@
+target_cpu = @target_cpu@
+target_os = @target_os@
+target_vendor = @target_vendor@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+AUTOMAKE_OPTIONS = foreign
+PLUGIN_FLAGS = -module -avoid-version --export-dynamic 
+INCLUDES = -I$(top_srcdir) -I$(top_srcdir)/src/common
+pkglib_LTLIBRARIES = priority_multifactor.la
+
+# Multifactor priority plugin.
+priority_multifactor_la_SOURCES = priority_multifactor.c
+priority_multifactor_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS)
+priority_multifactor_la_LIBADD = -lm
+all: all-am
+
+.SUFFIXES:
+.SUFFIXES: .c .lo .o .obj
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am  $(am__configure_deps)
+	@for dep in $?; do \
+	  case '$(am__configure_deps)' in \
+	    *$$dep*) \
+	      cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh \
+		&& exit 0; \
+	      exit 1;; \
+	  esac; \
+	done; \
+	echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign  src/plugins/priority/multifactor/Makefile'; \
+	cd $(top_srcdir) && \
+	  $(AUTOMAKE) --foreign  src/plugins/priority/multifactor/Makefile
+.PRECIOUS: Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+	@case '$?' in \
+	  *config.status*) \
+	    cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+	  *) \
+	    echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+	    cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+	esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES)
+	@$(NORMAL_INSTALL)
+	test -z "$(pkglibdir)" || $(MKDIR_P) "$(DESTDIR)$(pkglibdir)"
+	@list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \
+	  if test -f $$p; then \
+	    f=$(am__strip_dir) \
+	    echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) '$$p' '$(DESTDIR)$(pkglibdir)/$$f'"; \
+	    $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) "$$p" "$(DESTDIR)$(pkglibdir)/$$f"; \
+	  else :; fi; \
+	done
+
+uninstall-pkglibLTLIBRARIES:
+	@$(NORMAL_UNINSTALL)
+	@list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \
+	  p=$(am__strip_dir) \
+	  echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$p'"; \
+	  $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$p"; \
+	done
+
+clean-pkglibLTLIBRARIES:
+	-test -z "$(pkglib_LTLIBRARIES)" || rm -f $(pkglib_LTLIBRARIES)
+	@list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \
+	  dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \
+	  test "$$dir" != "$$p" || dir=.; \
+	  echo "rm -f \"$${dir}/so_locations\""; \
+	  rm -f "$${dir}/so_locations"; \
+	done
+priority_multifactor.la: $(priority_multifactor_la_OBJECTS) $(priority_multifactor_la_DEPENDENCIES) 
+	$(priority_multifactor_la_LINK) -rpath $(pkglibdir) $(priority_multifactor_la_OBJECTS) $(priority_multifactor_la_LIBADD) $(LIBS)
+
+mostlyclean-compile:
+	-rm -f *.$(OBJEXT)
+
+distclean-compile:
+	-rm -f *.tab.c
+
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/priority_multifactor.Plo@am__quote@
+
+.c.o:
+@am__fastdepCC_TRUE@	$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCC_TRUE@	mv -f $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(COMPILE) -c $<
+
+.c.obj:
+@am__fastdepCC_TRUE@	$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'`
+@am__fastdepCC_TRUE@	mv -f $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(COMPILE) -c `$(CYGPATH_W) '$<'`
+
+.c.lo:
+@am__fastdepCC_TRUE@	$(LTCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCC_TRUE@	mv -f $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LTCOMPILE) -c -o $@ $<
+
+mostlyclean-libtool:
+	-rm -f *.lo
+
+clean-libtool:
+	-rm -rf .libs _libs
+
+ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES)
+	list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
+	unique=`for i in $$list; do \
+	    if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+	  done | \
+	  $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
+	      END { if (nonempty) { for (i in files) print i; }; }'`; \
+	mkid -fID $$unique
+tags: TAGS
+
+TAGS:  $(HEADERS) $(SOURCES)  $(TAGS_DEPENDENCIES) \
+		$(TAGS_FILES) $(LISP)
+	tags=; \
+	here=`pwd`; \
+	list='$(SOURCES) $(HEADERS)  $(LISP) $(TAGS_FILES)'; \
+	unique=`for i in $$list; do \
+	    if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+	  done | \
+	  $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
+	      END { if (nonempty) { for (i in files) print i; }; }'`; \
+	if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \
+	  test -n "$$unique" || unique=$$empty_fix; \
+	  $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+	    $$tags $$unique; \
+	fi
+ctags: CTAGS
+CTAGS:  $(HEADERS) $(SOURCES)  $(TAGS_DEPENDENCIES) \
+		$(TAGS_FILES) $(LISP)
+	tags=; \
+	list='$(SOURCES) $(HEADERS)  $(LISP) $(TAGS_FILES)'; \
+	unique=`for i in $$list; do \
+	    if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+	  done | \
+	  $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
+	      END { if (nonempty) { for (i in files) print i; }; }'`; \
+	test -z "$(CTAGS_ARGS)$$tags$$unique" \
+	  || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
+	     $$tags $$unique
+
+GTAGS:
+	here=`$(am__cd) $(top_builddir) && pwd` \
+	  && cd $(top_srcdir) \
+	  && gtags -i $(GTAGS_ARGS) $$here
+
+distclean-tags:
+	-rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
+
+distdir: $(DISTFILES)
+	@srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+	topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+	list='$(DISTFILES)'; \
+	  dist_files=`for file in $$list; do echo $$file; done | \
+	  sed -e "s|^$$srcdirstrip/||;t" \
+	      -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+	case $$dist_files in \
+	  */*) $(MKDIR_P) `echo "$$dist_files" | \
+			   sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+			   sort -u` ;; \
+	esac; \
+	for file in $$dist_files; do \
+	  if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+	  if test -d $$d/$$file; then \
+	    dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+	    if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+	      cp -pR $(srcdir)/$$file $(distdir)$$dir || exit 1; \
+	    fi; \
+	    cp -pR $$d/$$file $(distdir)$$dir || exit 1; \
+	  else \
+	    test -f $(distdir)/$$file \
+	    || cp -p $$d/$$file $(distdir)/$$file \
+	    || exit 1; \
+	  fi; \
+	done
+check-am: all-am
+check: check-am
+all-am: Makefile $(LTLIBRARIES)
+installdirs:
+	for dir in "$(DESTDIR)$(pkglibdir)"; do \
+	  test -z "$$dir" || $(MKDIR_P) "$$dir"; \
+	done
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+	@$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+	$(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+	  install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+	  `test -z '$(STRIP)' || \
+	    echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+	-test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+
+maintainer-clean-generic:
+	@echo "This command is intended for maintainers to use"
+	@echo "it deletes files that may require special tools to rebuild."
+clean: clean-am
+
+clean-am: clean-generic clean-libtool clean-pkglibLTLIBRARIES \
+	mostlyclean-am
+
+distclean: distclean-am
+	-rm -rf ./$(DEPDIR)
+	-rm -f Makefile
+distclean-am: clean-am distclean-compile distclean-generic \
+	distclean-tags
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-exec-am: install-pkglibLTLIBRARIES
+
+install-html: install-html-am
+
+install-info: install-info-am
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-ps: install-ps-am
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+	-rm -rf ./$(DEPDIR)
+	-rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-compile mostlyclean-generic \
+	mostlyclean-libtool
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am: uninstall-pkglibLTLIBRARIES
+
+.MAKE: install-am install-strip
+
+.PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \
+	clean-libtool clean-pkglibLTLIBRARIES ctags distclean \
+	distclean-compile distclean-generic distclean-libtool \
+	distclean-tags distdir dvi dvi-am html html-am info info-am \
+	install install-am install-data install-data-am install-dvi \
+	install-dvi-am install-exec install-exec-am install-html \
+	install-html-am install-info install-info-am install-man \
+	install-pdf install-pdf-am install-pkglibLTLIBRARIES \
+	install-ps install-ps-am install-strip installcheck \
+	installcheck-am installdirs maintainer-clean \
+	maintainer-clean-generic mostlyclean mostlyclean-compile \
+	mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \
+	tags uninstall uninstall-am uninstall-pkglibLTLIBRARIES
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/src/plugins/priority/multifactor/priority_multifactor.c b/src/plugins/priority/multifactor/priority_multifactor.c
new file mode 100644
index 0000000000000000000000000000000000000000..980b6eae233293b42959605cd1320d3ae68740c7
--- /dev/null
+++ b/src/plugins/priority/multifactor/priority_multifactor.c
@@ -0,0 +1,1140 @@
+/*****************************************************************************\
+ *  priority_multifactor.c - slurm multifactor priority plugin.
+ *****************************************************************************
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
+ *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+ *  Written by Danny Auble <da@llnl.gov>
+ *  CODE-OCEC-09-009. All rights reserved.
+ *  
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
+ *  
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission 
+ *  to link the code of portions of this program with the OpenSSL library under 
+ *  certain conditions as described in each individual source file, and 
+ *  distribute linked combinations including the two. You must obey the GNU 
+ *  General Public License in all respects for all of the code used other than 
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this 
+ *  exception to your version of the file(s), but you are not obligated to do 
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in 
+ *  the program, then also delete it here.
+ *  
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *  
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#ifdef HAVE_CONFIG_H
+#  include "config.h"
+#endif
+
+#if HAVE_STDINT_H
+#  include <stdint.h>
+#endif
+#if HAVE_INTTYPES_H
+#  include <inttypes.h>
+#endif
+#ifdef WITH_PTHREADS
+#  include <pthread.h>
+#endif				/* WITH_PTHREADS */
+
+#include <sys/stat.h>
+#include <stdio.h>
+#include <fcntl.h>
+#include <slurm/slurm_errno.h>
+#include <math.h>
+
+#include "src/common/slurm_priority.h"
+#include "src/common/xstring.h"
+#include "src/common/assoc_mgr.h"
+#include "src/common/parse_time.h"
+
+#include "src/slurmctld/locks.h"
+
+#define DECAY_INTERVAL	300 /* sleep for this many seconds */
+#define SECS_PER_DAY	(24 * 60 * 60)
+#define SECS_PER_WEEK	(7 * 24 * 60 * 60)
+
+/*
+ * These variables are required by the generic plugin interface.  If they
+ * are not found in the plugin, the plugin loader will ignore it.
+ *
+ * plugin_name - a string giving a human-readable description of the
+ * plugin.  There is no maximum length, but the symbol must refer to
+ * a valid string.
+ *
+ * plugin_type - a string suggesting the type of the plugin or its
+ * applicability to a particular form of data or method of data handling.
+ * If the low-level plugin API is used, the contents of this string are
+ * unimportant and may be anything.  SLURM uses the higher-level plugin
+ * interface which requires this string to be of the form
+ *
+ *	<application>/<method>
+ *
+ * where <application> is a description of the intended application of
+ * the plugin (e.g., "priority" for SLURM job prioritization) and <method>
+ * is a description of how this plugin satisfies that application.  SLURM will
+ * only load priority plugins if the plugin_type string has a
+ * prefix of "priority/".
+ *
+ * plugin_version - an unsigned 32-bit integer giving the version number
+ * of the plugin.  If major and minor revisions are desired, the major
+ * version number may be multiplied by a suitable magnitude constant such
+ * as 100 or 1000.  Various SLURM versions will likely require a certain
+ * minimum version for their plugins as the priority API
+ * matures.
+ */
+const char plugin_name[]       	= "Priority MULTIFACTOR plugin";
+const char plugin_type[]       	= "priority/multifactor";
+const uint32_t plugin_version	= 100;
+
+static pthread_t decay_handler_thread;
+static pthread_t cleanup_handler_thread;
+static pthread_mutex_t decay_lock = PTHREAD_MUTEX_INITIALIZER;
+static bool running_decay = 0, reconfig = 0, calc_fairshare = 1;
+static bool favor_small; /* favor small jobs over large */
+static uint32_t max_age; /* job age (in seconds) at which the age
+			  * factor stops growing */
+static uint32_t weight_age; /* weight for age factor */
+static uint32_t weight_fs; /* weight for Fairshare factor */
+static uint32_t weight_js; /* weight for Job Size factor */
+static uint32_t weight_part; /* weight for Partition factor */
+static uint32_t weight_qos; /* weight for QOS factor */
+
+extern int priority_p_set_max_cluster_usage(uint32_t procs, uint32_t half_life);
+extern void priority_p_set_assoc_usage(acct_association_rec_t *assoc);
+
+/*
+ * apply decay factor to usage_raw on all associations and QOS records
+ * IN: decay_factor - decay to be applied to each association's used
+ * shares.  This should already be modified with the amount of delta
+ * time since the last application.
+ * RET: SLURM_SUCCESS on success, SLURM_ERROR otherwise.
+ */
+static int _apply_decay(double decay_factor)
+{
+	ListIterator itr = NULL;
+	acct_association_rec_t *assoc = NULL;
+	acct_qos_rec_t *qos = NULL;
+
+	/* A decay_factor of 0 would just zero everything out, so
+	   treat it as an error rather than waste time applying it.
+	   Skip the work entirely if fairshare is not being
+	   calculated. */
+	if(!decay_factor)
+		return SLURM_ERROR;
+	else if(!calc_fairshare)
+		return SLURM_SUCCESS;
+
+	xassert(assoc_mgr_association_list);
+
+	slurm_mutex_lock(&assoc_mgr_association_lock);
+	itr = list_iterator_create(assoc_mgr_association_list);
+	while((assoc = list_next(itr))) {
+		if (assoc == assoc_mgr_root_assoc)
+			continue;
+		assoc->usage_raw *= decay_factor;
+		assoc->grp_used_wall *= decay_factor;
+	}
+	list_iterator_destroy(itr);
+	slurm_mutex_unlock(&assoc_mgr_association_lock);
+
+	slurm_mutex_lock(&assoc_mgr_qos_lock);
+	itr = list_iterator_create(assoc_mgr_qos_list);
+	while((qos = list_next(itr))) {
+		qos->usage_raw *= decay_factor;
+		qos->grp_used_wall *= decay_factor;
+	}
+	list_iterator_destroy(itr);
+	slurm_mutex_unlock(&assoc_mgr_qos_lock);
+
+	return SLURM_SUCCESS;
+}
+
+/*
+ * reset usage_raw and grp_used_wall on all associations and QOS records.
+ * This should be called every PriorityUsageResetPeriod.
+ * RET: SLURM_SUCCESS on success, SLURM_ERROR otherwise.
+ */
+static int _reset_usage()
+{
+	ListIterator itr = NULL;
+	acct_association_rec_t *assoc = NULL;
+	acct_qos_rec_t *qos = NULL;
+
+	if(!calc_fairshare)
+		return SLURM_SUCCESS;
+
+	xassert(assoc_mgr_association_list);
+
+	slurm_mutex_lock(&assoc_mgr_association_lock);
+	itr = list_iterator_create(assoc_mgr_association_list);
+	while((assoc = list_next(itr))) {
+		if (assoc == assoc_mgr_root_assoc)
+			continue;
+		assoc->usage_raw = 0;
+		assoc->grp_used_wall = 0;
+	}
+	list_iterator_destroy(itr);
+	slurm_mutex_unlock(&assoc_mgr_association_lock);
+
+	slurm_mutex_lock(&assoc_mgr_qos_lock);
+	itr = list_iterator_create(assoc_mgr_qos_list);
+	while((qos = list_next(itr))) {
+		qos->usage_raw = 0;
+		qos->grp_used_wall = 0;
+	}
+	list_iterator_destroy(itr);
+	slurm_mutex_unlock(&assoc_mgr_qos_lock);
+
+	return SLURM_SUCCESS;
+}
+
+static void _read_last_decay_ran(time_t *last_ran, time_t *last_reset)
+{
+	int data_allocated, data_read = 0;
+	uint32_t data_size = 0;
+	int state_fd;
+	char *data = NULL, *state_file;
+	Buf buffer;
+
+	xassert(last_ran);
+	xassert(last_reset);
+
+	(*last_ran) = 0;
+	(*last_reset) = 0;
+
+	/* read the file */
+	state_file = xstrdup(slurmctld_conf.state_save_location);
+	xstrcat(state_file, "/priority_last_decay_ran");
+	lock_state_files();
+	state_fd = open(state_file, O_RDONLY);
+	if (state_fd < 0) {
+		info("No last decay (%s) to recover", state_file);
+		unlock_state_files();
+		return;
+	} else {
+		data_allocated = BUF_SIZE;
+		data = xmalloc(data_allocated);
+		while (1) {
+			data_read = read(state_fd, &data[data_size],
+					 BUF_SIZE);
+			if (data_read < 0) {
+				if (errno == EINTR)
+					continue;
+				else {
+					error("Read error on %s: %m", 
+					      state_file);
+					break;
+				}
+			} else if (data_read == 0)	/* eof */
+				break;
+			data_size      += data_read;
+			data_allocated += data_read;
+			xrealloc(data, data_allocated);
+		}
+		close(state_fd);
+	}
+	xfree(state_file);
+	unlock_state_files();
+
+	buffer = create_buf(data, data_size);
+	safe_unpack_time(last_ran, buffer);
+	safe_unpack_time(last_reset, buffer);
+	free_buf(buffer);
+	debug5("Last ran decay on jobs at %d", last_ran);
+
+	return;
+
+unpack_error:
+	error("Incomplete priority last decay file returning");
+	free_buf(buffer);
+	return;
+
+}
+
+static int _write_last_decay_ran(time_t last_ran, time_t last_reset)
+{
+	/* Save high-water mark to avoid buffer growth with copies */
+	static int high_buffer_size = BUF_SIZE;
+	int error_code = SLURM_SUCCESS;
+	int state_fd;
+	char *old_file, *new_file, *state_file;
+	Buf buffer = init_buf(high_buffer_size);
+
+	pack_time(last_ran, buffer);
+	pack_time(last_reset, buffer);
+
+	/* build the state file names */
+	old_file = xstrdup(slurmctld_conf.state_save_location);
+	xstrcat(old_file, "/priority_last_decay_ran.old");
+	state_file = xstrdup(slurmctld_conf.state_save_location);
+	xstrcat(state_file, "/priority_last_decay_ran");
+	new_file = xstrdup(slurmctld_conf.state_save_location);
+	xstrcat(new_file, "/priority_last_decay_ran.new");
+
+	lock_state_files();
+	state_fd = creat(new_file, 0600);
+	if (state_fd < 0) {
+		error("Can't save decay state, create file %s error %m",
+		      new_file);
+		error_code = errno;
+	} else {
+		int pos = 0, nwrite = get_buf_offset(buffer), amount;
+		char *data = (char *)get_buf_data(buffer);
+		high_buffer_size = MAX(nwrite, high_buffer_size);
+		while (nwrite > 0) {
+			amount = write(state_fd, &data[pos], nwrite);
+			if ((amount < 0) && (errno != EINTR)) {
+				error("Error writing file %s, %m", new_file);
+				error_code = errno;
+				break;
+			}
+			nwrite -= amount;
+			pos    += amount;
+		}
+		fsync(state_fd);
+		close(state_fd);
+	}
+
+	if (error_code != SLURM_SUCCESS)
+		(void) unlink(new_file);
+	else {			/* file shuffle */
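+		/* Rotate state_file to .old and new_file into place
+		 * via link/unlink so that a crash at any point leaves
+		 * either the old or the new state intact, never a
+		 * partially written file. */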
+		(void) unlink(old_file);
+		(void) link(state_file, old_file);
+		(void) unlink(state_file);
+		(void) link(new_file, state_file);
+		(void) unlink(new_file);
+	}
+	xfree(old_file);
+	xfree(state_file);
+	xfree(new_file);
+
+	unlock_state_files();
+	debug5("done writing time %d", last_ran);
+	free_buf(buffer);
+
+	return error_code;
+}
+
+/* This should initially get the children list from
+ * assoc_mgr_root_assoc.  Since our algorithm works from the top down,
+ * we calculate all the non-user associations now.  When a user
+ * submits a job, that norm_fairshare is calculated.  Here we set
+ * usage_efctv to NO_VAL for users so we don't have to calculate a
+ * bunch of things that will never be used.
+ *
+ * NOTE: assoc_mgr_association_lock must be locked before this is called.
+ */
+static int _set_children_usage_efctv(List childern_list)
+{
+	acct_association_rec_t *assoc = NULL;
+	ListIterator itr = NULL;
+
+	if(!childern_list || !list_count(childern_list)) 
+		return SLURM_SUCCESS;
+
+	itr = list_iterator_create(childern_list);
+	while((assoc = list_next(itr))) {
+		if(assoc->user) {
+			assoc->usage_efctv = (long double)NO_VAL;
+			continue;
+		}
+		priority_p_set_assoc_usage(assoc);
+		_set_children_usage_efctv(assoc->childern_list);
+	}
+	list_iterator_destroy(itr);
+	return SLURM_SUCCESS;
+}
+
+/* job_ptr should already have the partition priority and such added
+ * beforehand; here we will be adding to it
+ */
+static double _get_fairshare_priority( struct job_record *job_ptr)
+{
+	acct_association_rec_t *assoc =
+		(acct_association_rec_t *)job_ptr->assoc_ptr;
+	double priority_fs = 0.0;
+
+	if(!calc_fairshare)
+		return 0;
+
+	if(!assoc) {
+		error("Job %u has no association.  Unable to "
+		      "compute fairshare.");
+		return 0;
+	}
+
+	slurm_mutex_lock(&assoc_mgr_association_lock);
+	if(assoc->usage_efctv == (long double)NO_VAL)
+		priority_p_set_assoc_usage(assoc);
+
+	// Priority is 0 -> 1
+	priority_fs =
+		(assoc->shares_norm - (double)assoc->usage_efctv + 1.0) / 2.0;
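+	/* For example (illustrative values): an association whose
+	 * effective usage exactly matches its normalized share gets
+	 * 0.5; using less than its share pushes the factor toward
+	 * 1.0, using more pushes it toward 0.0. */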
+	debug4("Fairshare priority for user %s in acct %s"
+	       "((%f - %Lf) + 1) / 2 = %f",
+	       assoc->user, assoc->acct, assoc->shares_norm,
+	       assoc->usage_efctv, priority_fs);
+
+	slurm_mutex_unlock(&assoc_mgr_association_lock);
+
+	debug3("job %u has a fairshare priority of %f",
+	      job_ptr->job_id, priority_fs);
+
+	return priority_fs;
+}
+
+static void _get_priority_factors(time_t start_time, struct job_record *job_ptr,
+				  priority_factors_object_t* factors,
+				  bool status_only)
+{
+	acct_qos_rec_t *qos_ptr = NULL;
+
+	xassert(factors);
+	xassert(job_ptr);
+
+	qos_ptr = (acct_qos_rec_t *)job_ptr->qos_ptr;
+
+	memset(factors, 0, sizeof(priority_factors_object_t));
+
+	if(weight_age) {
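+		/* The age factor ramps linearly from 0 when the job
+		 * becomes eligible to 1.0 once it has waited max_age
+		 * seconds. */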
+		uint32_t diff = start_time - job_ptr->details->begin_time;
+		if(job_ptr->details->begin_time) {
+			if(diff < max_age)
+				factors->priority_age =
+					(double)diff / (double)max_age;
+			else
+				factors->priority_age = 1.0;
+		}
+	}
+
+	if(job_ptr->assoc_ptr && weight_fs) {
+		if (status_only)
+			factors->priority_fs = job_ptr->priority_fs;
+		else {
+			factors->priority_fs = _get_fairshare_priority(job_ptr);
+			job_ptr->priority_fs = factors->priority_fs;
+		}
+	}
+
+	if(weight_js) {
+		/* FIXME: This will not work correctly when
+		 * the job is requesting smaller than 1 node.
+		 * We need a way to figure out how to look at
+		 * cpus requested here for those situations.  This can
+		 * probably be done with the num_procs, but
+		 * that isn't always used.  This is usually
+		 * set on bluegene systems, which is where
+		 * this problem arose.  The code below was
+		 * tested on a bluegene system, seemed to
+		 * work, but isn't probably that generic.
+		 * Also the variable total_cpus doesn't exist
+		 * yet so that would need to be defined.
+		 */
+		
+		if(favor_small) {
+			factors->priority_js = (double)(node_record_count
+					   - job_ptr->details->min_nodes)
+				/ (double)node_record_count;
+/* 			if(job_ptr->num_procs && job_ptr->num_procs != NO_VAL) { */
+/* 				factors->priority_js +=  */
+/* 					(double)(total_cpus - job_ptr->num_procs) */
+/* 					/ (double)total_cpus; */
+/* 				factors->priority_js /= 2;			 */
+/* 			} */
+		} else {
+			factors->priority_js =
+				(double)job_ptr->details->min_nodes
+				/ (double)node_record_count;
+/* 			if(job_ptr->num_procs && job_ptr->num_procs != NO_VAL) { */
+/* 				factors->priority_js +=  */
+/* 					(double)job_ptr->num_procs */
+/* 					/ (double)total_cpus; */
+/* 				factors->priority_js /= 2;			 */
+/* 			} */
+		}
+		if (factors->priority_js < .0)
+			factors->priority_js = 0.0;
+		else if (factors->priority_js > 1.0)
+			factors->priority_js = 1.0;
+	}
+
+	if(job_ptr->part_ptr && job_ptr->part_ptr->priority && weight_part) {
+		factors->priority_part = job_ptr->part_ptr->norm_priority;
+	}
+
+	if(qos_ptr && qos_ptr->priority && weight_qos) {
+		factors->priority_qos = qos_ptr->norm_priority;
+	}
+
+	factors->nice = job_ptr->details->nice;
+}
+
+static uint32_t _get_priority_internal(time_t start_time,
+				       struct job_record *job_ptr)
+{
+	double priority		= 0.0;
+	double priority_age	= 0.0;
+	double priority_fs	= 0.0;
+	double priority_js	= 0.0;
+	double priority_part	= 0.0;
+	double priority_qos	= 0.0;
+	priority_factors_object_t	factors;
+
+	if(job_ptr->direct_set_prio)
+		return job_ptr->priority;
+
+	if(!job_ptr->details) {
+		error("_get_priority_internal: job %u does not have a "
+		      "details symbol set, can't set priority");
+		return 0;
+	}
+	/*
+	 * This means the job is not eligible yet
+	 */
+	if(!job_ptr->details->begin_time
+	   || (job_ptr->details->begin_time > start_time))
+		return 1;
+
+	/* figure out the priority */
+	_get_priority_factors(start_time, job_ptr, &factors, false);
+
+	priority_age = factors.priority_age * (double)weight_age;
+	debug3("Weighted Age priority is %f * %u = %.2f",
+	       factors.priority_age, weight_age, priority_age);
+
+	priority_fs = factors.priority_fs * (double)weight_fs;
+	debug3("Weighted Fairshare priority is %f * %u = %.2f",
+	       factors.priority_fs, weight_fs, priority_fs);
+
+	priority_js = factors.priority_js * (double)weight_js;
+	debug3("Weighted JobSize priority is %f * %u = %.2f",
+	       factors.priority_js, weight_js, priority_js);
+
+	priority_part = factors.priority_part * (double)weight_part;
+	debug3("Weighted Partition priority is %f * %u = %.2f",
+	       factors.priority_part, weight_part, priority_part);
+
+	priority_qos = factors.priority_qos * (double)weight_qos;
+	debug3("Weighted QOS priority is %f * %u = %.2f",
+	       factors.priority_qos, weight_qos, priority_qos);
+
+	priority = priority_age + priority_fs + priority_js + priority_part +
+		priority_qos - (double)(factors.nice - NICE_OFFSET);
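+	/* Illustrative note: each factor above lies in [0, 1], so each
+	 * configured weight sets the maximum contribution of its
+	 * factor; nice (offset by NICE_OFFSET) is subtracted directly. */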
+
+	/*
+	 * 0 means the job is held; 1 means system hold
+	 * so 2 is the lowest non-held priority
+	 */
+	if(priority < 2)
+		priority = 2;
+
+	debug3("Job %u priority: %.2f + %.2f + %.2f + %.2f + %.2f - %d = %.2f",
+	       job_ptr->job_id, priority_age, priority_fs, priority_js,
+	       priority_part, priority_qos, (factors.nice - NICE_OFFSET),
+	       priority);
+
+	return (uint32_t)priority;
+}
+
+/* based upon the last reset time, compute when the next reset should be */
+static time_t _next_reset(uint16_t reset_period, time_t last_reset)
+{
+	struct tm last_tm;
+	time_t tmp_time, now = time(NULL);
+
+	if(localtime_r(&last_reset, &last_tm) == NULL)
+		return (time_t) 0;
+
+	last_tm.tm_sec   = 0;
+	last_tm.tm_min   = 0;
+	last_tm.tm_hour  = 0;
+/*	last_tm.tm_wday = 0	ignored */
+/*	last_tm.tm_yday = 0;	ignored */
+	last_tm.tm_isdst = -1;
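+	/* last_tm has been truncated to local midnight of the last
+	 * reset day; each case below advances it (or a time derived
+	 * from it) to the next reset boundary for the configured
+	 * period. */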
+	switch (reset_period) {
+		case PRIORITY_RESET_DAILY:
+			tmp_time = mktime(&last_tm);
+			tmp_time += SECS_PER_DAY;
+			while ((tmp_time + SECS_PER_DAY) < now)
+				tmp_time += SECS_PER_DAY;
+			return tmp_time;
+		case PRIORITY_RESET_WEEKLY:
+			tmp_time = mktime(&last_tm);
+			tmp_time += (SECS_PER_DAY * (7 - last_tm.tm_wday));
+			while ((tmp_time + SECS_PER_WEEK) < now)
+				tmp_time += SECS_PER_WEEK;
+			return tmp_time;
+		case PRIORITY_RESET_MONTHLY:
+			last_tm.tm_mday = 1;
+			if(last_tm.tm_mon < 11)
+				last_tm.tm_mon++;
+			else {
+				last_tm.tm_mon  = 0;
+				last_tm.tm_year++;
+			}
+			break;
+		case PRIORITY_RESET_QUARTERLY:
+			last_tm.tm_mday = 1;
+			if(last_tm.tm_mon < 3)
+				last_tm.tm_mon = 3;
+			else if(last_tm.tm_mon < 6)
+				last_tm.tm_mon = 6;
+			else if(last_tm.tm_mon < 9)
+				last_tm.tm_mon = 9;
+			else {
+				last_tm.tm_mon  = 0;
+				last_tm.tm_year++;
+			}
+			break;
+		case PRIORITY_RESET_YEARLY:
+			last_tm.tm_mday = 1;
+			last_tm.tm_mon  = 0;
+			last_tm.tm_year++;
+			break;
+		default:
+			return (time_t) 0;
+	}
+	return mktime(&last_tm);
+}
+
+static void *_decay_thread(void *no_data)
+{
+	struct job_record *job_ptr = NULL;
+	ListIterator itr;
+	time_t start_time = time(NULL);
+	time_t next_time;
+/* 	int sigarray[] = {SIGUSR1, 0}; */
+	struct tm tm;
+	time_t last_ran = 0;
+	time_t last_reset = 0, next_reset = 0;
+	double decay_hl = (double)slurm_get_priority_decay_hl();
+	double decay_factor = 1;
+	uint16_t reset_period = slurm_get_priority_reset_period();
+
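+	/* Illustrative note: with a per-second decay factor of
+	 * f = 1 - (ln 2 / decay_hl) ~= 1 - (0.693 / decay_hl),
+	 * f^decay_hl approaches e^(-0.693) = 0.5 for large half-lives,
+	 * so recorded usage is roughly halved every decay_hl seconds. */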
+	if(decay_hl > 0)
+		decay_factor = 1 - (0.693 / decay_hl);
+
+	/* Write lock on jobs, read lock on nodes and partitions */
+	slurmctld_lock_t job_write_lock =
+		{ NO_LOCK, WRITE_LOCK, READ_LOCK, READ_LOCK };
+
+	(void) pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
+	(void) pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
+
+	if(!localtime_r(&start_time, &tm)) {
+		fatal("_decay_thread: "
+		      "Couldn't get localtime for rollup handler %d",
+		      (int)start_time);
+		return NULL;
+	}
+
+	_read_last_decay_ran(&last_ran, &last_reset);
+	if (last_reset == 0)
+		last_reset = start_time;
+
+	while(1) {
+		time_t now = time(NULL);
+		int run_delta = 0;
+		double real_decay = 0.0;
+
+		slurm_mutex_lock(&decay_lock);
+		running_decay = 1;
+
+		/* If reconfig is called handle all that happens
+		   outside of the loop here */
+		if(reconfig) {
+			/* If decay_hl is 0 or less, no decay is
+			   applied; instead the used time is flushed
+			   at the interval set by
+			   PriorityUsageResetPeriod in slurm.conf.
+			*/
+			reset_period = slurm_get_priority_reset_period();
+			next_reset = 0;
+			decay_hl = (double)slurm_get_priority_decay_hl();
+			if(decay_hl > 0)
+				decay_factor = 1 - (0.693 / decay_hl);
+			else
+				decay_factor = 1;
+			
+			reconfig = 0;
+		}
+
+		/* this needs to be done right away so as to
+		 * incorporate it into the decay loop.
+		 */
+		switch(reset_period) {
+			case PRIORITY_RESET_NONE:
+				break;
+			case PRIORITY_RESET_NOW:	/* do once */
+				_reset_usage();
+				reset_period = PRIORITY_RESET_NONE;
+				last_reset = now;
+				break;
+			case PRIORITY_RESET_DAILY:
+			case PRIORITY_RESET_WEEKLY:
+			case PRIORITY_RESET_MONTHLY:
+			case PRIORITY_RESET_QUARTERLY:
+			case PRIORITY_RESET_YEARLY:
+				if(next_reset == 0) {
+					next_reset = _next_reset(reset_period, 
+								 last_reset);
+				}
+				if(now >= next_reset) {
+					_reset_usage();
+					last_reset = next_reset;
+					next_reset = _next_reset(reset_period, 
+								 last_reset);
+				}
+		}
+
+		if(!last_ran) 
+			goto get_usage;
+		else
+			run_delta = (start_time - last_ran);
+
+		if(run_delta <= 0)
+			goto get_usage;
+
+		real_decay = pow(decay_factor, (double)run_delta);
+
+		debug3("Decay factor over %d seconds goes from %.15f -> %.15f",
+		       run_delta, decay_factor, real_decay);
+
+		/* first apply decay to used time */
+		if(_apply_decay(real_decay) != SLURM_SUCCESS) {
+			error("problem applying decay");
+			running_decay = 0;
+			slurm_mutex_unlock(&decay_lock);
+			break;
+		}
+
+		lock_slurmctld(job_write_lock);
+		itr = list_iterator_create(job_list);
+		while ((job_ptr = list_next(itr))) {
+			/* apply new usage */
+			if(!IS_JOB_PENDING(job_ptr) &&
+			   job_ptr->start_time && job_ptr->assoc_ptr) {
+				acct_qos_rec_t *qos = 
+					(acct_qos_rec_t *)job_ptr->qos_ptr;
+				acct_association_rec_t *assoc =	
+					(acct_association_rec_t *)
+					job_ptr->assoc_ptr;
+				time_t start_period = last_ran;
+				time_t end_period = start_time;
+				double run_decay = 0;
+
+				if(job_ptr->start_time > start_period) 
+					start_period = job_ptr->start_time;
+
+				if(job_ptr->end_time 
+				   && (end_period > job_ptr->end_time)) 
+					end_period = job_ptr->end_time;
+
+				run_delta = (int)end_period - (int)start_period;
+
+				/* job already has been accounted for
+				   go to next */
+				if(run_delta < 1) 
+					continue;
+
+				debug4("job %u ran for %d seconds",
+				       job_ptr->job_id, run_delta);
+
+				/* get the time in decayed fashion */
+				run_decay = run_delta 
+					* pow(decay_factor, (double)run_delta);
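+				/* Illustrative note: the whole
+				 * run_delta chunk is decayed as though
+				 * it all accrued run_delta seconds
+				 * ago, a coarse approximation rather
+				 * than integrating the decay across
+				 * the interval. */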
+
+				real_decay = run_decay
+					* (double)job_ptr->total_procs;
+	
+				/* now apply the usage factor for this
+				   qos */
+				if(qos) {
+					slurm_mutex_lock(&assoc_mgr_qos_lock);
+					if(qos->usage_factor > 0) {
+						real_decay *= qos->usage_factor;
+						run_decay *= qos->usage_factor;
+					}
+					qos->grp_used_wall += run_decay;
+					qos->usage_raw +=
+						(long double)real_decay;
+					slurm_mutex_unlock(&assoc_mgr_qos_lock);
+				}
+
+				slurm_mutex_lock(&assoc_mgr_association_lock);
+				while(assoc) {
+					/* we don't want to make the
+					   root assoc responsible for
+					   keeping track of time 
+					*/ 
+					if (assoc == assoc_mgr_root_assoc)
+						break;
+					assoc->grp_used_wall += run_decay;
+					assoc->usage_raw +=
+						(long double)real_decay;
+					debug4("adding %f new usage to "
+					       "assoc %u (user='%s' acct='%s') "
+					       "raw usage is now %Lf.  Group "
+					       "wall added %d making it %d.",
+					       real_decay, assoc->id, 
+					       assoc->user, assoc->acct,
+					       assoc->usage_raw, run_delta,
+					       assoc->grp_used_wall);
+				
+					assoc = assoc->parent_assoc_ptr;
+				}
+				slurm_mutex_unlock(&assoc_mgr_association_lock);
+			}
+
+			/*
+			 * A priority of 0 means the job is held and 1
+			 * means a system hold, so skip those.  Also
+			 * skip jobs that are not pending; there is no
+			 * reason to set the priority of a job that
+			 * isn't pending.
+			 */
+			if((job_ptr->priority <= 1) || !IS_JOB_PENDING(job_ptr))
+				continue;
+	
+			job_ptr->priority =
+				_get_priority_internal(start_time, job_ptr);
+
+			debug2("priority for job %u is now %u", 
+			       job_ptr->job_id, job_ptr->priority);
+		}
+		list_iterator_destroy(itr);
+		unlock_slurmctld(job_write_lock);
+
+	get_usage:
+		/* now calculate all the normalized usage here */
+		slurm_mutex_lock(&assoc_mgr_association_lock);
+		_set_children_usage_efctv(assoc_mgr_root_assoc->childern_list);
+		slurm_mutex_unlock(&assoc_mgr_association_lock);
+	
+		last_ran = start_time;
+
+		_write_last_decay_ran(last_ran, last_reset);
+
+		running_decay = 0;
+		slurm_mutex_unlock(&decay_lock);
+
+		/* sleep for DECAY_INTERVAL secs */
+		tm.tm_sec += DECAY_INTERVAL;
+		tm.tm_isdst = -1;
+		next_time = mktime(&tm);
+		sleep((next_time-start_time));
+		start_time = next_time;
+		/* repeat ;) */
+	}
+	return NULL;
+}
+
+/* Select the specific jobs that the user wants to see.
+ * Requests that include both job id(s) and user id(s) must match both to pass.
+ * Returns 1 if the job should be omitted */
+static int _filter_job(struct job_record *job_ptr, List req_job_list,
+		       List req_user_list)
+{
+	int filter = 0;
+	ListIterator iterator;
+	uint32_t *job_id;
+	uint32_t *user_id;
+
+	if (req_job_list) {
+		filter = 1;
+		iterator = list_iterator_create(req_job_list);
+		while ((job_id = list_next(iterator))) {
+			if (*job_id == job_ptr->job_id) {
+				filter = 0;
+				break;
+			}
+		}
+		list_iterator_destroy(iterator);
+		if (filter == 1) {
+			return 1;
+		}
+	}
+
+	if (req_user_list) {
+		filter = 1;
+		iterator = list_iterator_create(req_user_list);
+		while ((user_id = list_next(iterator))) {
+			if (*user_id == job_ptr->user_id) {
+				filter = 0;
+				break;
+			}
+		}
+		list_iterator_destroy(iterator);
+		if (filter == 1)
+			return 1;
+	}
+
+	return filter;
+}
+
+static void *_cleanup_thread(void *no_data)
+{
+	pthread_join(decay_handler_thread, NULL);
+	return NULL;
+}
+
+static void _internal_setup()
+{
+	favor_small = slurm_get_priority_favor_small();
+
+	max_age = slurm_get_priority_max_age();
+	weight_age = slurm_get_priority_weight_age();
+	weight_fs = slurm_get_priority_weight_fairshare();
+	weight_js = slurm_get_priority_weight_job_size();
+	weight_part = slurm_get_priority_weight_partition();
+	weight_qos = slurm_get_priority_weight_qos();
+
+	debug3("priority: Max Age is %u", max_age);
+	debug3("priority: Weight Age is %u", weight_age);
+	debug3("priority: Weight Fairshare is %u", weight_fs);
+	debug3("priority: Weight JobSize is %u", weight_js);
+	debug3("priority: Weight Part is %u", weight_part);
+	debug3("priority: Weight QOS is %u", weight_qos);
+}
+
+/*
+ * init() is called when the plugin is loaded, before any other functions
+ * are called.  Put global initialization here.
+ */
+int init ( void )
+{
+	pthread_attr_t thread_attr;
+	char *temp = NULL;
+
+	_internal_setup();
+
+	/* Check to see if we are running a supported accounting plugin */
+	temp = slurm_get_accounting_storage_type();
+	if(strcasecmp(temp, "accounting_storage/slurmdbd")
+	   && strcasecmp(temp, "accounting_storage/mysql")) {
+		error("You are not running a supported "
+		      "accounting_storage plugin\n(%s).\n"
+		      "Fairshare can only be calculated with either "
+		      "'accounting_storage/slurmdbd' "
+		      "or 'accounting_storage/mysql' enabled.  "
+		      "If you want multifactor priority without fairshare "
+		      "ignore this message.\n",
+		      temp);
+		calc_fairshare = 0;
+		weight_fs = 0;
+	} else {
+		if(!cluster_procs)
+			fatal("We need to have a cluster cpu count "
+			      "before we can init the priority/multifactor "
+			      "plugin");
+		priority_p_set_max_cluster_usage(cluster_procs,
+						 slurm_get_priority_decay_hl());
+		slurm_attr_init(&thread_attr);
+		if (pthread_create(&decay_handler_thread, &thread_attr,
+				   _decay_thread, NULL))
+			fatal("pthread_create error %m");
+		
+		/* This thread exists only to join the decay thread so
+		   we don't core dump if it is cancelled while sleeping;
+		   since there is no other place to join it, we have to
+		   create another thread to do so.
+		*/
+		slurm_attr_init(&thread_attr);
+		if (pthread_create(&cleanup_handler_thread, &thread_attr,
+				   _cleanup_thread, NULL))
+			fatal("pthread_create error %m");
+		
+		slurm_attr_destroy(&thread_attr);
+	}
+	xfree(temp);
+
+	verbose("%s loaded", plugin_name);
+	return SLURM_SUCCESS;
+}
+
+int fini ( void )
+{
+	/* Daemon termination handled here */
+	if(running_decay)
+		debug("Waiting for decay thread to finish.");
+
+	slurm_mutex_lock(&decay_lock);
+	
+	/* cancel the decay thread and then join the cleanup thread */
+	if(decay_handler_thread)
+		pthread_cancel(decay_handler_thread);
+	if(cleanup_handler_thread)
+		pthread_join(cleanup_handler_thread, NULL);
+
+	slurm_mutex_unlock(&decay_lock);
+
+	return SLURM_SUCCESS;
+}
+
+extern uint32_t priority_p_set(uint32_t last_prio, struct job_record *job_ptr)
+{
+	uint32_t priority = _get_priority_internal(time(NULL), job_ptr);
+
+	debug2("initial priority for job %u is %u", job_ptr->job_id, priority);
+
+	return priority;
+}
+
+extern void priority_p_reconfig()
+{
+	reconfig = 1;
+	_internal_setup();
+	debug2("%s reconfigured", plugin_name);
+	
+	return;
+}
+
+extern int priority_p_set_max_cluster_usage(uint32_t procs, uint32_t half_life)
+{
+	static uint32_t last_procs = 0;
+	static uint32_t last_half_life = 0;
+
+	if(!calc_fairshare)
+		return SLURM_SUCCESS;
+
+	/* No need to do this if nothing has changed so just return */
+	if((procs == last_procs) && (half_life == last_half_life))
+		return SLURM_SUCCESS;
+
+	xassert(assoc_mgr_root_assoc);
+
+	last_procs = procs;
+	last_half_life = half_life;
+
+	/* set the maximum possible decayed usage for the entire cluster */
+	assoc_mgr_root_assoc->usage_raw =
+		(long double)procs * (long double)half_life * (long double)2;
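+	/* Illustrative note: a fully busy cluster decayed with
+	 * half-life h converges to procs * h / ln 2, about
+	 * 1.44 * procs * h, so 2 * procs * h comfortably bounds the
+	 * decayed usage the cluster can accumulate. */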
+	assoc_mgr_root_assoc->usage_norm = 1.0;
+	debug3("Total possible cpu usage for half_life of %d secs "
+	       "on the system is %.0Lf",
+	       half_life, assoc_mgr_root_assoc->usage_raw);
+
+	return SLURM_SUCCESS;
+}
+
+extern void priority_p_set_assoc_usage(acct_association_rec_t *assoc)
+{
+	char *child = "account";
+	char *child_str = assoc->acct;
+
+	xassert(assoc);
+
+	if(assoc->user) {
+		child = "user";
+		child_str = assoc->user;
+	}
+
+	xassert(assoc_mgr_root_assoc);
+	xassert(assoc_mgr_root_assoc->usage_raw);
+	xassert(assoc->parent_assoc_ptr);
+	
+	assoc->usage_norm = assoc->usage_raw / assoc_mgr_root_assoc->usage_raw;
+	debug4("Normalized usage for %s %s off %s %Lf / %Lf = %Lf",
+	       child, child_str, assoc->parent_assoc_ptr->acct,
+	       assoc->usage_raw, assoc_mgr_root_assoc->usage_raw,
+	       assoc->usage_norm);
+	/* This is needed in case someone changes the half-life on the
+	   fly and now we have used more time than is available under
+	   the new config */
+	if (assoc->usage_norm > 1.0) 
+		assoc->usage_norm = 1.0;
+	
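+	/* Effective usage blends this association's own normalized
+	 * usage with its parent's effective usage, weighted by the
+	 * fraction of the parent's shares held at this level.  For
+	 * example (illustrative values): usage_norm 0.2, parent
+	 * usage_efctv 0.6, shares_raw 1 of level_shares 2 gives
+	 * 0.2 + (0.6 - 0.2) * 1/2 = 0.4. */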
+	if (assoc->parent_assoc_ptr == assoc_mgr_root_assoc) {
+		assoc->usage_efctv = assoc->usage_norm;
+		debug4("Effective usage for %s %s off %s %Lf %Lf",
+		       child, child_str, assoc->parent_assoc_ptr->acct,
+		       assoc->usage_efctv, assoc->usage_norm);
+	} else {
+		assoc->usage_efctv = assoc->usage_norm +
+			((assoc->parent_assoc_ptr->usage_efctv -
+			  assoc->usage_norm) *
+			 assoc->shares_raw / 
+			 (long double)assoc->level_shares);
+		debug4("Effective usage for %s %s off %s "
+		       "%Lf + ((%Lf - %Lf) * %d / %d) = %Lf",
+		       child, child_str, assoc->parent_assoc_ptr->acct,
+		       assoc->usage_norm,
+		       assoc->parent_assoc_ptr->usage_efctv,
+		       assoc->usage_norm, assoc->shares_raw,
+		       assoc->level_shares, assoc->usage_efctv);
+	}
+}
+
+extern List priority_p_get_priority_factors_list(
+	priority_factors_request_msg_t *req_msg)
+{
+	List req_job_list;
+	List req_user_list;
+	List ret_list = NULL;
+	ListIterator itr;
+	priority_factors_object_t *obj = NULL;
+	struct job_record *job_ptr = NULL;
+	time_t start_time = time(NULL);
+
+	xassert(req_msg);
+	req_job_list = req_msg->job_id_list;
+	req_user_list = req_msg->uid_list;
+
+	/* Read lock on jobs, nodes, and partitions */
+	slurmctld_lock_t job_read_lock =
+		{ NO_LOCK, READ_LOCK, READ_LOCK, READ_LOCK };
+
+	if (job_list && list_count(job_list)) {
+		ret_list = list_create(slurm_destroy_priority_factors_object);
+		lock_slurmctld(job_read_lock);
+		itr = list_iterator_create(job_list);
+		while ((job_ptr = list_next(itr))) {
+			/*
+			 * We are only looking for pending jobs 
+			 */
+			if(!IS_JOB_PENDING(job_ptr))
+				continue;
+			/*
+			 * This means the job is not eligible yet
+			 */
+			if(!job_ptr->details->begin_time
+			   || (job_ptr->details->begin_time > start_time))
+				continue;
+
+			/*
+			 * 0 means the job is held; 1 means system hold
+			 */
+			if(job_ptr->priority <= 1)
+				continue;
+			
+			if (_filter_job(job_ptr, req_job_list, req_user_list))
+				continue;
+			
+			obj = xmalloc(sizeof(priority_factors_object_t));
+			
+			_get_priority_factors(start_time, job_ptr, obj, true);
+			obj->job_id = job_ptr->job_id;
+			obj->user_id = job_ptr->user_id;
+			list_append(ret_list, obj);
+		}
+		list_iterator_destroy(itr);
+		unlock_slurmctld(job_read_lock);
+		if (!list_count(ret_list)) {
+			list_destroy(ret_list);
+			ret_list = NULL;
+		}
+	}
+
+	return ret_list;
+}
diff --git a/src/plugins/proctrack/Makefile.in b/src/plugins/proctrack/Makefile.in
index 19758aa90cadd50b23c865e8d50845654346b3b0..1f93dca055dd8dca0caf44ca0424bcffd289381f 100644
--- a/src/plugins/proctrack/Makefile.in
+++ b/src/plugins/proctrack/Makefile.in
@@ -42,14 +42,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -91,6 +95,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
diff --git a/src/plugins/proctrack/aix/Makefile.in b/src/plugins/proctrack/aix/Makefile.in
index 04ad764d82921af2213b96c15ef3db552ca1c166..c99342ce711a9b7e6d343b91b194c9674324a8de 100644
--- a/src/plugins/proctrack/aix/Makefile.in
+++ b/src/plugins/proctrack/aix/Makefile.in
@@ -43,14 +43,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -109,6 +113,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
diff --git a/src/plugins/proctrack/aix/proctrack_aix.c b/src/plugins/proctrack/aix/proctrack_aix.c
index d3b7fb63e35ce2be411141a1e488a4944b384c44..5eaa97126054334f140cde44c3df8b4b336e99a9 100644
--- a/src/plugins/proctrack/aix/proctrack_aix.c
+++ b/src/plugins/proctrack/aix/proctrack_aix.c
@@ -4,10 +4,11 @@
  *  Copyright (C) 2005-2006 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov> et. al.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/plugins/proctrack/linuxproc/Makefile.in b/src/plugins/proctrack/linuxproc/Makefile.in
index 60026f03691052651a01448bacd1d69d0e4cc850..c87ae8f727b8f8cb781b26ee281372e7a870c989 100644
--- a/src/plugins/proctrack/linuxproc/Makefile.in
+++ b/src/plugins/proctrack/linuxproc/Makefile.in
@@ -43,14 +43,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -110,6 +114,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
diff --git a/src/plugins/proctrack/linuxproc/kill_tree.c b/src/plugins/proctrack/linuxproc/kill_tree.c
index b792cea0f4d71c71e76e39e8d364e9ba0db12e50..d4c480394254738dbe90fd48b9b5d8c0390a51a1 100644
--- a/src/plugins/proctrack/linuxproc/kill_tree.c
+++ b/src/plugins/proctrack/linuxproc/kill_tree.c
@@ -5,10 +5,11 @@
  *  Copyright (C) 2004 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Takao Hatazaki <takao.hatazaki@hp.com>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/plugins/proctrack/linuxproc/kill_tree.h b/src/plugins/proctrack/linuxproc/kill_tree.h
index 8ae0d2c56404d3f01cec6a954ec898e5be99e0e8..4129505c8d51ee17b6e41491d018e2609199353b 100644
--- a/src/plugins/proctrack/linuxproc/kill_tree.h
+++ b/src/plugins/proctrack/linuxproc/kill_tree.h
@@ -5,10 +5,11 @@
  *  Copyright (C) 2004 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Takao Hatazaki <takao.hatazaki@hp.com>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/plugins/proctrack/linuxproc/proctrack_linuxproc.c b/src/plugins/proctrack/linuxproc/proctrack_linuxproc.c
index 95331c8d9f1ee3cd4444be27e77e7e9b30b9ba06..1e1b63a9f5d9f3e6462fba268b07da5c429a394a 100644
--- a/src/plugins/proctrack/linuxproc/proctrack_linuxproc.c
+++ b/src/plugins/proctrack/linuxproc/proctrack_linuxproc.c
@@ -4,10 +4,11 @@
  *  Copyright (C) 2005 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov> et. al.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/plugins/proctrack/pgid/Makefile.in b/src/plugins/proctrack/pgid/Makefile.in
index dcc057da651eb8371b721d1518ad663074d2a284..49f8eff0cca771dd8e22f139cc8d0de508478305 100644
--- a/src/plugins/proctrack/pgid/Makefile.in
+++ b/src/plugins/proctrack/pgid/Makefile.in
@@ -43,14 +43,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -109,6 +113,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
diff --git a/src/plugins/proctrack/pgid/proctrack_pgid.c b/src/plugins/proctrack/pgid/proctrack_pgid.c
index 94f6f2b595a7de8948622cdafe7d35d478a9b406..fc27dfb573ba7f2125b16eb7aa80881b0d0f3fb5 100644
--- a/src/plugins/proctrack/pgid/proctrack_pgid.c
+++ b/src/plugins/proctrack/pgid/proctrack_pgid.c
@@ -4,10 +4,11 @@
  *  Copyright (C) 2005 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov> et. al.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/plugins/proctrack/rms/Makefile.in b/src/plugins/proctrack/rms/Makefile.in
index 9c4d24aaa2d1cf34df3d0a4639316888dcdd9e61..7de197ff17cf129260704a0ae29dee503365d663 100644
--- a/src/plugins/proctrack/rms/Makefile.in
+++ b/src/plugins/proctrack/rms/Makefile.in
@@ -43,14 +43,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -110,6 +114,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
diff --git a/src/plugins/proctrack/rms/proctrack_rms.c b/src/plugins/proctrack/rms/proctrack_rms.c
index 93204494f4ee84128b6034bed9122e1e51729ec3..0e56f0349635df354a0c75857e3f0082281a2261 100644
--- a/src/plugins/proctrack/rms/proctrack_rms.c
+++ b/src/plugins/proctrack/rms/proctrack_rms.c
@@ -3,10 +3,11 @@
  *****************************************************************************
  *  Copyright (C) 2005 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/plugins/proctrack/sgi_job/Makefile.in b/src/plugins/proctrack/sgi_job/Makefile.in
index c279fb5e6f5fbc13b008894e4e2091895ca822f3..80a2117395ffb251bb9f5932bae841186e152d38 100644
--- a/src/plugins/proctrack/sgi_job/Makefile.in
+++ b/src/plugins/proctrack/sgi_job/Makefile.in
@@ -43,14 +43,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -109,6 +113,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
diff --git a/src/plugins/proctrack/sgi_job/proctrack_sgi_job.c b/src/plugins/proctrack/sgi_job/proctrack_sgi_job.c
index 53fa0d62c56b15c93bdf588e08a2a6dee5eb5e92..23a46aeed414f06c957b85205e179ff0a1afdc03 100644
--- a/src/plugins/proctrack/sgi_job/proctrack_sgi_job.c
+++ b/src/plugins/proctrack/sgi_job/proctrack_sgi_job.c
@@ -4,10 +4,11 @@
  *  Copyright (C) 2005 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Mark Grondona <mgrondona@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/plugins/sched/Makefile.in b/src/plugins/sched/Makefile.in
index ed121a34f8295d0841536ff241ccd6cc714e59cd..b623e469cf3b757199abdafda7ab9cdc0143d055 100644
--- a/src/plugins/sched/Makefile.in
+++ b/src/plugins/sched/Makefile.in
@@ -42,14 +42,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -91,6 +95,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
diff --git a/src/plugins/sched/backfill/Makefile.in b/src/plugins/sched/backfill/Makefile.in
index e870190231f04898c5e231a517fb892e86228f56..c58ec8c085d2746bd8fd1b45126ba9da76172e2b 100644
--- a/src/plugins/sched/backfill/Makefile.in
+++ b/src/plugins/sched/backfill/Makefile.in
@@ -43,14 +43,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -109,6 +113,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
diff --git a/src/plugins/sched/backfill/backfill.c b/src/plugins/sched/backfill/backfill.c
index 02555505ed0bd9fd530830f34c307fd92ed66da0..f97d954b02ba8718ba5ddac8b5cb681557f3a1df 100644
--- a/src/plugins/sched/backfill/backfill.c
+++ b/src/plugins/sched/backfill/backfill.c
@@ -15,13 +15,14 @@
  *  priority job.
  *****************************************************************************
  *  Copyright (C) 2003-2007 The Regents of the University of California.
- *  Copyright (C) 2008 Lawrence Livermore National Security.
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -29,7 +30,7 @@
  *  any later version.
  *
  *  In addition, as a special exception, the copyright holders give permission 
- *  to link the code of portions of this program with the OpenSSL library under 
+ *  to link the code of portions of this program with the OpenSSL library under
  *  certain conditions as described in each individual source file, and 
  *  distribute linked combinations including the two. You must obey the GNU 
  *  General Public License in all respects for all of the code used other than 
@@ -72,6 +73,7 @@
 #include "src/slurmctld/licenses.h"
 #include "src/slurmctld/locks.h"
 #include "src/slurmctld/node_scheduler.h"
+#include "src/slurmctld/reservation.h"
 #include "src/slurmctld/slurmctld.h"
 #include "src/slurmctld/srun_comm.h"
 #include "backfill.h"
@@ -89,10 +91,6 @@ static bool new_work      = false;
 static bool stop_backfill = false;
 static pthread_mutex_t thread_flag_mutex = PTHREAD_MUTEX_INITIALIZER;
 
-/* Backfill scheduling has considerable overhead, 
- *	so only attempt it every BACKFILL_INTERVAL seconds.
- * Much of the scheduling for BlueGene happens through backfill,
- *	so we run it more frequently. */
 #ifndef BACKFILL_INTERVAL
 #  ifdef HAVE_BG
 #    define BACKFILL_INTERVAL	5
@@ -122,7 +120,11 @@ static void _attempt_backfill(void);
 static void _diff_tv_str(struct timeval *tv1,struct timeval *tv2,
 		char *tv_str, int len_tv_str);
 static bool _more_work(void);
-static int _start_job(struct job_record *job_ptr, bitstr_t *avail_bitmap);
+static int  _num_feature_count(struct job_record *job_ptr);
+static int  _start_job(struct job_record *job_ptr, bitstr_t *avail_bitmap);
+static int  _try_sched(struct job_record *job_ptr, bitstr_t **avail_bitmap,
+		       uint32_t min_nodes, uint32_t max_nodes,
+		       uint32_t req_nodes);
 
 #if __DEBUG
 /* Log resource allocate table */
@@ -163,6 +165,112 @@ static void _diff_tv_str(struct timeval *tv1,struct timeval *tv2,
 	snprintf(tv_str, len_tv_str, "usec=%ld", delta_t);
 }
 
+/* Return the number of features in the job request that carry a count */
+static int _num_feature_count(struct job_record *job_ptr)
+{
+	struct job_details *detail_ptr = job_ptr->details;
+	int rc = 0;
+	ListIterator feat_iter;
+	struct feature_record *feat_ptr;
+
+	if (detail_ptr->feature_list == NULL)	/* no constraints */
+		return rc;
+
+	feat_iter = list_iterator_create(detail_ptr->feature_list);
+	while ((feat_ptr = (struct feature_record *) list_next(feat_iter))) {
+		if (feat_ptr->count)
+			rc++;
+	}
+	list_iterator_destroy(feat_iter);
+
+	return rc;
+}
+
+/* Attempt to schedule a specific job on specific available nodes
+ * IN job_ptr - job to schedule
+ * IN/OUT avail_bitmap - nodes available/selected to use
+ * RET SLURM_SUCCESS on success, otherwise an error code
+ */
+static int  _try_sched(struct job_record *job_ptr, bitstr_t **avail_bitmap,
+		       uint32_t min_nodes, uint32_t max_nodes,
+		       uint32_t req_nodes)
+{
+	bitstr_t *tmp_bitmap;
+	int rc = SLURM_SUCCESS;
+	int feat_cnt = _num_feature_count(job_ptr);
+
+	if (feat_cnt) {
+		/* Ideally schedule the job feature by feature,
+		 * but I don't want to add that complexity here
+		 * right now, so clear the feature counts and try
+		 * to schedule. This will work if there is only 
+		 * one feature count. It should work fairly well
+		 * in cases where there are multiple feature
+		 * counts. */
+		struct job_details *detail_ptr = job_ptr->details;
+		ListIterator feat_iter;
+		struct feature_record *feat_ptr;
+		int i = 0, list_size;
+		uint16_t *feat_cnt_orig = NULL, high_cnt = 0;
+
+		/* Clear the feature counts */
+		list_size = list_count(detail_ptr->feature_list);
+		feat_cnt_orig = xmalloc(sizeof(uint16_t) * list_size);
+		feat_iter = list_iterator_create(detail_ptr->feature_list);
+		while ((feat_ptr = 
+			(struct feature_record *) list_next(feat_iter))) {
+			high_cnt = MAX(high_cnt, feat_ptr->count);
+			feat_cnt_orig[i++] = feat_ptr->count;
+			feat_ptr->count = 0;
+		}
+		list_iterator_destroy(feat_iter);
+
+		if ((job_req_node_filter(job_ptr, *avail_bitmap) != 
+		     SLURM_SUCCESS) ||
+		    (bit_set_count(*avail_bitmap) < high_cnt)) {
+			rc = ESLURM_NODES_BUSY;
+		} else {
+			rc = select_g_job_test(job_ptr, *avail_bitmap, 
+					       high_cnt, max_nodes, req_nodes,
+					       SELECT_MODE_WILL_RUN);
+		}
+
+		/* Restore the feature counts */
+		i = 0;
+		feat_iter = list_iterator_create(detail_ptr->feature_list);
+		while ((feat_ptr = 
+			(struct feature_record *) list_next(feat_iter))) {
+			feat_ptr->count = feat_cnt_orig[i++];
+		}
+		list_iterator_destroy(feat_iter);
+		xfree(feat_cnt_orig);
+	} else {
+		/* Try to schedule the job. First on dedicated nodes
+		 * then on shared nodes (if so configured). */
+		uint16_t orig_shared;
+		time_t now = time(NULL);
+		orig_shared = job_ptr->details->shared;
+		job_ptr->details->shared = 0;
+		tmp_bitmap = bit_copy(*avail_bitmap);
+		rc = select_g_job_test(job_ptr, *avail_bitmap, min_nodes,
+				       max_nodes, req_nodes,
+				       SELECT_MODE_WILL_RUN);
+		job_ptr->details->shared = orig_shared;
+		if (((rc != SLURM_SUCCESS) || (job_ptr->start_time > now)) &&
+		    (orig_shared != 0)) {
+			FREE_NULL_BITMAP(*avail_bitmap);
+			*avail_bitmap = tmp_bitmap;
+			rc = select_g_job_test(job_ptr, *avail_bitmap, 
+					       min_nodes, max_nodes, req_nodes,
+					       SELECT_MODE_WILL_RUN);
+		} else
+			FREE_NULL_BITMAP(tmp_bitmap);
+	}
+
+	return rc;
+
+}
+
 /* Terminate backfill_agent */
 extern void stop_backfill_agent(void)
 {
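
The feature-count path in _try_sched() above sidesteps per-feature
placement: it saves each feature's count, clears them all, runs the
schedulability test with the counts off, then restores them. A minimal
sketch of that save/clear/restore idiom, using only the list and memory
calls already visible in the hunk (the helper name is hypothetical and
the test itself is elided):

	static void _with_counts_cleared(List feature_list)
	{
		ListIterator it = list_iterator_create(feature_list);
		struct feature_record *f;
		int i = 0, n = list_count(feature_list);
		uint16_t *saved = xmalloc(n * sizeof(uint16_t));

		while ((f = (struct feature_record *) list_next(it))) {
			saved[i++] = f->count;	/* save ...       */
			f->count = 0;		/* ... then clear */
		}
		list_iterator_destroy(it);

		/* ... run select_g_job_test() with counts cleared ... */

		i = 0;
		it = list_iterator_create(feature_list);
		while ((f = (struct feature_record *) list_next(it)))
			f->count = saved[i++];	/* restore */
		list_iterator_destroy(it);
		xfree(saved);
	}
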
@@ -174,14 +282,24 @@ extern void stop_backfill_agent(void)
 extern void *backfill_agent(void *args)
 {
 	struct timeval tv1, tv2;
-	char tv_str[20];
+	char tv_str[20], *sched_params, *tmp_ptr;
 	time_t now;
-	int i, iter;
+	int backfill_interval = 0, i, iter;
 	static time_t last_backfill_time = 0;
 	/* Read config, and partitions; Write jobs and nodes */
 	slurmctld_lock_t all_locks = {
 		READ_LOCK, WRITE_LOCK, WRITE_LOCK, READ_LOCK };
 
+	sched_params = slurm_get_sched_params();
+	if (sched_params && (tmp_ptr=strstr(sched_params, "interval=")))
+		backfill_interval = atoi(tmp_ptr+9);
+	else
+		backfill_interval = BACKFILL_INTERVAL;
+	if (backfill_interval < 1) {
+		fatal("Invalid backfill scheduler interval: %d", 
+		      backfill_interval);
+	}
+
 	while (!stop_backfill) {
 		iter = (BACKFILL_CHECK_SEC * 1000000) /
 		       STOP_CHECK_USEC;
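
The hunk above makes the backfill interval configurable: backfill_agent()
now asks slurm_get_sched_params() for the scheduler parameter string and
scans it for "interval=", falling back to the compiled-in
BACKFILL_INTERVAL (presumably set in slurm.conf as, e.g.,
SchedulerParameters=interval=30). Note that sched_params does not appear
to be freed here; if slurm_get_sched_params() returns an allocated copy,
that is a small one-time leak at thread start. A sketch of the
parse-with-default pattern (the patch itself calls fatal() on values
below 1 rather than falling back; the helper name is hypothetical):

	#include <stdlib.h>
	#include <string.h>

	static int _parse_interval(const char *params, int dflt)
	{
		const char *p;

		/* accept "...,interval=N,..." anywhere in the string */
		if (params && (p = strstr(params, "interval=")))
			return atoi(p + strlen("interval="));
		return dflt;
	}
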
@@ -193,9 +311,9 @@ extern void *backfill_agent(void *args)
 		
 		now = time(NULL);
 		/* Avoid resource fragmentation if important */
-		if (switch_no_frag() && job_is_completing())
+		if (job_is_completing())
 			continue;
-		if ((difftime(now, last_backfill_time) < BACKFILL_INTERVAL) ||
+		if ((difftime(now, last_backfill_time) < backfill_interval) ||
 		    stop_backfill || (!_more_work()))
 			continue;
 		last_backfill_time = now;
@@ -222,9 +340,8 @@ static void _attempt_backfill(void)
 	struct part_record *part_ptr;
 	uint32_t end_time, end_reserve, time_limit;
 	uint32_t min_nodes, max_nodes, req_nodes;
-	uint16_t orig_shared;
-	bitstr_t *avail_bitmap = NULL, *tmp_bitmap;
-	time_t now = time(NULL);
+	bitstr_t *avail_bitmap = NULL, *resv_bitmap = NULL;
+	time_t now = time(NULL), start_res;
 	node_space_map_t node_space[MAX_BACKFILL_JOB_CNT + 2];
 
 	if (slurm_get_root_filter())
@@ -300,13 +417,24 @@ static void _attempt_backfill(void)
 				time_limit = MIN(job_ptr->time_limit,
 						 part_ptr->max_time);
 		}
-		end_time = (time_limit * 60) + now;
 
-		/* Identify usable nodes for this job */
+		/* Determine impact of any resource reservations */
 		FREE_NULL_BITMAP(avail_bitmap);
-		avail_bitmap = bit_copy(part_ptr->node_bitmap);
+		start_res = now;
+		j = job_test_resv(job_ptr, &start_res, true, &avail_bitmap);
+		if (j != SLURM_SUCCESS)
+			continue;
+		if (start_res > now)
+			end_time = (time_limit * 60) + start_res;
+		else
+			end_time = (time_limit * 60) + now;
+
+		/* Identify usable nodes for this job */
+		bit_and(avail_bitmap, part_ptr->node_bitmap);
 		bit_and(avail_bitmap, up_node_bitmap);
 		for (j=0; ; ) {
+			if (node_space[j].end_time < start_res) {
+				/* reservation ends before the job could
+				 * start; advance j before continuing, or
+				 * this for loop would never terminate */
+				if ((j = node_space[j].next) == 0)
+					break;
+				continue;
+			}
 			if (node_space[j].begin_time <= end_time) {
 				bit_and(avail_bitmap, 
 					node_space[j].avail_bitmap);
@@ -315,8 +443,12 @@ static void _attempt_backfill(void)
 			if ((j = node_space[j].next) == 0)
 				break;
 		}
-		if (job_req_node_filter(job_ptr, avail_bitmap))
-			continue;	/* problem with features */
+
+		/* Identify nodes which are definitely off limits */
+		FREE_NULL_BITMAP(resv_bitmap);
+		resv_bitmap = bit_copy(avail_bitmap);
+		bit_not(resv_bitmap);
+
 		if (job_ptr->details->exc_node_bitmap) {
 			bit_not(job_ptr->details->exc_node_bitmap);
 			bit_and(avail_bitmap, 
@@ -329,35 +461,24 @@ static void _attempt_backfill(void)
 			continue;	/* required nodes missing */
 		if (bit_set_count(avail_bitmap) < min_nodes)
 			continue;	/* insufficient nodes remain */
+		if (job_req_node_filter(job_ptr, avail_bitmap))
+			continue;	/* nodes lack features */
 
-		/* Try to schedule the job. First on dedicated nodes
-		 * then on shared nodes (if so configured). */
-		orig_shared = job_ptr->details->shared;
-		job_ptr->details->shared = 0;
-		tmp_bitmap = bit_copy(avail_bitmap);
-		j = select_g_job_test(job_ptr, avail_bitmap, min_nodes,
-				      max_nodes, req_nodes,
-				      SELECT_MODE_WILL_RUN);
-		job_ptr->details->shared = orig_shared;
-		if ((j != SLURM_SUCCESS) && (orig_shared != 0)) {
-			FREE_NULL_BITMAP(avail_bitmap);
-			avail_bitmap= tmp_bitmap;
-			j = select_g_job_test(job_ptr, avail_bitmap, min_nodes,
-					      max_nodes, req_nodes,
-					      SELECT_MODE_WILL_RUN);
-		} else
-			FREE_NULL_BITMAP(tmp_bitmap);
+		j = _try_sched(job_ptr, &avail_bitmap, 
+			       min_nodes, max_nodes, req_nodes);
 		if (j != SLURM_SUCCESS)
 			continue;	/* not runable */
-
+		
+		job_ptr->start_time = MAX(job_ptr->start_time, start_res);
 		if (job_ptr->start_time <= now) {
-			int rc = _start_job(job_ptr, avail_bitmap);
-			if(rc == ESLURM_ACCOUNTING_POLICY) 
+			int rc = _start_job(job_ptr, resv_bitmap);
+			if (rc == ESLURM_ACCOUNTING_POLICY) 
+				continue;
+			else if (rc != SLURM_SUCCESS)
+				/* Planned to start the job, but something bad
+				 * happened. Reserve the nodes where it should
+				 * apparently run and try more jobs. */
 				continue;
-			else if(rc != SLURM_SUCCESS)
-				/* Planned to start job, but something
-				 * bad happended */
-				break;
 		}
 		if (job_ptr->start_time > (now + BACKFILL_WINDOW)) {
 			/* Starts too far in the future to worry about */
@@ -381,6 +502,7 @@ static void _attempt_backfill(void)
 #endif
 	}
 	FREE_NULL_BITMAP(avail_bitmap);
+	FREE_NULL_BITMAP(resv_bitmap);
 
 	for (i=0; ; ) {
 		bit_free(node_space[i].avail_bitmap);
@@ -390,16 +512,18 @@ static void _attempt_backfill(void)
 	xfree(job_queue);
 }
 
-static int _start_job(struct job_record *job_ptr, bitstr_t *avail_bitmap)
+/* Try to start the job on any non-reserved nodes */
+static int _start_job(struct job_record *job_ptr, bitstr_t *resv_bitmap)
 {
 	int rc;
 	bitstr_t *orig_exc_nodes = NULL;
 	static uint32_t fail_jobid = 0;
 
-	if (job_ptr->details->exc_node_bitmap)
+	if (job_ptr->details->exc_node_bitmap) {
 		orig_exc_nodes = job_ptr->details->exc_node_bitmap;
-	job_ptr->details->exc_node_bitmap = bit_copy(avail_bitmap);
-	bit_not(job_ptr->details->exc_node_bitmap);
+		bit_or(job_ptr->details->exc_node_bitmap, resv_bitmap);
+	} else
+		job_ptr->details->exc_node_bitmap = bit_copy(resv_bitmap);
 
 	rc = select_nodes(job_ptr, false, NULL);
 	bit_free(job_ptr->details->exc_node_bitmap);
@@ -417,13 +541,15 @@ static int _start_job(struct job_record *job_ptr, bitstr_t *avail_bitmap)
 #if __DEBUG
 		info("backfill: Jobs backfilled: %d", backfilled_jobs);
 #endif
-	} else if ((job_ptr->job_id != fail_jobid)
-		   && (rc != ESLURM_ACCOUNTING_POLICY)) {
-		char *node_list = bitmap2node_name(avail_bitmap);
+	} else if ((job_ptr->job_id != fail_jobid) &&
+		   (rc != ESLURM_ACCOUNTING_POLICY)) {
+		char *node_list;
+		bit_not(resv_bitmap);
+		node_list = bitmap2node_name(resv_bitmap);
 		/* This happens when a job has sharing disabled and
 		 * a selected node is still completing some job, 
 		 * which should be a temporary situation. */
-		verbose("backfill: Failed to start JobId=%u on %s: %s",
+		verbose("backfill: Failed to start JobId=%u in %s: %s",
 			job_ptr->job_id, node_list, slurm_strerror(rc));
 		xfree(node_list);
 		fail_jobid = job_ptr->job_id;
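
The _start_job() rewrite above changes how the temporary exclusion map is
built: the old code set exc_node_bitmap to the complement of the
available set (bit_copy() of avail_bitmap followed by bit_not()), holding
any user-requested exclusions aside; the new code excludes exactly the
reserved nodes and merges them into an existing user map with bit_or(),
so the final exclusion is "reserved plus user-excluded". A sketch of the
new construction, using the bitstring calls this file already uses (the
helper name is hypothetical):

	static bitstr_t *_build_excl(bitstr_t *user_excl, bitstr_t *resv)
	{
		if (user_excl) {
			bit_or(user_excl, resv);  /* keep user exclusions */
			return user_excl;
		}
		return bit_copy(resv);		  /* reserved nodes only */
	}
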
diff --git a/src/plugins/sched/backfill/backfill.h b/src/plugins/sched/backfill/backfill.h
index 2e894eaa6837e3e7347b83c512b4a0eca6393e78..0af52ec485b0ceaeac4ab65acaca4d229fc14d15 100644
--- a/src/plugins/sched/backfill/backfill.h
+++ b/src/plugins/sched/backfill/backfill.h
@@ -4,10 +4,11 @@
  *  Copyright (C) 2003 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/plugins/sched/backfill/backfill_wrapper.c b/src/plugins/sched/backfill/backfill_wrapper.c
index b21ff971e3769c5618af143eef2c4128bb7e55a3..6a369fcf96de7d7dab61fb69d8a4f45af48858f4 100644
--- a/src/plugins/sched/backfill/backfill_wrapper.c
+++ b/src/plugins/sched/backfill/backfill_wrapper.c
@@ -6,10 +6,11 @@
  *  Copyright (C) 2003 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Jay Windley <jwindley@lnxi.com>, Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -44,6 +45,7 @@
 
 #include "src/common/plugin.h"
 #include "src/common/log.h"
+#include "src/common/slurm_priority.h"
 #include "src/common/macros.h"
 #include "src/slurmctld/slurmctld.h"
 #include "backfill.h"
@@ -142,10 +144,7 @@ u_int32_t
 slurm_sched_plugin_initial_priority( u_int32_t last_prio, 
 				     struct job_record *job_ptr )
 {
-	if (last_prio >= 2)
-		return (last_prio - 1);
-	else
-		return 1;
+	return priority_g_set(last_prio, job_ptr);
 }
 
 /**************************************************************************/
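
With the change above (repeated below for sched/builtin), the scheduler
wrappers stop computing an initial priority inline and delegate to the
priority plugin through priority_g_set(), brought in by the new
src/common/slurm_priority.h include. For reference, the dropped inline
policy was a simple decreasing FIFO, equivalent to:

	static uint32_t _fifo_priority(uint32_t last_prio)
	{
		return (last_prio >= 2) ? (last_prio - 1) : 1;
	}

With the delegation in place, a site can select a richer policy (such as
the multifactor priority plugin) without touching the scheduler plugins.
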
diff --git a/src/plugins/sched/builtin/Makefile.in b/src/plugins/sched/builtin/Makefile.in
index b1f665e0adef1d7ff958e1242e7d220f864ca4eb..4df72f0bce3b054201b927c62a7769d584eef7f2 100644
--- a/src/plugins/sched/builtin/Makefile.in
+++ b/src/plugins/sched/builtin/Makefile.in
@@ -43,14 +43,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -109,6 +113,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
diff --git a/src/plugins/sched/builtin/builtin_wrapper.c b/src/plugins/sched/builtin/builtin_wrapper.c
index 9abed1f5e5c2321c744a97ae4141969e9d625956..935582675f0567e2efd48eede4d66d66d2bf031b 100644
--- a/src/plugins/sched/builtin/builtin_wrapper.c
+++ b/src/plugins/sched/builtin/builtin_wrapper.c
@@ -4,10 +4,11 @@
  *  Copyright (C) 2002 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Kevin Tew <tew1@llnl.gov> et. al.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -41,6 +42,7 @@
 #include "src/common/plugin.h"
 #include "src/common/log.h"
 #include "src/slurmctld/slurmctld.h"
+#include "src/common/slurm_priority.h"
 
 const char		plugin_name[]	= "SLURM Built-in Scheduler plugin";
 const char		plugin_type[]	= "sched/builtin";
@@ -109,10 +111,7 @@ u_int32_t
 slurm_sched_plugin_initial_priority( u_int32_t last_prio,
 				     struct job_record *job_ptr )
 {
-	if (last_prio >= 2)
-		return (last_prio - 1);
-	else
-		return 1;
+	return priority_g_set(last_prio, job_ptr);
 }
 
 /**************************************************************************/
diff --git a/src/plugins/sched/gang/Makefile.in b/src/plugins/sched/gang/Makefile.in
index 2183caff1d122f19f4c3f48ed0f00d4966a7efe1..387f69e2f631c6895e64cb7bcb0ced800e682d47 100644
--- a/src/plugins/sched/gang/Makefile.in
+++ b/src/plugins/sched/gang/Makefile.in
@@ -43,14 +43,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -109,6 +113,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
diff --git a/src/plugins/sched/gang/gang.c b/src/plugins/sched/gang/gang.c
index 86ab638b195a7201ea5cfbe3c65c0b7fa9927395..4fe9f6a7ddbe0df63e9e5a53845d7e514f61d268 100644
--- a/src/plugins/sched/gang/gang.c
+++ b/src/plugins/sched/gang/gang.c
@@ -3,10 +3,11 @@
  *****************************************************************************
  *  Copyright (C) 2008 Hewlett-Packard Development Company, L.P.
  *  Written by Chris Holmes
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -77,10 +78,9 @@ enum gs_flags {
 
 struct gs_job {
 	uint32_t job_id;
+	struct job_record *job_ptr;
 	uint16_t sig_state;
 	uint16_t row_state;
-	bitstr_t *resmap;
-	uint16_t *alloc_cpus;
 };
 
 struct gs_part {
@@ -103,21 +103,21 @@ struct gs_part {
  *
  *       SUMMARY OF DATA MANAGEMENT
  *
- * For GS_NODE and GS_CPU:    bits in resmaps represent nodes
- * For GS_SOCKET and GS_CORE: bits in resmaps represent sockets
- * GS_NODE and GS_SOCKET ignore the CPU array
- * GS_CPU and GS_CORE use the CPU array to help resolve conflict
+ * For GS_NODE:   job_ptr->select_job->node_bitmap only
+ * For GS_CPU:    job_ptr->select_job->{node_bitmap, cpus}
+ * For GS_SOCKET: job_ptr->select_job->{node,core}_bitmap
+ * For GS_CORE:   job_ptr->select_job->{node,core}_bitmap
  *
  *         EVALUATION ALGORITHM
  *
- * For GS_NODE and GS_SOCKET: bits CANNOT conflict
- * For GS_CPUS and GS_CORE:  if bits conflict, make sure sum of CPUs per
- *                           resource don't exceed physical resource count
+ * For GS_NODE, GS_SOCKET, and GS_CORE, the bits CANNOT conflict
+ * For GS_CPU:  if bits conflict, make sure sum of CPUs per
+ *              resource don't exceed physical resource count
  *
  *
- * The j_ptr->alloc_cpus array is a collection of allocated values ONLY.
- * For every bit set in j_ptr->resmap, there is a corresponding element
- * (with an equal-to or less-than index value) in j_ptr->alloc_cpus. 
+ * The core_bitmap and cpus array hold allocated values ONLY.
+ * For every bit set in node_bitmap, there is a corresponding
+ * element in cpus and a set of elements in the core_bitmap.
  *
  ******************************************
  *
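
The rewritten summary above leans on the job's select_job structure
rather than per-partition copies of the allocation. A schematic of the
fields used, with names inferred from this patch alone (the authoritative
definition lives elsewhere under src/common/ and may carry more members):

	typedef struct select_job_res {
		bitstr_t *node_bitmap;	/* one bit per allocated node   */
		bitstr_t *core_bitmap;	/* allocated cores (or sockets) */
		uint16_t *cpus;		/* CPUs per set node_bitmap bit */
		/* ... */
	} *select_job_res_t;
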
@@ -143,9 +143,11 @@ static uint32_t default_job_list_size = 64;
 static uint32_t gs_resmap_size = 0;
 static pthread_mutex_t data_mutex = PTHREAD_MUTEX_INITIALIZER;
 
-static uint32_t gs_num_groups = 0;
-static uint16_t *gs_cpus_per_res = NULL;
-static uint32_t *gs_cpu_count_reps = NULL;
+static uint16_t *gs_bits_per_node = NULL;
+static uint32_t *gs_bit_rep_count = NULL;
+
+static uint16_t *gs_sockets_per_node = NULL;
+static uint32_t *gs_socket_rep_count = NULL;
 
 static struct gs_part **gs_part_sorted = NULL;
 static uint32_t num_sorted_part = 0;
@@ -178,13 +180,13 @@ void _print_jobs(struct gs_part *p_ptr)
 		p_ptr->part_name, p_ptr->num_jobs, p_ptr->num_shadows);
 	for (i = 0; i < p_ptr->num_shadows; i++) {
 		debug3("sched/gang:   shadow job %u row_s %s, sig_s %s",
-			p_ptr->shadow[i]->job_id,
+			p_ptr->shadow[i]->job_ptr->job_id,
 			_print_flag(p_ptr->shadow[i]->row_state),
 			_print_flag(p_ptr->shadow[i]->sig_state));
 	}
 	for (i = 0; i < p_ptr->num_jobs; i++) {
 		debug3("sched/gang:   job %u row_s %s, sig_s %s",
-			p_ptr->job_list[i]->job_id,
+			p_ptr->job_list[i]->job_ptr->job_id,
 			_print_flag(p_ptr->job_list[i]->row_state),
 			_print_flag(p_ptr->job_list[i]->sig_state));
 	}
@@ -214,92 +216,151 @@ _get_gr_type() {
 	return GS_NODE;
 }
 
-/* Return resource data for the given node */
-static uint16_t
-_compute_resources(int i, char socket_count)
+
+static void _load_socket_cnt()
 {
-	if (gr_type == GS_NODE)
-		return 1;
+	uint32_t i, index = 0, array_size = GS_CPU_ARRAY_INCREMENT;
 
-	if (gr_type == GS_CPU) {
-		if (socket_count)
-			return 1;
-		if (gs_fast_schedule)
-			return node_record_table_ptr[i].config_ptr->cpus;
-		return node_record_table_ptr[i].cpus;
+	if (gr_type != GS_SOCKET)
+		return;
+
+	gs_sockets_per_node = xmalloc(array_size * sizeof(uint16_t));
+	gs_socket_rep_count = xmalloc(array_size * sizeof(uint32_t));
+
+	for (i = 0; i < node_record_count; i++) {
+		uint16_t sock;
+		if (gs_fast_schedule) {
+			sock = node_record_table_ptr[i].config_ptr->sockets;
+		} else {
+			sock = node_record_table_ptr[i].sockets;
+		}
+		if (gs_sockets_per_node[index] == sock) {
+			gs_socket_rep_count[index]++;
+			continue;
+		}
+		if (gs_socket_rep_count[index] > 0) {
+			/* advance index and check array_size */
+			index++;
+			if (index >= array_size) {
+				array_size += GS_CPU_ARRAY_INCREMENT;
+				xrealloc(gs_sockets_per_node,
+				 	array_size * sizeof(uint16_t));
+				xrealloc(gs_socket_rep_count,
+				 	array_size * sizeof(uint32_t));
+			}
+		}
+		gs_sockets_per_node[index] = sock;
+		gs_socket_rep_count[index] = 1;
 	}
-	
-	if (socket_count || gr_type == GS_SOCKET) {
-		if (gs_fast_schedule)
-			return node_record_table_ptr[i].config_ptr->sockets;
-		return node_record_table_ptr[i].sockets;
+	index++;
+	if (index >= array_size) {
+		array_size += GS_CPU_ARRAY_INCREMENT;
+		xrealloc(gs_sockets_per_node, array_size * sizeof(uint16_t));
+		xrealloc(gs_socket_rep_count, array_size * sizeof(uint32_t));
 	}
+	/* leave the last entries '0' */
 
-	/* gr_type == GS_CORE */
-	if (gs_fast_schedule)
-		return node_record_table_ptr[i].config_ptr->cores;
-	return node_record_table_ptr[i].cores;
+	for (i = 0; i < index; i++) {
+		debug3("sched/gang: _load_socket_cnt: grp %d socks %u reps %u",
+			i, gs_sockets_per_node[i], gs_socket_rep_count[i]);
+	}
 }
 
 /* For GS_CPU  the gs_phys_res_cnt is the total number of CPUs per node.
- * For GS_CORE the gs_phys_res_cnt is the total number of cores per socket per
- * node (currently no nodes are made with different core counts per socket) */
+ * For GS_CORE and GS_SOCKET the gs_phys_res_cnt is the total number of
+ * cores per node.
+ * This function also sets gs_resmap_size.
+ */
 static void
 _load_phys_res_cnt()
 {
-	int i, array_size = GS_CPU_ARRAY_INCREMENT;
-	uint32_t adder;
+	uint32_t i, index = 0, array_size = GS_CPU_ARRAY_INCREMENT;
+
+	xfree(gs_bits_per_node);
+	xfree(gs_bit_rep_count);
+	xfree(gs_sockets_per_node);
+	xfree(gs_socket_rep_count);
 
-	xfree(gs_cpus_per_res);
-	xfree(gs_cpu_count_reps);
-	gs_num_groups = 0;
-	if (gr_type == GS_NODE || gr_type == GS_SOCKET)
+	if (gr_type != GS_CPU && gr_type != GS_CORE && gr_type != GS_SOCKET)
 		return;
 
-	gs_cpus_per_res   = xmalloc(array_size * sizeof(uint16_t));
-	gs_cpu_count_reps = xmalloc(array_size * sizeof(uint32_t));
+	gs_bits_per_node = xmalloc(array_size * sizeof(uint16_t));
+	gs_bit_rep_count = xmalloc(array_size * sizeof(uint32_t));
+
+	gs_resmap_size = 0;
 	for (i = 0; i < node_record_count; i++) {
-		uint16_t res = _compute_resources(i, 0);
-		if (gs_cpus_per_res[gs_num_groups] == res) {
-			adder = 1;
-			if (gr_type == GS_CORE)
-				adder = _compute_resources(i, 1);
-			gs_cpu_count_reps[gs_num_groups] += adder;
+		uint16_t bit;
+		if (gr_type == GS_CPU) {
+			if (gs_fast_schedule) 
+				bit = node_record_table_ptr[i].config_ptr->cpus;
+			else
+				bit = node_record_table_ptr[i].cpus;
+		} else {
+			if (gs_fast_schedule) {
+				bit  = node_record_table_ptr[i].config_ptr->cores;
+				bit *= node_record_table_ptr[i].config_ptr->sockets;
+			} else {
+				bit  = node_record_table_ptr[i].cores;
+				bit *= node_record_table_ptr[i].sockets;
+			}
+		}
+		gs_resmap_size += bit;
+		if (gs_bits_per_node[index] == bit) {
+			gs_bit_rep_count[index]++;
 			continue;
 		}
-		if (gs_cpus_per_res[gs_num_groups] != 0) {
-			gs_num_groups++;
-			if (gs_num_groups >= array_size) {
+		if (gs_bit_rep_count[index] > 0) {
+			/* advance index and check array_size */
+			index++;
+			if (index >= array_size) {
 				array_size += GS_CPU_ARRAY_INCREMENT;
-				xrealloc(gs_cpus_per_res,
-					 array_size * sizeof(uint16_t));
-				xrealloc(gs_cpu_count_reps,
-					 array_size * sizeof(uint32_t));
+				xrealloc(gs_bits_per_node,
+				 	array_size * sizeof(uint16_t));
+				xrealloc(gs_bit_rep_count,
+				 	array_size * sizeof(uint32_t));
 			}
 		}
-		gs_cpus_per_res[gs_num_groups] = res;
-		adder = 1;
-		if (gr_type == GS_CORE)
-			adder = _compute_resources(i, 1);
-		gs_cpu_count_reps[gs_num_groups] = adder;
+		gs_bits_per_node[index] = bit;
+		gs_bit_rep_count[index] = 1;
 	}
-	gs_num_groups++;
-	for (i = 0; i < gs_num_groups; i++) {
-		debug3("sched/gang: _load_phys_res_cnt: grp %d cpus %u reps %u",
-			i, gs_cpus_per_res[i], gs_cpu_count_reps[i]);
+	/* leave the last entries '0' */
+	index++;
+	if (index >= array_size) {
+		array_size += GS_CPU_ARRAY_INCREMENT;
+		xrealloc(gs_bits_per_node, array_size * sizeof(uint16_t));
+		xrealloc(gs_bit_rep_count, array_size * sizeof(uint32_t));
 	}
-	return;
+
+	for (i = 0; i < index; i++) {
+		debug3("sched/gang: _load_phys_res_cnt: grp %d bits %u reps %u",
+			i, gs_bits_per_node[i], gs_bit_rep_count[i]);
+
+	}
+	if (gr_type == GS_SOCKET)
+		_load_socket_cnt();
 }
 
-static uint16_t
-_get_phys_res_cnt(int res_index)
+static uint16_t _get_phys_bit_cnt(int node_index)
 {
 	int i = 0;
-	int pos = gs_cpu_count_reps[i++];
-	while (res_index >= pos) {
-		pos += gs_cpu_count_reps[i++];
+	int pos = gs_bit_rep_count[i++];
+	while (node_index >= pos) {
+		pos += gs_bit_rep_count[i++];
+	}
+	return gs_bits_per_node[i-1];
+}
+
+
+static uint16_t _get_socket_cnt(int node_index)
+{
+	int pos, i = 0;
+	if (!gs_socket_rep_count || !gs_sockets_per_node)
+		return 0;
+	pos = gs_socket_rep_count[i++];
+	while (node_index >= pos) {
+		pos += gs_socket_rep_count[i++];
 	}
-	return gs_cpus_per_res[i-1];
+	return gs_sockets_per_node[i-1];
 }
 
 
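
gs_bits_per_node/gs_bit_rep_count (and the socket variants) form a
zero-terminated run-length encoding over the node table, which is why
_load_phys_res_cnt() deliberately leaves the final entries '0'. As an
invented example, 100 8-core nodes followed by 28 16-core nodes would be
stored as gs_bits_per_node = {8, 16, 0} and gs_bit_rep_count =
{100, 28, 0}, and _get_phys_bit_cnt() resolves a node index with a linear
walk over the repeat counts, equivalent to:

	static uint16_t _rle_lookup(uint32_t node_index,
				    const uint16_t *vals,
				    const uint32_t *reps)
	{
		int i = 0;
		uint32_t pos = reps[0];

		while (node_index >= pos)	/* skip whole runs */
			pos += reps[++i];
		return vals[i];
	}
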
@@ -307,14 +368,11 @@ _get_phys_res_cnt(int res_index)
  * To destroy it, step down the array and destroy the pieces of
  * each gs_part entity, and then delete the whole array.
  * To destroy a gs_part entity, you need to delete the name, the
- * list of jobs, the shadow list, and the active_resmap. Each
- * job has a resmap that must be deleted also.
+ * list of jobs, the shadow list, and the active_resmap.
  */
-static void
-_destroy_parts() {
+static void _destroy_parts() {
 	int i;
 	struct gs_part *tmp, *ptr = gs_part_list;
-	struct gs_job *j_ptr;
 
 	while (ptr) {
 		tmp = ptr;
@@ -322,11 +380,7 @@ _destroy_parts() {
 
 		xfree(tmp->part_name);
 		for (i = 0; i < tmp->num_jobs; i++) {
-			j_ptr = tmp->job_list[i];
-			if (j_ptr->resmap)
-				bit_free(j_ptr->resmap);
-			xfree(j_ptr->alloc_cpus);
-			xfree(j_ptr);
+			xfree(tmp->job_list[i]);
 		}
 		xfree(tmp->shadow);
 		if (tmp->active_resmap)
@@ -339,8 +393,7 @@ _destroy_parts() {
 
 /* Build the gs_part_list. The job_list will be created later,
  * once a job is added. */
-static void
-_build_parts() {
+static void _build_parts() {
 	ListIterator part_iterator;
 	struct part_record *p_ptr;
 	int i, num_parts;
@@ -390,33 +443,36 @@ static int
 _find_job_index(struct gs_part *p_ptr, uint32_t job_id) {
 	int i;
 	for (i = 0; i < p_ptr->num_jobs; i++) {
-		if (p_ptr->job_list[i]->job_id == job_id)
+		if (p_ptr->job_list[i]->job_ptr->job_id == job_id)
 			return i;
 	}
 	return -1;
 }
 
-/* Return 1 if job fits in this row, else return 0 */
+/* Return 1 if job "cpu count" fits in this row, else return 0 */
 static int
-_can_cpus_fit(bitstr_t *setmap, struct gs_job *j_ptr, struct gs_part *p_ptr)
+_can_cpus_fit(struct job_record *job_ptr, struct gs_part *p_ptr)
 {
-	int i, size, a = 0;
+	int i, j, size;
 	uint16_t *p_cpus, *j_cpus;
+	select_job_res_t job_res = job_ptr->select_job;
 
-	size = bit_size(setmap);
+	if (gr_type != GS_CPU)
+		return 0;
+	
+	size = bit_size(job_res->node_bitmap);
 	p_cpus = p_ptr->active_cpus;
-	j_cpus = j_ptr->alloc_cpus;
+	j_cpus = job_res->cpus;
 
 	if (!p_cpus || !j_cpus)
 		return 0;
 
-	for (i = 0; i < size; i++) {
-		if (bit_test(setmap, i)) {
-			if (p_cpus[i]+j_cpus[a] > _get_phys_res_cnt(i))
+	for (j = 0, i = 0; i < size; i++) {
+		if (bit_test(job_res->node_bitmap, i)) {
+			if (p_cpus[i]+j_cpus[j] > _get_phys_bit_cnt(i))
 				return 0;
+			j++;
 		}
-		if (bit_test(j_ptr->resmap, i))
-			a++;
 	}
 	return 1;
 }
@@ -424,92 +480,141 @@ _can_cpus_fit(bitstr_t *setmap, struct gs_job *j_ptr, struct gs_part *p_ptr)
 
 /* Return 1 if job fits in this row, else return 0 */
 static int
-_job_fits_in_active_row(struct gs_job *j_ptr, struct gs_part *p_ptr)
+_job_fits_in_active_row(struct job_record *job_ptr, struct gs_part *p_ptr)
 {
+	select_job_res_t job_res = job_ptr->select_job;
 	int count;
-	bitstr_t *tmpmap;
+	bitstr_t *job_map;
 
 	if (p_ptr->active_resmap == NULL || p_ptr->jobs_active == 0)
 		return 1;
 
-	tmpmap = bit_copy(j_ptr->resmap);
-	if (!tmpmap)
-		fatal("sched/gang: memory allocation error");
+	if (gr_type == GS_CORE || gr_type == GS_SOCKET) {
+		return can_select_job_cores_fit(job_res, p_ptr->active_resmap,
+						gs_bits_per_node,
+						gs_bit_rep_count);
+	}
 	
-	bit_and(tmpmap, p_ptr->active_resmap);
+	/* gr_type == GS_NODE || gr_type == GS_CPU */
+	job_map = bit_copy(job_res->node_bitmap);
+	if (!job_map)
+		fatal("sched/gang: memory allocation error");
+	bit_and(job_map, p_ptr->active_resmap);
 	/* any set bits indicate contention for the same resource */
-	count = bit_set_count(tmpmap);
+	count = bit_set_count(job_map);
 	debug3("sched/gang: _job_fits_in_active_row: %d bits conflict", count);
-
-	if (count == 0) {
-		bit_free(tmpmap);
+	bit_free(job_map);
+	if (count == 0)
 		return 1;
+	if (gr_type == GS_CPU)
+		/* For GS_CPU we check the CPU arrays */
+		return _can_cpus_fit(job_ptr, p_ptr);
+
+	return 0;
+}
+
+
+/* A helper function for _add_job_to_active when gr_type == GS_SOCKET.
+ * A job has just been added to p_ptr->active_resmap, so set all cores of
+ * each used socket to avoid activating another job on the same socket. */
+static void _fill_sockets(bitstr_t *job_nodemap, struct gs_part *p_ptr)
+{
+	uint32_t c, i, size;
+	int n, first_bit, last_bit;
+
+	if (!job_nodemap || !p_ptr || !p_ptr->active_resmap)
+		return;
+	size      = bit_size(job_nodemap);
+	first_bit = bit_ffs(job_nodemap);
+	last_bit  = bit_fls(job_nodemap);
+	if (first_bit < 0 || last_bit < 0)
+		fatal("sched/gang: _fill_sockets: nodeless job?");
+
+	for (c = 0, n = 0; n < first_bit; n++) {
+		c += _get_phys_bit_cnt(n);
 	}
-	if (gr_type == GS_NODE || gr_type == GS_SOCKET) {
-		bit_free(tmpmap);
-		return 0;
+	for (n = first_bit; n <= last_bit; n++) {
+		uint16_t s, socks, cps, cores_per_node;
+		cores_per_node = _get_phys_bit_cnt(n);
+		if (bit_test(job_nodemap, n) == 0) {
+			c += cores_per_node;
+			continue;
+		}
+		socks = _get_socket_cnt(n);
+		cps = cores_per_node / socks;
+		for (s = 0; s < socks; s++) {
+			for (i = c; i < c+cps; i++) {
+				if (bit_test(p_ptr->active_resmap, i))
+					break;
+			}
+			if (i < c+cps) {
+				/* set all bits on this used socket */
+				bit_nset(p_ptr->active_resmap, c, c+cps-1);
+			}
+			c += cps;
+		}
 	}
-
-	/* for GS_CPU and GS_CORE, we need to compare CPU arrays and
-	 * see if the sum of CPUs on any one resource exceed the total
-	 * of physical resources available */
-	count = _can_cpus_fit(tmpmap, j_ptr, p_ptr);
-	bit_free(tmpmap);
-	return count;
 }
 
+
 /* Add the given job to the "active" structures of
  * the given partition and increment the run count */
 static void
-_add_job_to_active(struct gs_job *j_ptr, struct gs_part *p_ptr)
+_add_job_to_active(struct job_record *job_ptr, struct gs_part *p_ptr)
 {
-	int i, a, sz;
+	select_job_res_t job_res = job_ptr->select_job;
 
 	/* add job to active_resmap */
-	if (!p_ptr->active_resmap) {
-		/* allocate the active resmap */
-		debug3("sched/gang: _add_job_to_active: using job %u as active base",
-			j_ptr->job_id);
-		p_ptr->active_resmap = bit_copy(j_ptr->resmap);
-	} else if (p_ptr->jobs_active == 0) {
-		/* if the active_resmap exists but jobs_active is '0',
-		 * this means to overwrite the bitmap memory */
-		debug3("sched/gang: _add_job_to_active: copying job %u into active base",
-			j_ptr->job_id);
-		bit_copybits(p_ptr->active_resmap, j_ptr->resmap);
+	if (gr_type == GS_CORE || gr_type == GS_SOCKET) {
+		if (p_ptr->jobs_active == 0 && p_ptr->active_resmap) {
+			uint32_t size = bit_size(p_ptr->active_resmap);
+			bit_nclear(p_ptr->active_resmap, 0, size-1);
+		}
+		add_select_job_to_row(job_res, &(p_ptr->active_resmap),
+				      gs_bits_per_node, gs_bit_rep_count);
+		if (gr_type == GS_SOCKET)
+			_fill_sockets(job_res->node_bitmap, p_ptr);
 	} else {
-		/* add job to existing jobs in the active resmap */
-		debug3("sched/gang: _add_job_to_active: merging job %u into active resmap",
-			j_ptr->job_id);
-		bit_or(p_ptr->active_resmap, j_ptr->resmap);
+		/* GS_NODE or GS_CPU */
+		if (!p_ptr->active_resmap) {
+			debug3("sched/gang: _add_job_to_active: job %u first",
+				job_ptr->job_id);
+			p_ptr->active_resmap = bit_copy(job_res->node_bitmap);
+		} else if (p_ptr->jobs_active == 0) {
+			debug3("sched/gang: _add_job_to_active: job %u copied",
+				job_ptr->job_id);
+			bit_copybits(p_ptr->active_resmap,job_res->node_bitmap);
+		} else {
+			debug3("sched/gang: _add_job_to_active: adding job %u",
+				job_ptr->job_id);
+			bit_or(p_ptr->active_resmap, job_res->node_bitmap);
+		}
 	}
 	
 	/* add job to the active_cpus array */
-	if (gr_type == GS_CPU || gr_type == GS_CORE) {
-		sz = bit_size(p_ptr->active_resmap);
+	if (gr_type == GS_CPU) {
+		uint32_t i, a, sz = bit_size(p_ptr->active_resmap);
 		if (!p_ptr->active_cpus) {
 			/* create active_cpus array */
 			p_ptr->active_cpus = xmalloc(sz * sizeof(uint16_t));
 		}
 		if (p_ptr->jobs_active == 0) {
 			/* overwrite the existing values in active_cpus */
-			a = 0;
-			for (i = 0; i < sz; i++) {
-				if (bit_test(j_ptr->resmap, i)) {
+			for (a = 0, i = 0; i < sz; i++) {
+				if (bit_test(job_res->node_bitmap, i)) {
 					p_ptr->active_cpus[i] =
-						j_ptr->alloc_cpus[a++];
+						job_res->cpus[a++];
 				} else {
 					p_ptr->active_cpus[i] = 0;
 				}
 			}
 		} else {
 			/* add job to existing jobs in the active cpus */
-			a = 0;
-			for (i = 0; i < sz; i++) {
-				if (bit_test(j_ptr->resmap, i)) {
-					uint16_t limit = _get_phys_res_cnt(i);
+			for (a = 0, i = 0; i < sz; i++) {
+				if (bit_test(job_res->node_bitmap, i)) {
+					uint16_t limit = _get_phys_bit_cnt(i);
 					p_ptr->active_cpus[i] +=
-						j_ptr->alloc_cpus[a++];
+						job_res->cpus[a++];
 					/* when adding shadows, the resources
 					 * may get overcommitted */
 					if (p_ptr->active_cpus[i] > limit)
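
The socket-fill arithmetic in _fill_sockets() above walks the core bits
node by node: with cores_per_node = 8 and socks = 2, cps is 4, so on a
node whose cores start at bit c of active_resmap socket 0 owns bits
c..c+3 and socket 1 owns bits c+4..c+7; if any core of a socket is busy,
the whole socket is marked busy. The inner loop, lifted into a
self-contained helper for illustration (the name is hypothetical):

	static void _fill_node_sockets(bitstr_t *map, uint32_t c,
				       uint16_t socks, uint16_t cps)
	{
		uint16_t s;
		uint32_t i;

		for (s = 0; s < socks; s++, c += cps) {
			for (i = c; i < c + cps; i++) {
				if (bit_test(map, i))
					break;
			}
			if (i < c + cps)	/* busy core found */
				bit_nset(map, c, c + cps - 1);
		}
	}
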
@@ -521,6 +626,7 @@ _add_job_to_active(struct gs_job *j_ptr, struct gs_part *p_ptr)
 	p_ptr->jobs_active += 1;
 }
 
+
 static void
 _signal_job(uint32_t job_id, int sig)
 {
@@ -541,94 +647,6 @@ _signal_job(uint32_t job_id, int sig)
 		      job_id);
 }
 
-static uint32_t
-_get_resmap_size()
-{
-	int i;
-	uint32_t count = 0;
-	/* if GS_NODE or GS_CPU, then size is the number of nodes */
-	if (gr_type == GS_NODE || gr_type == GS_CPU)
-		return node_record_count;
-	/* else the size is the total number of sockets on all nodes */
-	for (i = 0; i < node_record_count; i++) {
-		count += _compute_resources(i, 1);
-	}
-	return count;
-}
-
-/* Load the gs_job struct with the correct
- * resmap and CPU array information
- */
-static void
-_load_alloc_cpus(struct gs_job *j_ptr, bitstr_t *nodemap)
-{
-	int i, a, alloc_index, sz;
-
-	xfree(j_ptr->alloc_cpus);
-	sz = bit_set_count(j_ptr->resmap);
-	j_ptr->alloc_cpus = xmalloc(sz * sizeof(uint16_t));
-
-	a = 0;
-	alloc_index = 0;
-	for (i = 0; i < node_record_count; i++) {
-		uint16_t j, cores, sockets = _compute_resources(i, 1);
-		
-		if (bit_test(nodemap, i)) {
-			for (j = 0; j < sockets; j++) {
-				cores = select_g_get_job_cores(j_ptr->job_id,
-								alloc_index,
-								j);
-				if (cores > 0)
-					j_ptr->alloc_cpus[a++] = cores;
-			}
-			alloc_index++;
-		}
-	}
-}
-
-/* return an appropriate resmap given the granularity (GS_NODE/GS_CORE/etc.) */
-/* This code fails if the bitmap size has changed. */
-static bitstr_t *
-_get_resmap(bitstr_t *origmap, uint32_t job_id)
-{
-	int i, alloc_index = 0, map_index = 0;
-	bitstr_t *newmap;
-	
-	if (bit_size(origmap) != node_record_count) {
-		error("sched/gang: bitmap size has changed from %d for %u",
-			node_record_count, job_id);
-		fatal("sched/gang: inconsistent bitmap size error");
-	}
-	if (gr_type == GS_NODE || gr_type == GS_CPU) {
-		newmap = bit_copy(origmap);
-		return newmap;
-	}
-	
-	/* for GS_SOCKET and GS_CORE the resmap represents sockets */
-	newmap = bit_alloc(gs_resmap_size);
-	if (!newmap) {
-		fatal("sched/gang: memory error creating newmap");
-	}
-	for (i = 0; i < node_record_count; i++) {
-		uint16_t j, cores, sockets = _compute_resources(i, 1);
-		
-		if (bit_test(origmap, i)) {
-			for (j = 0; j < sockets; j++) {
-				cores = select_g_get_job_cores(job_id,
-								alloc_index,
-								j);
-				if (cores > 0)
-					bit_set(newmap, map_index);
-				map_index++;
-			}
-			alloc_index++;
-		} else {
-			/* no cores allocated on this node */
-			map_index += sockets;
-		}
-	}
-	return newmap;
-}
 
 /* construct gs_part_sorted as a sorted list of the current partitions */
 static void
@@ -670,6 +688,7 @@ _sort_partitions()
 	}
 }
 
+
 /* Scan the partition list. Add the given job as a "shadow" to every
  * partition with a lower priority than the given partition */
 static void
@@ -708,6 +727,7 @@ _cast_shadow(struct gs_job *j_ptr, uint16_t priority)
 	}
 }
 
+
 /* Remove the given job as a "shadow" from all partitions */
 static void
 _clear_shadow(struct gs_job *j_ptr)
@@ -737,6 +757,7 @@ _clear_shadow(struct gs_job *j_ptr)
 	}
 }
 
+
 /* Rebuild the active row BUT preserve the order of existing jobs.
  * This is called after one or more jobs have been removed from
  * the partition or if a higher priority "shadow" has been added
@@ -748,10 +769,12 @@ _update_active_row(struct gs_part *p_ptr, int add_new_jobs)
 	int i;
 	struct gs_job *j_ptr;
 
+	debug3("sched/gang: update_active_row: rebuilding part %s...",
+		p_ptr->part_name);
 	/* rebuild the active row, starting with any shadows */
 	p_ptr->jobs_active = 0;
 	for (i = 0; p_ptr->shadow && p_ptr->shadow[i]; i++) {
-		_add_job_to_active(p_ptr->shadow[i], p_ptr);
+		_add_job_to_active(p_ptr->shadow[i]->job_ptr, p_ptr);
 	}
 	
 	/* attempt to add the existing 'active' jobs */
@@ -759,8 +782,8 @@ _update_active_row(struct gs_part *p_ptr, int add_new_jobs)
 		j_ptr = p_ptr->job_list[i];
 		if (j_ptr->row_state != GS_ACTIVE)
 			continue;
-		if (_job_fits_in_active_row(j_ptr, p_ptr)) {
-			_add_job_to_active(j_ptr, p_ptr);
+		if (_job_fits_in_active_row(j_ptr->job_ptr, p_ptr)) {
+			_add_job_to_active(j_ptr->job_ptr, p_ptr);
 			_cast_shadow(j_ptr, p_ptr->priority);
 			
 		} else {
@@ -779,8 +802,8 @@ _update_active_row(struct gs_part *p_ptr, int add_new_jobs)
 		j_ptr = p_ptr->job_list[i];
 		if (j_ptr->row_state != GS_FILLER)
 			continue;
-		if (_job_fits_in_active_row(j_ptr, p_ptr)) {
-			_add_job_to_active(j_ptr, p_ptr);
+		if (_job_fits_in_active_row(j_ptr->job_ptr, p_ptr)) {
+			_add_job_to_active(j_ptr->job_ptr, p_ptr);
 			_cast_shadow(j_ptr, p_ptr->priority);
 		} else {
 			/* this job has been preempted by a shadow job.
@@ -802,8 +825,8 @@ _update_active_row(struct gs_part *p_ptr, int add_new_jobs)
 		j_ptr = p_ptr->job_list[i];
 		if (j_ptr->row_state != GS_NO_ACTIVE)
 			continue;
-		if (_job_fits_in_active_row(j_ptr, p_ptr)) {
-			_add_job_to_active(j_ptr, p_ptr);
+		if (_job_fits_in_active_row(j_ptr->job_ptr, p_ptr)) {
+			_add_job_to_active(j_ptr->job_ptr, p_ptr);
 			_cast_shadow(j_ptr, p_ptr->priority);
 			/* note that this job is a "filler" for this row */
 			j_ptr->row_state = GS_FILLER;
@@ -846,13 +869,14 @@ _remove_job_from_part(uint32_t job_id, struct gs_part *p_ptr)
 	if (!job_id || !p_ptr)
 		return;
 
-	debug3("sched/gang: _remove_job_from_part: removing job %u", job_id);
 	/* find the job in the job_list */
 	i = _find_job_index(p_ptr, job_id);
 	if (i < 0)
 		/* job not found */
 		return;
 
+	debug3("sched/gang: _remove_job_from_part: removing job %u from %s",
+		job_id, p_ptr->part_name);
 	j_ptr = p_ptr->job_list[i];
 	
 	/* remove any shadow first */
@@ -871,11 +895,7 @@ _remove_job_from_part(uint32_t job_id, struct gs_part *p_ptr)
 			j_ptr->job_id);
 		_signal_job(j_ptr->job_id, GS_RESUME);
 	}
-	bit_free(j_ptr->resmap);
-	j_ptr->resmap = NULL;
-	if (j_ptr->alloc_cpus)
-		xfree(j_ptr->alloc_cpus);
-	j_ptr->alloc_cpus = NULL;
+	j_ptr->job_ptr = NULL;
 	xfree(j_ptr);
 	
 	return;
@@ -886,17 +906,19 @@ _remove_job_from_part(uint32_t job_id, struct gs_part *p_ptr)
  * lower priority than the given partition. Return the sig state of the
  * job (GS_SUSPEND or GS_RESUME) */
 static uint16_t
-_add_job_to_part(struct gs_part *p_ptr, uint32_t job_id, bitstr_t *job_bitmap)
+_add_job_to_part(struct gs_part *p_ptr, struct job_record *job_ptr)
 {
 	int i;
 	struct gs_job *j_ptr;
 
 	xassert(p_ptr);
-	xassert(job_id > 0);
-	xassert(job_bitmap);
+	xassert(job_ptr->job_id > 0);
+	xassert(job_ptr->select_job);
+	xassert(job_ptr->select_job->node_bitmap);
+	xassert(job_ptr->select_job->core_bitmap);
 
-	debug3("sched/gang: _add_job_to_part: adding job %u", job_id);
-	_print_jobs(p_ptr);
+	debug3("sched/gang: _add_job_to_part: adding job %u to %s",
+		job_ptr->job_id, p_ptr->part_name);
 	
 	/* take care of any memory needs */
 	if (!p_ptr->job_list) {
@@ -907,15 +929,15 @@ _add_job_to_part(struct gs_part *p_ptr, uint32_t job_id, bitstr_t *job_bitmap)
 	}
 	
 	/* protect against duplicates */
-	i = _find_job_index(p_ptr, job_id);
+	i = _find_job_index(p_ptr, job_ptr->job_id);
 	if (i >= 0) {
 		/* This job already exists, but the resource allocation
 		 * may have changed. In any case, remove the existing
 		 * job before adding this new one.
 		 */
 		debug3("sched/gang: _add_job_to_part: duplicate job %u detected",
-			job_id);
-		_remove_job_from_part(job_id, p_ptr);
+			job_ptr->job_id);
+		_remove_job_from_part(job_ptr->job_id, p_ptr);
 		_update_active_row(p_ptr, 0);
 	}
 	
@@ -930,23 +952,19 @@ _add_job_to_part(struct gs_part *p_ptr, uint32_t job_id, bitstr_t *job_bitmap)
 	j_ptr = xmalloc(sizeof(struct gs_job));
 	
 	/* gather job info */
-	j_ptr->job_id    = job_id;
+	j_ptr->job_id    = job_ptr->job_id;
+	j_ptr->job_ptr   = job_ptr;
 	j_ptr->sig_state = GS_RESUME;  /* all jobs are running initially */
 	j_ptr->row_state = GS_NO_ACTIVE; /* job is not in the active row */
-	j_ptr->resmap    = _get_resmap(job_bitmap, job_id);
-	j_ptr->alloc_cpus = NULL;
-	if (gr_type == GS_CORE || gr_type == GS_CPU) {
-		_load_alloc_cpus(j_ptr, job_bitmap);
-	}
 
 	/* append this job to the job_list */
 	p_ptr->job_list[p_ptr->num_jobs++] = j_ptr;
 	
 	/* determine the immediate fate of this job (run or suspend) */
-	if (_job_fits_in_active_row(j_ptr, p_ptr)) {
-		debug3("sched/gang: _add_job_to_part: adding job %u to active row", 
-			job_id);
-		_add_job_to_active(j_ptr, p_ptr);
+	if (_job_fits_in_active_row(job_ptr, p_ptr)) {
+		debug3("sched/gang: _add_job_to_part: job %u remains running", 
+			job_ptr->job_id);
+		_add_job_to_active(job_ptr, p_ptr);
 		/* note that this job is a "filler" for this row */
 		j_ptr->row_state = GS_FILLER;
 		/* all jobs begin in the run state, so
@@ -958,8 +976,8 @@ _add_job_to_part(struct gs_part *p_ptr, uint32_t job_id, bitstr_t *job_bitmap)
 
 	} else {
 		debug3("sched/gang: _add_job_to_part: suspending job %u",
-			job_id);
-		_signal_job(j_ptr->job_id, GS_SUSPEND);
+			job_ptr->job_id);
+		_signal_job(job_ptr->job_id, GS_SUSPEND);
 		j_ptr->sig_state = GS_SUSPEND;
 	}
 	
@@ -1015,8 +1033,7 @@ _scan_slurm_job_list()
 			 */
 				_signal_job(job_ptr->job_id, GS_RESUME);
 			
-			_add_job_to_part(p_ptr, job_ptr->job_id,
-					 job_ptr->node_bitmap);
+			_add_job_to_part(p_ptr, job_ptr);
 			continue;
 		}
 		
@@ -1086,7 +1103,6 @@ gs_init()
 	timeslicer_seconds = slurmctld_conf.sched_time_slice;
 	gs_fast_schedule = slurm_get_fast_schedule();
 	gr_type = _get_gr_type();
-	gs_resmap_size = _get_resmap_size();
 
 	/* load the physical resource count data */
 	_load_phys_res_cnt();
@@ -1128,9 +1144,10 @@ gs_fini()
 	_destroy_parts();
 	xfree(gs_part_sorted);
 	gs_part_sorted = NULL;
-	xfree(gs_cpus_per_res);
-	xfree(gs_cpu_count_reps);
-	gs_num_groups = 0;
+	xfree(gs_bits_per_node);
+	xfree(gs_bit_rep_count);
+	xfree(gs_sockets_per_node);
+	xfree(gs_socket_rep_count);
 	pthread_mutex_unlock(&data_mutex);
 	debug3("sched/gang: leaving gs_fini");
 
@@ -1148,8 +1165,7 @@ gs_job_start(struct job_record *job_ptr)
 	pthread_mutex_lock(&data_mutex);
 	p_ptr = _find_gs_part(job_ptr->partition);
 	if (p_ptr) {
-		job_state = _add_job_to_part(p_ptr, job_ptr->job_id,
-						job_ptr->node_bitmap);
+		job_state = _add_job_to_part(p_ptr, job_ptr);
 		/* if this job is running then check for preemption */
 		if (job_state == GS_RESUME)
 			_update_all_active_rows();
@@ -1244,6 +1260,11 @@ gs_reconfig()
 
 	old_part_list = gs_part_list;
 	gs_part_list = NULL;
+
+	/* reset global data */
+	gs_fast_schedule = slurm_get_fast_schedule();
+	gr_type = _get_gr_type();
+	_load_phys_res_cnt();
 	_build_parts();
 	
 	/* scan the old part list and add existing jobs to the new list */
@@ -1287,8 +1308,7 @@ gs_reconfig()
 			/* transfer the job as long as it is still active */
 			if (job_ptr->job_state == JOB_SUSPENDED ||
 			    job_ptr->job_state == JOB_RUNNING) {				
-				_add_job_to_part(newp_ptr, job_ptr->job_id,
-						 job_ptr->node_bitmap);
+				_add_job_to_part(newp_ptr, job_ptr);
 			}
 		}
 	}
@@ -1326,13 +1346,13 @@ _build_active_row(struct gs_part *p_ptr)
 	
 	/* apply all shadow jobs first */
 	for (i = 0; i < p_ptr->num_shadows; i++) {
-		_add_job_to_active(p_ptr->shadow[i], p_ptr);
+		_add_job_to_active(p_ptr->shadow[i]->job_ptr, p_ptr);
 	}
 	
 	/* attempt to add jobs from the job_list in the current order */
 	for (i = 0; i < p_ptr->num_jobs; i++) {
-		if (_job_fits_in_active_row(p_ptr->job_list[i], p_ptr)) {
-			_add_job_to_active(p_ptr->job_list[i], p_ptr);
+		if (_job_fits_in_active_row(p_ptr->job_list[i]->job_ptr, p_ptr)) {
+			_add_job_to_active(p_ptr->job_list[i]->job_ptr, p_ptr);
 			p_ptr->job_list[i]->row_state = GS_ACTIVE;
 		}
 	}
@@ -1360,7 +1380,6 @@ _cycle_job_list(struct gs_part *p_ptr)
 	struct gs_job *j_ptr;
 	
 	debug3("sched/gang: entering _cycle_job_list");
-	_print_jobs(p_ptr);
 	/* re-prioritize the job_list and set all row_states to GS_NO_ACTIVE */
 	for (i = 0; i < p_ptr->num_jobs; i++) {
 		while (p_ptr->job_list[i]->row_state == GS_ACTIVE) {
@@ -1377,7 +1396,6 @@ _cycle_job_list(struct gs_part *p_ptr)
 			
 	}
 	debug3("sched/gang: _cycle_job_list reordered job list:");
-	_print_jobs(p_ptr);
 	/* Rebuild the active row. */
 	_build_active_row(p_ptr);
 	debug3("sched/gang: _cycle_job_list new active job list:");
diff --git a/src/plugins/sched/gang/gang.h b/src/plugins/sched/gang/gang.h
index 520ea513ae3dd50dd34746c1194d39e6e0773339..97acc00e4eee82d4ffba4abc377a5c952615ef44 100644
--- a/src/plugins/sched/gang/gang.h
+++ b/src/plugins/sched/gang/gang.h
@@ -3,10 +3,11 @@
  *****************************************************************************
  *  Copyright (C) 2008 Hewlett-Packard Development Company, L.P.
  *  Written by Chris Holmes
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/plugins/sched/gang/sched_gang.c b/src/plugins/sched/gang/sched_gang.c
index 52095c6a85805be73d64c63fbfcc3beff6ed57c0..73187e841a5104b72530ea5431429fa447ce6491 100644
--- a/src/plugins/sched/gang/sched_gang.c
+++ b/src/plugins/sched/gang/sched_gang.c
@@ -3,10 +3,11 @@
  *****************************************************************************
  *  Copyright (C) 2008 Hewlett-Packard Development Company, L.P.
  *  Written by Chris Holmes
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -35,6 +36,7 @@
 \*****************************************************************************/
 
 #include "./gang.h"
+#include "src/common/slurm_priority.h"
 
 const char		plugin_name[]	= "Gang Scheduler plugin";
 const char		plugin_type[]	= "sched/gang";
@@ -111,10 +113,7 @@ slurm_sched_plugin_initial_priority( uint32_t last_prio,
 {
 	/* ignored for timeslicing, but will be used to support priority */
 
-	if (last_prio >= 2)
-		return (last_prio - 1);
-	else
-		return 1;
+	return priority_g_set(last_prio, job_ptr);
 }
 
 /**************************************************************************/
diff --git a/src/plugins/sched/hold/Makefile.in b/src/plugins/sched/hold/Makefile.in
index 35ac58b0a857aa6c14404ca25dc1b9eccb8b478e..7dfe7247be2970201d298f9a4891437576b5df07 100644
--- a/src/plugins/sched/hold/Makefile.in
+++ b/src/plugins/sched/hold/Makefile.in
@@ -43,14 +43,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -109,6 +113,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
diff --git a/src/plugins/sched/hold/hold_wrapper.c b/src/plugins/sched/hold/hold_wrapper.c
index 7e6dc6405a717a3bc7acff891ecbd1b1da1bbe35..60bba4825b7eb7a5f18147f9c05030785120bbbe 100644
--- a/src/plugins/sched/hold/hold_wrapper.c
+++ b/src/plugins/sched/hold/hold_wrapper.c
@@ -5,10 +5,11 @@
  *  Copyright (C) 2002 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov> et al.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -46,6 +47,7 @@
 #include "src/common/plugin.h"
 #include "src/common/log.h"
 #include "src/slurmctld/slurmctld.h"
+#include "src/common/slurm_priority.h"
 
 const char		plugin_name[]	= "SLURM Hold Scheduler plugin";
 const char		plugin_type[]	= "sched/hold";
@@ -119,10 +121,7 @@ slurm_sched_plugin_initial_priority( u_int32_t last_prio,
 	if (stat("/etc/slurm.hold", &buf) == 0)
 		return 0;	/* hold all new jobs */
 
-	if (last_prio >= 2)
-		return (last_prio - 1);
-	else
-		return 1;
+	return priority_g_set(last_prio, job_ptr);
 }
 
 /**************************************************************************/
diff --git a/src/plugins/sched/wiki/Makefile.in b/src/plugins/sched/wiki/Makefile.in
index 45183cd167033ef7554012111dd5acda0a5b74de..60fb5ddd86d172fdae44bdf0fb7874658aa6af9b 100644
--- a/src/plugins/sched/wiki/Makefile.in
+++ b/src/plugins/sched/wiki/Makefile.in
@@ -43,14 +43,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -111,6 +115,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
diff --git a/src/plugins/sched/wiki/cancel_job.c b/src/plugins/sched/wiki/cancel_job.c
index a74921f06b4a65ec860fb3d4b574d2fddf63dfa2..1deb25f9c0844ff85ff4eed005dd41e448bfa2db 100644
--- a/src/plugins/sched/wiki/cancel_job.c
+++ b/src/plugins/sched/wiki/cancel_job.c
@@ -4,10 +4,11 @@
  *  Copyright (C) 2006 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/plugins/sched/wiki/get_jobs.c b/src/plugins/sched/wiki/get_jobs.c
index 3989f614f3a6ccbb4e8fcdcecdf8e6ec320db2bd..3362472cb56a12e234211f9d03138721ddcc794e 100644
--- a/src/plugins/sched/wiki/get_jobs.c
+++ b/src/plugins/sched/wiki/get_jobs.c
@@ -1,13 +1,15 @@
 /*****************************************************************************\
  *  get_jobs.c - Process Wiki get job info request
  *****************************************************************************
- *  Copyright (C) 2006 The Regents of the University of California.
+ *  Copyright (C) 2006-2007 The Regents of the University of California.
+ *  Copyright (C) 2008 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -15,7 +17,7 @@
  *  any later version.
  *
  *  In addition, as a special exception, the copyright holders give permission 
- *  to link the code of portions of this program with the OpenSSL library under 
+ *  to link the code of portions of this program with the OpenSSL library under
  *  certain conditions as described in each individual source file, and 
  *  distribute linked combinations including the two. You must obey the GNU 
  *  General Public License in all respects for all of the code used other than 
@@ -49,6 +51,7 @@
 static char *	_dump_all_jobs(int *job_cnt, time_t update_time);
 static char *	_dump_job(struct job_record *job_ptr, time_t update_time);
 static uint16_t _get_job_cpus_per_task(struct job_record *job_ptr);
+static uint16_t _get_job_tasks_per_node(struct job_record *job_ptr);
 static uint32_t	_get_job_end_time(struct job_record *job_ptr);
 static char *	_get_job_features(struct job_record *job_ptr);
 static uint32_t	_get_job_min_disk(struct job_record *job_ptr);
@@ -81,16 +84,19 @@ static char *	_task_list(struct job_record *job_ptr);
  *	WCLIMIT=<secs>;			wall clock time limit, seconds
  *	TASKS=<cpus>;			CPUs required
  *	[NODES=<nodes>;]		count of nodes required
+ *	[TASKPERNODE=<cnt>;]		tasks required per node
  *	DPROCS=<cpus_per_task>;		count of CPUs required per task
  *	QUEUETIME=<uts>;		submission time
  *	STARTTIME=<uts>;		time execution started
  *	PARTITIONMASK=<partition>;	partition name
+ *	[DMEM=<mbytes>;]		MB of memory required per cpu
  *	RMEM=<MB>;			MB of memory required
  *	RDISK=<MB>;			MB of disk space required
  *	[COMPLETETIME=<uts>;]		termination time
  *	[SUSPENDTIME=<secs>;]		seconds that job has been suspended
- *	[QOS=<quality_of_service>];	quality of service
- *	[ACCOUNT=<bank_account>];	bank account name
+ *	[ACCOUNT=<bank_account>;]	bank account name
+ *	[QOS=<quality_of_service>;]	quality of service
+ *	[RCLASS=<resource_class>;]	resource class
  *	[COMMENT=<whatever>;]		job dependency or account number
  *	UNAME=<user_name>;		user name
  *	GNAME=<group_name>;		group name
@@ -203,7 +209,7 @@ static char *	_dump_job(struct job_record *job_ptr, time_t update_time)
 {
 	char tmp[16384], *buf = NULL;
 	char *uname, *gname;
-	uint32_t end_time, suspend_time;
+	uint32_t end_time, suspend_time, min_mem;
 
 	if (!job_ptr)
 		return NULL;
@@ -265,10 +271,18 @@ static char *	_dump_job(struct job_record *job_ptr, time_t update_time)
 	xstrcat(buf, tmp);
 
 	if (!IS_JOB_FINISHED(job_ptr)) {
+		uint16_t tpn;
 		snprintf(tmp, sizeof(tmp),
 			"NODES=%u;",
 			_get_job_min_nodes(job_ptr));
 		xstrcat(buf, tmp);
+		tpn = _get_job_tasks_per_node(job_ptr);
+		if (tpn > 0) {
+			snprintf(tmp, sizeof(tmp),
+				 "TASKPERNODE=%u;",
+				 tpn);
+			xstrcat(buf, tmp);
+		}
 	}
 
 	snprintf(tmp, sizeof(tmp),
@@ -283,6 +297,13 @@ static char *	_dump_job(struct job_record *job_ptr, time_t update_time)
 		job_ptr->partition);
 	xstrcat(buf, tmp);
 
+	min_mem = _get_job_min_mem(job_ptr);
+	if (min_mem & MEM_PER_CPU) {
+		snprintf(tmp, sizeof(tmp),
+			"DMEM=%u;", min_mem & (~MEM_PER_CPU));
+		xstrcat(buf, tmp);
+	}
+
 	snprintf(tmp, sizeof(tmp),
 		"RMEM=%u;RDISK=%u;",
 		_get_job_min_mem(job_ptr),
@@ -305,7 +326,7 @@ static char *	_dump_job(struct job_record *job_ptr, time_t update_time)
 
 	if (job_ptr->account) {
 		/* allow QOS spec in form "qos-name" */
-		if (!strncmp(job_ptr->account,"qos-",4)) {
+		if (!strncmp(job_ptr->account, "qos-", 4)) {
 			snprintf(tmp, sizeof(tmp),
 				 "QOS=%s;", job_ptr->account + 4);
 		} else {
@@ -316,9 +337,33 @@ static char *	_dump_job(struct job_record *job_ptr, time_t update_time)
 	}
 
 	if (job_ptr->comment && job_ptr->comment[0]) {
-		snprintf(tmp,sizeof(tmp),
-			"COMMENT=%s;", job_ptr->comment);
-		xstrcat(buf,tmp);
+		/* Parse comment for class/qos spec */
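+		/* e.g. comment="qos:standby,class:debug" (illustrative values) */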
+		char *copy;
+		char *cred, *value, *last = NULL;
+		copy = xstrdup(job_ptr->comment);
+		cred = strtok_r(copy, ",", &last);
+		while (cred != NULL) {
+			if (!strncmp(cred, "qos:", 4)) {
+				value = &cred[4];
+				if (value[0] != '\0') {
+					snprintf(tmp, sizeof(tmp),
+						 "QOS=%s;", value);
+					xstrcat(buf, tmp);
+				}
+			} else if (!strncmp(cred, "class:", 6)) {
+				value = &cred[6];
+				if (value[0] != '\0') {
+					snprintf(tmp, sizeof(tmp),
+						"RCLASS=%s;", value);
+					xstrcat(buf, tmp);
+				}
+			}
+			cred = strtok_r(NULL, ",", &last);
+		}
+		xfree(copy);
+		snprintf(tmp, sizeof(tmp),
+			 "COMMENT=%s;", job_ptr->comment);
+		xstrcat(buf, tmp);
 	}
 
 	if (job_ptr->details &&
@@ -345,6 +390,16 @@ static uint16_t _get_job_cpus_per_task(struct job_record *job_ptr)
 	return cpus_per_task;
 }
 
+
+static uint16_t _get_job_tasks_per_node(struct job_record *job_ptr)
+{
+	uint16_t tasks_per_node = 0;
+
+	if (job_ptr->details && job_ptr->details->ntasks_per_node)
+		tasks_per_node = job_ptr->details->ntasks_per_node;
+	return tasks_per_node;
+}
+
 static uint32_t _get_job_min_mem(struct job_record *job_ptr)
 {
 	if (job_ptr->details)
@@ -526,20 +581,22 @@ static char * _task_list(struct job_record *job_ptr)
 	int i, j, task_cnt;
 	char *buf = NULL, *host;
 	hostlist_t hl = hostlist_create(job_ptr->nodes);
+	select_job_res_t select_ptr = job_ptr->select_job;
 
+	xassert(select_ptr && select_ptr->cpus);
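+	/* select_ptr->cpus[i] is the CPU count on the i-th allocated host,
+	 * in the same order as the hosts in job_ptr->nodes */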
 	buf = xstrdup("");
 	if (hl == NULL)
 		return buf;
 
-	for (i=0; i<job_ptr->alloc_lps_cnt; i++) {
+	for (i=0; i<select_ptr->nhosts; i++) {
 		host = hostlist_shift(hl);
 		if (host == NULL) {
-			error("bad alloc_lps_cnt for job %u (%s, %d)", 
+			error("bad node_cnt for job %u (%s, %d)", 
 				job_ptr->job_id, job_ptr->nodes,
-				job_ptr->alloc_lps_cnt);
+				job_ptr->node_cnt);
 			break;
 		}
-		task_cnt = job_ptr->alloc_lps[i];
+		task_cnt = select_ptr->cpus[i];
 		if (job_ptr->details && job_ptr->details->cpus_per_task)
 			task_cnt /= job_ptr->details->cpus_per_task;
 		for (j=0; j<task_cnt; j++) {
diff --git a/src/plugins/sched/wiki/get_nodes.c b/src/plugins/sched/wiki/get_nodes.c
index 02f5b4dd4f27dfc28c3ab999c306b5cc9efe44e6..0128e90be103df15fa43f1fdcc3f208e318f39ad 100644
--- a/src/plugins/sched/wiki/get_nodes.c
+++ b/src/plugins/sched/wiki/get_nodes.c
@@ -5,10 +5,11 @@
  *  Copyright (C) 2008 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -132,10 +133,14 @@ static char *	_dump_all_nodes(int *node_cnt, time_t update_time)
 	int i, cnt = 0;
 	struct node_record *node_ptr = node_record_table_ptr;
 	char *tmp_buf, *buf = NULL;
+	uint16_t base_state;
 
 	for (i=0; i<node_record_count; i++, node_ptr++) {
 		if (node_ptr->name == NULL)
 			continue;
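+		/* nodes in FUTURE state are not yet available; hide from Moab */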
+		base_state = node_ptr->node_state & NODE_STATE_BASE;
+		if (base_state == NODE_STATE_FUTURE)
+			continue;
 		tmp_buf = _dump_node(node_ptr, update_time);
 		if (cnt > 0)
 			xstrcat(buf, "#");
diff --git a/src/plugins/sched/wiki/hostlist.c b/src/plugins/sched/wiki/hostlist.c
index 1c3214891a3b94e82f51c210c66a315dec2e5db7..50d4555dbe49e150fa5a7cecffafcabaecd5225b 100644
--- a/src/plugins/sched/wiki/hostlist.c
+++ b/src/plugins/sched/wiki/hostlist.c
@@ -2,12 +2,14 @@
  *  hostlist.c - Convert hostlist expressions between Slurm and Moab formats
  *****************************************************************************
  *  Copyright (C) 2007 The Regents of the University of California.
+ *  Copyright (C) 2008 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -81,7 +83,7 @@ extern char * moab2slurm_task_list(char *moab_tasklist, int *task_cnt)
 	static uint32_t cr_test = 0, cr_enabled = 0;
 
 	if (cr_test == 0) {
-		select_g_get_info_from_plugin(SELECT_CR_PLUGIN,
+		select_g_get_info_from_plugin(SELECT_CR_PLUGIN, NULL,
 						&cr_enabled);
 		cr_test = 1;
 	}
@@ -161,32 +163,43 @@ extern char * slurm_job2moab_task_list(struct job_record *job_ptr)
 /* Return task list in Moab format 1: tux0:tux0:tux1:tux1:tux2 */
 static char * _task_list(struct job_record *job_ptr)
 {
-	int i, j;
+	int i, j, node_inx = 0, task_cnt;
 	char *buf = NULL, *host;
-	hostlist_t hl = hostlist_create(job_ptr->nodes);
+	select_job_res_t select_ptr = job_ptr->select_job;
 
-	if (hl == NULL) {
-		error("hostlist_create error for job %u, %s",
-			job_ptr->job_id, job_ptr->nodes);
-		return buf;
-	}
+	xassert(select_ptr);
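+	/* walk the allocation's node bitmap; cpus[] is indexed by
+	 * allocation order, so advance node_inx one set bit at a time */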
+	for (i=0; i<select_ptr->nhosts; i++) {
+		if (i == 0) {
+			xassert(select_ptr->cpus && select_ptr->node_bitmap);
+			node_inx = bit_ffs(select_ptr->node_bitmap);
+		} else {
+			for (node_inx++; node_inx<node_record_count; 
+			     node_inx++) {
+				if (bit_test(select_ptr->node_bitmap, node_inx))
+					break;
+			}
+			if (node_inx >= node_record_count) {
+				error("Improperly formed select_job for %u",
+				      job_ptr->job_id);
+				break;
+			}
+		}
+		host = node_record_table_ptr[node_inx].name;
 
-	for (i=0; i<job_ptr->alloc_lps_cnt; i++) {
-		host = hostlist_shift(hl);
-		if (host == NULL) {
-			error("bad alloc_lps_cnt for job %u (%s, %d)", 
-				job_ptr->job_id, job_ptr->nodes,
-				job_ptr->alloc_lps_cnt);
-			break;
+		task_cnt = select_ptr->cpus[i];
+		if (job_ptr->details && job_ptr->details->cpus_per_task)
+			task_cnt /= job_ptr->details->cpus_per_task;
+		if (task_cnt < 1) {
+			error("Invalid task_cnt for job %u on node %s",
+			      job_ptr->job_id, host);
+			task_cnt = 1;
 		}
-		for (j=0; j<job_ptr->alloc_lps[i]; j++) {
+		for (j=0; j<task_cnt; j++) {
 			if (buf)
 				xstrcat(buf, ":");
 			xstrcat(buf, host);
 		}
-		free(host);
 	}
-	hostlist_destroy(hl);
 	return buf;
 }
 
@@ -248,27 +261,39 @@ static void _append_hl_buf(char **buf, hostlist_t *hl_tmp, int *reps)
 /* Return task list in Moab format 2: tux[0-1]*2:tux2 */
 static char * _task_list_exp(struct job_record *job_ptr)
 {
-	int i, reps = -1;
+	int i, node_inx = 0, reps = -1, task_cnt;
 	char *buf = NULL, *host;
-	hostlist_t hl = hostlist_create(job_ptr->nodes);
 	hostlist_t hl_tmp = (hostlist_t) NULL;
+	select_job_res_t select_ptr = job_ptr->select_job;
 
-	if (hl == NULL) {
-		error("hostlist_create error for job %u, %s",
-			job_ptr->job_id, job_ptr->nodes);
-		return buf;
-	}
-
-	for (i=0; i<job_ptr->alloc_lps_cnt; i++) {
-		host = hostlist_shift(hl);
-		if (host == NULL) {
-			error("bad alloc_lps_cnt for job %u (%s, %d)", 
-				job_ptr->job_id, job_ptr->nodes,
-				job_ptr->alloc_lps_cnt);
-			break;
+	xassert(select_ptr);
+	for (i=0; i<select_ptr->nhosts; i++) {
+		if (i == 0) {
+			xassert(select_ptr->cpus && select_ptr->node_bitmap);
+			node_inx = bit_ffs(select_ptr->node_bitmap);
+		} else {
+			for (node_inx++; node_inx<node_record_count; 
+			     node_inx++) {
+				if (bit_test(select_ptr->node_bitmap, node_inx))
+					break;
+			}
+			if (node_inx >= node_record_count) {
+				error("Improperly formed select_job for %u",
+				      job_ptr->job_id);
+				break;
+			}
 		}
+		host = node_record_table_ptr[node_inx].name;
 
-		if (reps == job_ptr->alloc_lps[i]) {
+		task_cnt = select_ptr->cpus[i];
+		if (job_ptr->details && job_ptr->details->cpus_per_task)
+			task_cnt /= job_ptr->details->cpus_per_task;
+		if (task_cnt < 1) {
+			error("Invalid task_cnt for job %u on node %s",
+			      job_ptr->job_id, host);
+			task_cnt = 1;
+		}
+		if (reps == task_cnt) {
 			/* append to existing hostlist record */
 			if (hostlist_push(hl_tmp, host) == 0)
 				error("hostlist_push failure");
@@ -279,13 +304,11 @@ static char * _task_list_exp(struct job_record *job_ptr)
 			/* start new hostlist record */
 			hl_tmp = hostlist_create(host);
 			if (hl_tmp)
-				reps = job_ptr->alloc_lps[i];
+				reps = task_cnt;
 			else
 				error("hostlist_create failure");
 		}
-		free(host);
 	}
-	hostlist_destroy(hl);
 	if (hl_tmp)
 		_append_hl_buf(&buf, &hl_tmp, &reps);
 	return buf;
diff --git a/src/plugins/sched/wiki/job_modify.c b/src/plugins/sched/wiki/job_modify.c
index 263e418bdd4eefa9e51bcf7ea8c3f01d2cce984d..b1ede96fe331e288a7ba11a507c0868c295d6c17 100644
--- a/src/plugins/sched/wiki/job_modify.c
+++ b/src/plugins/sched/wiki/job_modify.c
@@ -4,10 +4,11 @@
  *  Copyright (C) 2006-2007 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/plugins/sched/wiki/msg.c b/src/plugins/sched/wiki/msg.c
index 4b7ebfd6c79cea42862bfebae223cc71de4fd688..5ae992e7434a854af7077dba2c03f5521c487471 100644
--- a/src/plugins/sched/wiki/msg.c
+++ b/src/plugins/sched/wiki/msg.c
@@ -1,13 +1,15 @@
 /*****************************************************************************\
  *  msg.c - Message/communication manager for Wiki plugin
  *****************************************************************************
- *  Copyright (C) 2006 The Regents of the University of California.
+ *  Copyright (C) 2006-2007 The Regents of the University of California.
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -35,10 +37,11 @@
  *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
 \*****************************************************************************/
 
-#include "./crypto.h"
-#include "./msg.h"
-#include "src/common/uid.h"
-#include "src/slurmctld/locks.h"
+#include "slurm/slurm.h"
+#include <src/common/uid.h>
+#include <src/slurmctld/locks.h>
+#include <src/plugins/sched/wiki/crypto.h>
+#include <src/plugins/sched/wiki/msg.h>
 
 #define _DEBUG 0
 
@@ -474,7 +477,11 @@ static char *	_recv_msg(slurm_fd new_fd)
 		return NULL;
 	}
 
-	debug2("wiki msg recv:%s", buf);
+	if (slurm_get_debug_flags() & DEBUG_FLAG_WIKI)
+		info("wiki msg recv:%s", buf);
+	else
+		debug2("wiki msg recv:%s", buf);
+
 	return buf;
 }
 
@@ -488,7 +495,10 @@ static size_t	_send_msg(slurm_fd new_fd, char *buf, size_t size)
 	char header[10];
 	size_t data_sent;
 
-	debug2("wiki msg send:%s", buf);
+	if (slurm_get_debug_flags() & DEBUG_FLAG_WIKI)
+		info("wiki msg send:%s", buf);
+	else
+		debug2("wiki msg send:%s", buf);
 
 	(void) sprintf(header, "%08lu\n", (unsigned long) size);
 	if (_write_bytes((int) new_fd, header, 9) != 9) {
diff --git a/src/plugins/sched/wiki/msg.h b/src/plugins/sched/wiki/msg.h
index d01f20d0019941ac330e90622f949193ed20eebb..13cd14335fae0d687c20433be9edfbea9b738b35 100644
--- a/src/plugins/sched/wiki/msg.h
+++ b/src/plugins/sched/wiki/msg.h
@@ -4,10 +4,11 @@
  *  Copyright (C) 2006 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -84,8 +85,8 @@
 
 /* Global configuration parameters */
 #define E_HOST_SIZE  256
-#define EXC_PART_CNT  10
-#define HIDE_PART_CNT 10
+#define EXC_PART_CNT  32
+#define HIDE_PART_CNT 32
 #define KEY_SIZE      32
 #define PRIO_HOLD      0
 #define PRIO_DECREMENT 1
diff --git a/src/plugins/sched/wiki/resume_job.c b/src/plugins/sched/wiki/resume_job.c
index b172e04d25cc5f8aefc2fe9a42b67a0b0c6d22eb..2308f95b9c49ea36a7597fd535981afe43510093 100644
--- a/src/plugins/sched/wiki/resume_job.c
+++ b/src/plugins/sched/wiki/resume_job.c
@@ -4,10 +4,11 @@
  *  Copyright (C) 2006 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/plugins/sched/wiki/sched_wiki.c b/src/plugins/sched/wiki/sched_wiki.c
index 4fdb4dd7dc52df20f5dce6710c58691b1fdfe022..d7270af8f996cc774c76c13e11c1bb2dfdfbb71e 100644
--- a/src/plugins/sched/wiki/sched_wiki.c
+++ b/src/plugins/sched/wiki/sched_wiki.c
@@ -4,10 +4,11 @@
  *  Copyright (C) 2006 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -42,6 +43,7 @@
 #include "src/common/log.h"
 #include "src/slurmctld/slurmctld.h"
 #include "./msg.h"
+#include "src/common/slurm_priority.h"
 
 const char		plugin_name[]	= "Wiki (Maui) Scheduler plugin";
 const char		plugin_type[]	= "sched/wiki";
@@ -117,12 +119,9 @@ slurm_sched_plugin_initial_priority( uint32_t last_prio,
 		return 0;
 	}
 
-	if (init_prio_mode == PRIO_DECREMENT) {
-		if (last_prio >= 2)
-			return (last_prio - 1);
-		else
-			return 1;
-	}
+	if (init_prio_mode == PRIO_DECREMENT) 
+		return priority_g_set(last_prio, job_ptr);
+
 	return 0;
 }
 
diff --git a/src/plugins/sched/wiki/start_job.c b/src/plugins/sched/wiki/start_job.c
index 7be3a0cbc6105a43a4863c2bebb4ac55dd2716e7..245722cb8c931dea530f5042e9369460bb669c76 100644
--- a/src/plugins/sched/wiki/start_job.c
+++ b/src/plugins/sched/wiki/start_job.c
@@ -4,10 +4,11 @@
  *  Copyright (C) 2006-2007 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -147,7 +148,7 @@ static int	_start_job(uint32_t jobid, int task_cnt, char *hostlist,
 	static uint32_t cr_test = 0, cr_enabled = 0;
 
 	if (cr_test == 0) {
-		select_g_get_info_from_plugin(SELECT_CR_PLUGIN,
+		select_g_get_info_from_plugin(SELECT_CR_PLUGIN, NULL,
 						&cr_enabled);
 		cr_test = 1;
 	}
@@ -280,6 +281,7 @@ static int	_start_job(uint32_t jobid, int task_cnt, char *hostlist,
 			}
 			wait_string = job_reason_string(wait_reason);
 			job_ptr->state_reason = WAIT_HELD;
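+			/* clear any stale reason description text */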
+			xfree(job_ptr->state_desc);
 		}
 		*err_code = -910 - wait_reason;
 		snprintf(tmp_msg, sizeof(tmp_msg),
diff --git a/src/plugins/sched/wiki/suspend_job.c b/src/plugins/sched/wiki/suspend_job.c
index 383b9539b3ee64cf94e4e9afbf9eb0694df16b03..fc6879d8169b699fc0bac82093e1354f2b4f12ee 100644
--- a/src/plugins/sched/wiki/suspend_job.c
+++ b/src/plugins/sched/wiki/suspend_job.c
@@ -4,10 +4,11 @@
  *  Copyright (C) 2006 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/plugins/sched/wiki2/Makefile.in b/src/plugins/sched/wiki2/Makefile.in
index ad0c7a25b2c90b7941b90d16ce62e8ddfdf12c8f..d10c4e24760f89fdeeafac7ba453cef4bcb5df26 100644
--- a/src/plugins/sched/wiki2/Makefile.in
+++ b/src/plugins/sched/wiki2/Makefile.in
@@ -43,14 +43,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -114,6 +118,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
diff --git a/src/plugins/sched/wiki2/cancel_job.c b/src/plugins/sched/wiki2/cancel_job.c
index 8a91bf0a01aa37cf6cc1a74d8366aecbad34cc23..921eb5516980df515a190101947f561cd9140750 100644
--- a/src/plugins/sched/wiki2/cancel_job.c
+++ b/src/plugins/sched/wiki2/cancel_job.c
@@ -4,10 +4,11 @@
  *  Copyright (C) 2006-2007 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/plugins/sched/wiki2/event.c b/src/plugins/sched/wiki2/event.c
index 5c840507126aed3be2f06747820d196cbd6ebd47..41fc23f0e6a82094597f2399499558a189ed13c9 100644
--- a/src/plugins/sched/wiki2/event.c
+++ b/src/plugins/sched/wiki2/event.c
@@ -4,10 +4,11 @@
  *  Copyright (C) 2006-2007 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/plugins/sched/wiki2/get_jobs.c b/src/plugins/sched/wiki2/get_jobs.c
index 96226ae70328c8fc0a2f504b45dab250650d6c9b..024acb08ec77bb1e01c4948a05e2019a3c5a7c35 100644
--- a/src/plugins/sched/wiki2/get_jobs.c
+++ b/src/plugins/sched/wiki2/get_jobs.c
@@ -4,10 +4,11 @@
  *  Copyright (C) 2006-2007 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -126,7 +127,7 @@ extern int	get_jobs(char *cmd_ptr, int *err_code, char **err_msg)
 	int job_rec_cnt = 0, buf_size = 0;
 
 	if (cr_test == 0) {
-		select_g_get_info_from_plugin(SELECT_CR_PLUGIN,
+		select_g_get_info_from_plugin(SELECT_CR_PLUGIN, NULL,
 					      &cr_enabled);
 		cr_test = 1;
 	}
diff --git a/src/plugins/sched/wiki2/get_nodes.c b/src/plugins/sched/wiki2/get_nodes.c
index af8263ded38ecbc44bf0d4f1c6e796e6ca85b152..96b135ff35a707a509856e8b3b5c8fdacaa57980 100644
--- a/src/plugins/sched/wiki2/get_nodes.c
+++ b/src/plugins/sched/wiki2/get_nodes.c
@@ -5,10 +5,11 @@
  *  Copyright (C) 2008 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -116,7 +117,8 @@ extern int	get_nodes(char *cmd_ptr, int *err_code, char **err_msg)
 					      node_name);
 					continue;
 				}
-				tmp_buf = _dump_node(node_ptr, NULL, update_time);
+				tmp_buf = _dump_node(node_ptr, NULL, 
+						     update_time);
 				if (node_rec_cnt > 0)
 					xstrcat(buf, "#");
 				xstrcat(buf, tmp_buf);
@@ -152,10 +154,14 @@ static char *	_dump_all_nodes(int *node_cnt, time_t update_time)
 	char *tmp_buf = NULL, *buf = NULL;
 	struct node_record *uniq_node_ptr = NULL;
 	hostlist_t hl = NULL;
-	
+	uint16_t base_state;
+
 	for (i=0; i<node_record_count; i++, node_ptr++) {
 		if (node_ptr->name == NULL)
 			continue;
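+		/* nodes in FUTURE state are not yet available; hide from Moab */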
+		base_state = node_ptr->node_state & NODE_STATE_BASE;
+		if (base_state == NODE_STATE_FUTURE)
+			continue;
 		if (use_host_exp == 2) {
 			rc = _same_info(uniq_node_ptr, node_ptr, update_time);
 			if (rc == 0) {
diff --git a/src/plugins/sched/wiki2/hostlist.c b/src/plugins/sched/wiki2/hostlist.c
index c9f83f4a249376b423c1a01e188f44e86797841c..b11cf7fe2436a7b609f047ffa1dfb17965dd355c 100644
--- a/src/plugins/sched/wiki2/hostlist.c
+++ b/src/plugins/sched/wiki2/hostlist.c
@@ -2,12 +2,14 @@
  *  hostlist.c - Convert hostlist expressions between Slurm and Moab formats
  *****************************************************************************
  *  Copyright (C) 2007 The Regents of the University of California.
+ *  Copyright (C) 2008 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -81,7 +83,7 @@ extern char * moab2slurm_task_list(char *moab_tasklist, int *task_cnt)
 	static uint32_t cr_test = 0, cr_enabled = 0;
 
 	if (cr_test == 0) {
-		select_g_get_info_from_plugin(SELECT_CR_PLUGIN,
+		select_g_get_info_from_plugin(SELECT_CR_PLUGIN, NULL,
 						&cr_enabled);
 		cr_test = 1;
 	}
@@ -161,35 +163,43 @@ extern char * slurm_job2moab_task_list(struct job_record *job_ptr)
 /* Return task list in Moab format 1: tux0:tux0:tux1:tux1:tux2 */
 static char * _task_list(struct job_record *job_ptr)
 {
-	int i, j, task_cnt;
+	int i, j, node_inx = 0, task_cnt;
 	char *buf = NULL, *host;
-	hostlist_t hl = hostlist_create(job_ptr->nodes);
+	select_job_res_t select_ptr = job_ptr->select_job;
 
-	if (hl == NULL) {
-		error("hostlist_create error for job %u, %s",
-			job_ptr->job_id, job_ptr->nodes);
-		return buf;
-	}
-
-	for (i=0; i<job_ptr->alloc_lps_cnt; i++) {
-		host = hostlist_shift(hl);
-		if (host == NULL) {
-			error("bad alloc_lps_cnt for job %u (%s, %d)", 
-				job_ptr->job_id, job_ptr->nodes,
-				job_ptr->alloc_lps_cnt);
-			break;
+	xassert(select_ptr);
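+	/* walk the allocation's node bitmap; cpus[] is indexed by
+	 * allocation order, so advance node_inx one set bit at a time */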
+	for (i=0; i<select_ptr->nhosts; i++) {
+		if (i == 0) {
+			xassert(select_ptr->cpus && select_ptr->node_bitmap);
+			node_inx = bit_ffs(select_ptr->node_bitmap);
+		} else {
+			for (node_inx++; node_inx<node_record_count; 
+			     node_inx++) {
+				if (bit_test(select_ptr->node_bitmap, node_inx))
+					break;
+			}
+			if (node_inx >= node_record_count) {
+				error("Improperly formed select_job for %u",
+				      job_ptr->job_id);
+				break;
+			}
 		}
-		task_cnt = job_ptr->alloc_lps[i];
+		host = node_record_table_ptr[node_inx].name;
+
+		task_cnt = select_ptr->cpus[i];
 		if (job_ptr->details && job_ptr->details->cpus_per_task)
 			task_cnt /= job_ptr->details->cpus_per_task;
+		if (task_cnt < 1) {
+			error("Invalid task_cnt for job %u on node %s",
+			      job_ptr->job_id, host);
+			task_cnt = 1;
+		}
 		for (j=0; j<task_cnt; j++) {
 			if (buf)
 				xstrcat(buf, ":");
 			xstrcat(buf, host);
 		}
-		free(host);
 	}
-	hostlist_destroy(hl);
 	return buf;
 }
 
@@ -251,29 +261,38 @@ static void _append_hl_buf(char **buf, hostlist_t *hl_tmp, int *reps)
 /* Return task list in Moab format 2: tux[0-1]*2:tux2 */
 static char * _task_list_exp(struct job_record *job_ptr)
 {
-	int i, reps = -1, task_cnt;
+	int i, node_inx = 0, reps = -1, task_cnt;
 	char *buf = NULL, *host;
-	hostlist_t hl = hostlist_create(job_ptr->nodes);
 	hostlist_t hl_tmp = (hostlist_t) NULL;
+	select_job_res_t select_ptr = job_ptr->select_job;
 
-	if (hl == NULL) {
-		error("hostlist_create error for job %u, %s",
-			job_ptr->job_id, job_ptr->nodes);
-		return buf;
-	}
-
-	for (i=0; i<job_ptr->alloc_lps_cnt; i++) {
-		host = hostlist_shift(hl);
-		if (host == NULL) {
-			error("bad alloc_lps_cnt for job %u (%s, %d)", 
-				job_ptr->job_id, job_ptr->nodes,
-				job_ptr->alloc_lps_cnt);
-			break;
+	xassert(select_ptr);
+	for (i=0; i<select_ptr->nhosts; i++) {
+		if (i == 0) {
+			xassert(select_ptr->cpus && select_ptr->node_bitmap);
+			node_inx = bit_ffs(select_ptr->node_bitmap);
+		} else {
+			for (node_inx++; node_inx<node_record_count; 
+			     node_inx++) {
+				if (bit_test(select_ptr->node_bitmap, node_inx))
+					break;
+			}
+			if (node_inx >= node_record_count) {
+				error("Improperly formed select_job for %u",
+				      job_ptr->job_id);
+				break;
+			}
 		}
+		host = node_record_table_ptr[node_inx].name;
 
-		task_cnt = job_ptr->alloc_lps[i];
+		task_cnt = select_ptr->cpus[i];
 		if (job_ptr->details && job_ptr->details->cpus_per_task)
 			task_cnt /= job_ptr->details->cpus_per_task;
+		if (task_cnt < 1) {
+			error("Invalid task_cnt for job %u on node %s",
+			      job_ptr->job_id, host);
+			task_cnt = 1;
+		}
 		if (reps == task_cnt) {
 			/* append to existing hostlist record */
 			if (hostlist_push(hl_tmp, host) == 0)
@@ -289,9 +308,7 @@ static char * _task_list_exp(struct job_record *job_ptr)
 			else
 				error("hostlist_create failure");
 		}
-		free(host);
 	}
-	hostlist_destroy(hl);
 	if (hl_tmp)
 		_append_hl_buf(&buf, &hl_tmp, &reps);
 	return buf;
diff --git a/src/plugins/sched/wiki2/initialize.c b/src/plugins/sched/wiki2/initialize.c
index f94576ab1f1f7f72591675fcc5a6934ffb2ac49a..4314b2901b044352ee40ecb91a3165ba9ccefa46 100644
--- a/src/plugins/sched/wiki2/initialize.c
+++ b/src/plugins/sched/wiki2/initialize.c
@@ -4,10 +4,11 @@
  *  Copyright (C) 2006 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/plugins/sched/wiki2/job_add_task.c b/src/plugins/sched/wiki2/job_add_task.c
index 97d6604ac4a54f0ec5ef4c8728a2e4989188717a..4671a19aaad9b53f097ab32a3f9abfe300fd0b10 100644
--- a/src/plugins/sched/wiki2/job_add_task.c
+++ b/src/plugins/sched/wiki2/job_add_task.c
@@ -4,10 +4,11 @@
  *  Copyright (C) 2006 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/plugins/sched/wiki2/job_modify.c b/src/plugins/sched/wiki2/job_modify.c
index c0c7673abe5edba8e3834134ef87ad14fd951634..834dd9978c9f4fb431e5be2f91eebb467dc1dc54 100644
--- a/src/plugins/sched/wiki2/job_modify.c
+++ b/src/plugins/sched/wiki2/job_modify.c
@@ -4,10 +4,11 @@
  *  Copyright (C) 2006-2007 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/plugins/sched/wiki2/job_notify.c b/src/plugins/sched/wiki2/job_notify.c
index ddb3a65dd3bbd2eae6aa4f9b02a838d6611a6082..7aa80a7e499ebb9f0ca1fa3ae072443f18819a04 100644
--- a/src/plugins/sched/wiki2/job_notify.c
+++ b/src/plugins/sched/wiki2/job_notify.c
@@ -4,10 +4,11 @@
  *  Copyright (C) 2006-2007 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/plugins/sched/wiki2/job_release_task.c b/src/plugins/sched/wiki2/job_release_task.c
index 1a02f40e122eb2a40243f026dd8724b27a8f315f..b0b825f87204cd4af8b176bbf06e632b43337a1a 100644
--- a/src/plugins/sched/wiki2/job_release_task.c
+++ b/src/plugins/sched/wiki2/job_release_task.c
@@ -4,10 +4,11 @@
  *  Copyright (C) 2006 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/plugins/sched/wiki2/job_requeue.c b/src/plugins/sched/wiki2/job_requeue.c
index a756dcf0063b037ef87c36fa42177d9d8c2053cc..539aa994244875d1cee6e9dc5fec5b6f3d0000f3 100644
--- a/src/plugins/sched/wiki2/job_requeue.c
+++ b/src/plugins/sched/wiki2/job_requeue.c
@@ -4,10 +4,11 @@
  *  Copyright (C) 2006 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/plugins/sched/wiki2/job_signal.c b/src/plugins/sched/wiki2/job_signal.c
index d1a24fa068f4d5eed9b13d65127ca1717ec3036a..fc12f41c57272b555aaf7ea17d2b8b4da189cd02 100644
--- a/src/plugins/sched/wiki2/job_signal.c
+++ b/src/plugins/sched/wiki2/job_signal.c
@@ -4,10 +4,11 @@
  *  Copyright (C) 2006 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/plugins/sched/wiki2/job_will_run.c b/src/plugins/sched/wiki2/job_will_run.c
index 27e7d1e45d3a31866013093c84b092b68e7c3dcd..0211556e522e3fa20607c1f90962e1011ec40a6a 100644
--- a/src/plugins/sched/wiki2/job_will_run.c
+++ b/src/plugins/sched/wiki2/job_will_run.c
@@ -5,10 +5,11 @@
  *  Copyright (C) 2008 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -16,7 +17,7 @@
  *  any later version.
  *
  *  In addition, as a special exception, the copyright holders give permission 
- *  to link the code of portions of this program with the OpenSSL library under 
+ *  to link the code of portions of this program with the OpenSSL library under
  *  certain conditions as described in each individual source file, and 
  *  distribute linked combinations including the two. You must obey the GNU 
  *  General Public License in all respects for all of the code used other than 
@@ -39,8 +40,9 @@
 #include "./msg.h"
 #include "src/common/node_select.h"
 #include "src/slurmctld/locks.h"
-#include "src/slurmctld/slurmctld.h"
 #include "src/slurmctld/node_scheduler.h"
+#include "src/slurmctld/reservation.h"
+#include "src/slurmctld/slurmctld.h"
 #include "src/slurmctld/state_save.h"
 
 #define MAX_JOB_QUEUE 20
@@ -96,7 +98,8 @@ extern int	job_will_run(char *cmd_ptr, int *err_code, char **err_msg)
 		arg_ptr += 6;
 		jobid[job_cnt] = strtoul(arg_ptr, &tmp_char, 10);
 		if (tmp_char[0] == '@')
-			start_time[job_cnt] = strtoul(tmp_char+1, &tmp_char, 10);
+			start_time[job_cnt] = strtoul(tmp_char+1, &tmp_char,
+						      10);
 		else
 			start_time[job_cnt] = 0;
 		if (tmp_char[0] != ',') {
@@ -150,13 +153,14 @@ static char *	_will_run_test(uint32_t *jobid, time_t *start_time,
 {
 	struct job_record *job_ptr;
 	struct part_record *part_ptr;
-	bitstr_t *avail_bitmap = NULL;
+	bitstr_t *avail_bitmap = NULL, *resv_bitmap = NULL;
 	char *hostlist, *reply_msg = NULL;
 	uint32_t min_nodes, max_nodes, req_nodes;
 	int i, rc;
 	select_will_run_t *select_will_run = NULL;
 	List select_list;
 	ListIterator iter;
+	time_t now = time(NULL), start_res;
 
 	select_list = list_create(_select_list_del);
 	if (select_list == NULL)
@@ -208,6 +212,23 @@ static char *	_will_run_test(uint32_t *jobid, time_t *start_time,
 			break;
 		}
 
+		/* Enforce reservation: access control, time and nodes */
+		if (start_time[i])
+			start_res = start_time[i];
+		else
+			start_res = now;
+		rc = job_test_resv(job_ptr, &start_res, true, &resv_bitmap);
+		if (rc != SLURM_SUCCESS) {
+			*err_code = -730;
+			*err_msg = "Job denied access to reservation";
+			error("wiki: reservation access denied for job %u", 
+			      jobid[i]);
+			break;
+		}
+		start_time[i] = MAX(start_time[i], start_res);
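+		/* restrict candidate nodes to those the reservation
+		 * makes available to this job */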
+		bit_and(avail_bitmap, resv_bitmap);
+		FREE_NULL_BITMAP(resv_bitmap);
+
 		/* Only consider nodes that are not DOWN or DRAINED */
 		bit_and(avail_bitmap, avail_node_bitmap);
 
diff --git a/src/plugins/sched/wiki2/msg.c b/src/plugins/sched/wiki2/msg.c
index 466868415c19217d2a6ab9f800477e376b3e2d51..6b2d10465c6e6a7092cdf1a87ff9bd9a7fb63697 100644
--- a/src/plugins/sched/wiki2/msg.c
+++ b/src/plugins/sched/wiki2/msg.c
@@ -2,12 +2,14 @@
 *  msg.c - Message/communication manager for Wiki plugin
  *****************************************************************************
  *  Copyright (C) 2006-2007 The Regents of the University of California.
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -35,10 +37,11 @@
  *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
 \*****************************************************************************/
 
-#include "./crypto.h"
-#include "./msg.h"
-#include "src/common/uid.h"
-#include "src/slurmctld/locks.h"
+#include "slurm/slurm.h"
+#include <src/common/uid.h>
+#include <src/slurmctld/locks.h>
+#include <src/plugins/sched/wiki2/crypto.h>
+#include <src/plugins/sched/wiki2/msg.h>
 #include <sys/poll.h>
 
 #define _DEBUG 0
@@ -529,7 +532,11 @@ static char *	_recv_msg(slurm_fd new_fd)
 		return NULL;
 	}
 
-	debug2("wiki msg recv:%s", buf);
+	if (slurm_get_debug_flags() && DEBUG_FLAG_WIKI)
+		info("wiki msg recv:%s", buf);
+	else
+		debug2("wiki msg recv:%s", buf);
+
 	return buf;
 }
 
@@ -543,7 +550,10 @@ static size_t	_send_msg(slurm_fd new_fd, char *buf, size_t size)
 	char header[10];
 	size_t data_sent;
 
-	debug2("wiki msg send:%s", buf);
+	if (slurm_get_debug_flags() && DEBUG_FLAG_WIKI)
+		info("wiki msg send:%s", buf);
+	else
+		debug2("wiki msg send:%s", buf);
 
 	(void) sprintf(header, "%08lu\n", (unsigned long) size);
 	if (_write_bytes((int) new_fd, header, 9) != 9) {
diff --git a/src/plugins/sched/wiki2/msg.h b/src/plugins/sched/wiki2/msg.h
index b9d22f64008cca950e43de2c241f25a7eefa01ed..de55e0f2c58f736a2772a70ca0e72eaee0f9595e 100644
--- a/src/plugins/sched/wiki2/msg.h
+++ b/src/plugins/sched/wiki2/msg.h
@@ -4,10 +4,11 @@
  *  Copyright (C) 2006-2007 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -84,8 +85,8 @@
 
 /* Global configuration parameters */
 #define E_HOST_SIZE	256
-#define EXC_PART_CNT	10
-#define HIDE_PART_CNT	10
+#define EXC_PART_CNT	32
+#define HIDE_PART_CNT	32
 #define KEY_SIZE	32
 #define PRIO_HOLD	0
 #define PRIO_DECREMENT	1
diff --git a/src/plugins/sched/wiki2/resume_job.c b/src/plugins/sched/wiki2/resume_job.c
index b172e04d25cc5f8aefc2fe9a42b67a0b0c6d22eb..2308f95b9c49ea36a7597fd535981afe43510093 100644
--- a/src/plugins/sched/wiki2/resume_job.c
+++ b/src/plugins/sched/wiki2/resume_job.c
@@ -4,10 +4,11 @@
  *  Copyright (C) 2006 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/plugins/sched/wiki2/sched_wiki.c b/src/plugins/sched/wiki2/sched_wiki.c
index db4168acf23d4ee072134e45de54a5f1809db980..5f8f6fa305d51587b3d13f419e840a0fbfa24d04 100644
--- a/src/plugins/sched/wiki2/sched_wiki.c
+++ b/src/plugins/sched/wiki2/sched_wiki.c
@@ -2,12 +2,14 @@
  *  sched_wiki.c - Wiki plugin for Moab and Maui schedulers.
  *****************************************************************************
  *  Copyright (C) 2006-2007 The Regents of the University of California.
+ *  Copyright (C) 2008 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -39,6 +41,7 @@
 
 #include "src/common/plugin.h"
 #include "src/common/log.h"
+#include "src/common/slurm_priority.h"
 #include "./msg.h"
 
 const char		plugin_name[]	= "Wiki (Maui and Moab) Scheduler plugin";
@@ -118,12 +121,9 @@ slurm_sched_plugin_initial_priority( uint32_t last_prio,
 		return 0;
 	}
 
-	if (init_prio_mode == PRIO_DECREMENT) {
-		if (last_prio >= 2)
-			return (last_prio - 1);
-		else
-			return 1;
-	}
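+	/* defer to the configured priority plugin rather than
+	 * decrementing the priority here */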
+	if (init_prio_mode == PRIO_DECREMENT)
+		return priority_g_set(last_prio, job_ptr);
+
 	return 0;
 }
 
@@ -140,7 +140,9 @@ void slurm_sched_plugin_job_is_pending( void )
 /**************************************************************************/
 int slurm_sched_plugin_reconfig( void )
 {
-	return parse_wiki_config();
+	int rc = parse_wiki_config();
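+	/* notify the external scheduler that partition configuration
+	 * may have changed */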
+	(void) event_notify(1235, "Partition change");
+	return rc;
 }
 
 /**************************************************************************/
diff --git a/src/plugins/sched/wiki2/start_job.c b/src/plugins/sched/wiki2/start_job.c
index 3dfe72a847133d8f113a6023772e9269e091d3f5..06ddf6f827130bab9e910a3c47ccf15dcd3b706f 100644
--- a/src/plugins/sched/wiki2/start_job.c
+++ b/src/plugins/sched/wiki2/start_job.c
@@ -4,10 +4,11 @@
  *  Copyright (C) 2006-2007 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -190,7 +191,7 @@ static int	_start_job(uint32_t jobid, int task_cnt, char *hostlist,
 	static uint32_t cr_test = 0, cr_enabled = 0;
 
 	if (cr_test == 0) {
-		select_g_get_info_from_plugin(SELECT_CR_PLUGIN,
+		select_g_get_info_from_plugin(SELECT_CR_PLUGIN, NULL,
 						&cr_enabled);
 		cr_test = 1;
 	}
@@ -334,6 +335,7 @@ static int	_start_job(uint32_t jobid, int task_cnt, char *hostlist,
 			}
 			wait_string = job_reason_string(wait_reason);
 			job_ptr->state_reason = WAIT_HELD;
+			xfree(job_ptr->state_desc);
 		}
 		*err_code = -910 - wait_reason;
 		snprintf(tmp_msg, sizeof(tmp_msg),
diff --git a/src/plugins/sched/wiki2/suspend_job.c b/src/plugins/sched/wiki2/suspend_job.c
index 383b9539b3ee64cf94e4e9afbf9eb0694df16b03..fc6879d8169b699fc0bac82093e1354f2b4f12ee 100644
--- a/src/plugins/sched/wiki2/suspend_job.c
+++ b/src/plugins/sched/wiki2/suspend_job.c
@@ -4,10 +4,11 @@
  *  Copyright (C) 2006 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/plugins/select/Makefile.in b/src/plugins/select/Makefile.in
index 67ea11018a499c5d3679fb96741efe11447855af..7c6b4066236a6751e576ff0c58cb36ce6c92a879 100644
--- a/src/plugins/select/Makefile.in
+++ b/src/plugins/select/Makefile.in
@@ -42,14 +42,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -91,6 +95,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
diff --git a/src/plugins/select/bluegene/Makefile.in b/src/plugins/select/bluegene/Makefile.in
index 6c605f7cc52f5eeb8e839b1e58c119b38ba35a71..4e96779fa0f23ef4df4765b67f93f15f87f37339 100644
--- a/src/plugins/select/bluegene/Makefile.in
+++ b/src/plugins/select/bluegene/Makefile.in
@@ -42,14 +42,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -91,6 +95,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
diff --git a/src/plugins/select/bluegene/block_allocator/Makefile.am b/src/plugins/select/bluegene/block_allocator/Makefile.am
index bbc7f300e98e70862782591368bb4484746a1e2b..5f74086f7bbd596e9fe8b3a6181c6f2c95685665 100644
--- a/src/plugins/select/bluegene/block_allocator/Makefile.am
+++ b/src/plugins/select/bluegene/block_allocator/Makefile.am
@@ -5,21 +5,6 @@ CLEANFILES = core.*
 
 INCLUDES = -I$(top_srcdir) $(BG_INCLUDES)
 
-#to build the executable
-# noinst_PROGRAMS = block_allocator
-
-# block_allocator_SOURCES = block_allocator.c bridge_linker.c \
-# 	block_allocator.h bridge_linker.h
-
-# block_allocator_LDADD = \
-# 	$(top_builddir)/src/api/libslurm.la
-
-
-# block_allocator_LDFLAGS = -export-dynamic -lm $(CMD_LDFLAGS)
-
-# CPPFLAGS = -DBUILD_EXE
-
-
 # making a .la
 
 noinst_LTLIBRARIES = libbluegene_block_allocator.la 
@@ -29,3 +14,16 @@ libbluegene_block_allocator_la_SOURCES =    \
 libbluegene_block_allocator_la_LDFLAGS        = \
 	$(LIB_LDFLAGS) -lm    
 
+if BLUEGENE_LOADED
+
+# to build the wire_test debug executable
+noinst_PROGRAMS = wire_test
+
+wire_test_SOURCES = wire_test.c block_allocator.h
+
+wire_test_LDADD = libbluegene_block_allocator.la \
+	$(top_builddir)/src/api/libslurm.o -ldl
+
+wire_test_LDFLAGS = -export-dynamic -lm $(CMD_LDFLAGS)
+
+endif
diff --git a/src/plugins/select/bluegene/block_allocator/Makefile.in b/src/plugins/select/bluegene/block_allocator/Makefile.in
index c06ae39c17a5499947c5aecb44e71362712d3786..bffdc165070695ff8dd9ac02ac209303a31ecd1c 100644
--- a/src/plugins/select/bluegene/block_allocator/Makefile.in
+++ b/src/plugins/select/bluegene/block_allocator/Makefile.in
@@ -16,6 +16,7 @@
 
 # Makefile.am for bluegene_block_allocator
 
+
 VPATH = @srcdir@
 pkgdatadir = $(datadir)/@PACKAGE@
 pkglibdir = $(libdir)/@PACKAGE@
@@ -35,6 +36,7 @@ POST_UNINSTALL = :
 build_triplet = @build@
 host_triplet = @host@
 target_triplet = @target@
+@BLUEGENE_LOADED_TRUE@noinst_PROGRAMS = wire_test$(EXEEXT)
 subdir = src/plugins/select/bluegene/block_allocator
 DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in
 ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
@@ -43,14 +45,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -77,6 +83,15 @@ libbluegene_block_allocator_la_LINK = $(LIBTOOL) --tag=CC \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CCLD) \
 	$(AM_CFLAGS) $(CFLAGS) \
 	$(libbluegene_block_allocator_la_LDFLAGS) $(LDFLAGS) -o $@
+PROGRAMS = $(noinst_PROGRAMS)
+wire_test_SOURCES = wire_test.c
+wire_test_OBJECTS = wire_test.$(OBJEXT)
+@BLUEGENE_LOADED_TRUE@wire_test_DEPENDENCIES =  \
+@BLUEGENE_LOADED_TRUE@	libbluegene_block_allocator.la \
+@BLUEGENE_LOADED_TRUE@	$(top_builddir)/src/api/libslurm.o
+wire_test_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \
+	$(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \
+	$(wire_test_LDFLAGS) $(LDFLAGS) -o $@
 DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm
 depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp
 am__depfiles_maybe = depfiles
@@ -89,8 +104,8 @@ CCLD = $(CC)
 LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
 	--mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \
 	$(LDFLAGS) -o $@
-SOURCES = $(libbluegene_block_allocator_la_SOURCES)
-DIST_SOURCES = $(libbluegene_block_allocator_la_SOURCES)
+SOURCES = $(libbluegene_block_allocator_la_SOURCES) wire_test.c
+DIST_SOURCES = $(libbluegene_block_allocator_la_SOURCES) wire_test.c
 ETAGS = etags
 CTAGS = ctags
 DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
@@ -104,6 +119,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
@@ -267,19 +286,6 @@ AUTOMAKE_OPTIONS = foreign
 CLEANFILES = core.*
 INCLUDES = -I$(top_srcdir) $(BG_INCLUDES)
 
-#to build the executable
-# noinst_PROGRAMS = block_allocator
-
-# block_allocator_SOURCES = block_allocator.c bridge_linker.c \
-# 	block_allocator.h bridge_linker.h
-
-# block_allocator_LDADD = \
-# 	$(top_builddir)/src/api/libslurm.la
-
-# block_allocator_LDFLAGS = -export-dynamic -lm $(CMD_LDFLAGS)
-
-# CPPFLAGS = -DBUILD_EXE
-
 # making a .la
 noinst_LTLIBRARIES = libbluegene_block_allocator.la 
 libbluegene_block_allocator_la_SOURCES = \
@@ -288,6 +294,11 @@ libbluegene_block_allocator_la_SOURCES = \
 libbluegene_block_allocator_la_LDFLAGS = \
 	$(LIB_LDFLAGS) -lm    
 
+@BLUEGENE_LOADED_TRUE@wire_test_SOURCES = wire_test.c block_allocator.h
+@BLUEGENE_LOADED_TRUE@wire_test_LDADD = libbluegene_block_allocator.la \
+@BLUEGENE_LOADED_TRUE@	$(top_builddir)/src/api/libslurm.o -ldl
+
+@BLUEGENE_LOADED_TRUE@wire_test_LDFLAGS = -export-dynamic -lm $(CMD_LDFLAGS)
 all: all-am
 
 .SUFFIXES:
@@ -333,6 +344,16 @@ clean-noinstLTLIBRARIES:
 libbluegene_block_allocator.la: $(libbluegene_block_allocator_la_OBJECTS) $(libbluegene_block_allocator_la_DEPENDENCIES) 
 	$(libbluegene_block_allocator_la_LINK)  $(libbluegene_block_allocator_la_OBJECTS) $(libbluegene_block_allocator_la_LIBADD) $(LIBS)
 
+clean-noinstPROGRAMS:
+	@list='$(noinst_PROGRAMS)'; for p in $$list; do \
+	  f=`echo $$p|sed 's/$(EXEEXT)$$//'`; \
+	  echo " rm -f $$p $$f"; \
+	  rm -f $$p $$f ; \
+	done
+wire_test$(EXEEXT): $(wire_test_OBJECTS) $(wire_test_DEPENDENCIES) 
+	@rm -f wire_test$(EXEEXT)
+	$(wire_test_LINK) $(wire_test_OBJECTS) $(wire_test_LDADD) $(LIBS)
+
 mostlyclean-compile:
 	-rm -f *.$(OBJEXT)
 
@@ -341,6 +362,7 @@ distclean-compile:
 
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/block_allocator.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/bridge_linker.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/wire_test.Po@am__quote@
 
 .c.o:
 @am__fastdepCC_TRUE@	$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
@@ -444,7 +466,7 @@ distdir: $(DISTFILES)
 	done
 check-am: all-am
 check: check-am
-all-am: Makefile $(LTLIBRARIES)
+all-am: Makefile $(LTLIBRARIES) $(PROGRAMS)
 installdirs:
 install: install-am
 install-exec: install-exec-am
@@ -474,7 +496,7 @@ maintainer-clean-generic:
 clean: clean-am
 
 clean-am: clean-generic clean-libtool clean-noinstLTLIBRARIES \
-	mostlyclean-am
+	clean-noinstPROGRAMS mostlyclean-am
 
 distclean: distclean-am
 	-rm -rf ./$(DEPDIR)
@@ -533,17 +555,18 @@ uninstall-am:
 .MAKE: install-am install-strip
 
 .PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \
-	clean-libtool clean-noinstLTLIBRARIES ctags distclean \
-	distclean-compile distclean-generic distclean-libtool \
-	distclean-tags distdir dvi dvi-am html html-am info info-am \
-	install install-am install-data install-data-am install-dvi \
-	install-dvi-am install-exec install-exec-am install-html \
-	install-html-am install-info install-info-am install-man \
-	install-pdf install-pdf-am install-ps install-ps-am \
-	install-strip installcheck installcheck-am installdirs \
-	maintainer-clean maintainer-clean-generic mostlyclean \
-	mostlyclean-compile mostlyclean-generic mostlyclean-libtool \
-	pdf pdf-am ps ps-am tags uninstall uninstall-am
+	clean-libtool clean-noinstLTLIBRARIES clean-noinstPROGRAMS \
+	ctags distclean distclean-compile distclean-generic \
+	distclean-libtool distclean-tags distdir dvi dvi-am html \
+	html-am info info-am install install-am install-data \
+	install-data-am install-dvi install-dvi-am install-exec \
+	install-exec-am install-html install-html-am install-info \
+	install-info-am install-man install-pdf install-pdf-am \
+	install-ps install-ps-am install-strip installcheck \
+	installcheck-am installdirs maintainer-clean \
+	maintainer-clean-generic mostlyclean mostlyclean-compile \
+	mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \
+	tags uninstall uninstall-am
 
 # Tell versions [3.59,3.63) of GNU make to not export all variables.
 # Otherwise a system limit (for SysV at least) may be exceeded.
diff --git a/src/plugins/select/bluegene/block_allocator/block_allocator.c b/src/plugins/select/bluegene/block_allocator/block_allocator.c
index 546771f902a740eb16542a8e785c9acbc3034b75..ec5a201d67fe02586a6aaf05066c8319575eba5e 100644
--- a/src/plugins/select/bluegene/block_allocator/block_allocator.c
+++ b/src/plugins/select/bluegene/block_allocator/block_allocator.c
@@ -1,14 +1,15 @@
 /*****************************************************************************\
  *  block_allocator.c - Assorted functions for layout of bluegene blocks, 
  *	 wiring, mapping for smap, etc.
- *  $Id: block_allocator.c 17225 2009-04-10 19:25:52Z da $
+ *  $Id: block_allocator.c 17643 2009-05-29 17:20:48Z da $
  *****************************************************************************
  *  Copyright (C) 2004 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Dan Phung <phung4@llnl.gov>, Danny Auble <da@llnl.gov>
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -200,7 +201,8 @@ static int _find_next_free_using_port_2(ba_switch_t *curr_switch,
 /* 			     List nodes, int dim,  */
 /* 			     int count, int highest_phys_x);  */
 /* */
-static int _finish_torus(ba_switch_t *curr_switch, int source_port, 
+static int _finish_torus(List results,
+			 ba_switch_t *curr_switch, int source_port,
 			 int dim, int count, int *start);
 /* */
 static int *_set_best_path();
@@ -500,12 +502,11 @@ extern int new_ba_request(ba_request_t* ba_request)
 #ifdef HAVE_BG
 	float sz=1;
 	int geo[BA_SYSTEM_DIMENSIONS] = {0,0,0};
-	int i2, i3, picked, total_sz=1 , size2, size3;
-	ListIterator itr;
-	int checked[8];
+	int i2, i3, picked, total_sz=1, size2=0;
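+	/* one flag per X-dimension index; a VLA sized to this system */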
+	int checked[DIM_SIZE[X]];
 	int *geo_ptr;
 	int messed_with = 0;
-	
+
 	ba_request->save_name= NULL;
 	ba_request->rotate_count= 0;
 	ba_request->elongate_count = 0;
@@ -583,7 +584,7 @@ extern int new_ba_request(ba_request_t* ba_request)
 				    ba_request->rotate);
 		}
 	
-		if(ba_request->size>total_sz || ba_request->size<1) {
+		if(ba_request->size > total_sz || ba_request->size < 1) {
 			return 0;			
 		}
 		sz = ba_request->size % (DIM_SIZE[Y] * DIM_SIZE[Z]);
@@ -593,54 +594,55 @@ extern int new_ba_request(ba_request_t* ba_request)
 		      geo[Y] = DIM_SIZE[Y];
 		      geo[Z] = DIM_SIZE[Z];
 		      sz=ba_request->size;
-		      _append_geo(geo,
-				  ba_request->elongate_geos,
-				  ba_request->rotate);
+		      if((geo[X]*geo[Y]*geo[Z]) == ba_request->size)
+			      _append_geo(geo,
+					  ba_request->elongate_geos,
+					  ba_request->rotate);
+		      else
+			      error("%d I was just trying to add a "
+				    "geo of %d%d%d "
+				    "while I am trying to request %d midplanes",
+				    __LINE__, geo[X], geo[Y], geo[Z],
+				    ba_request->size);
 		}	
 //	startagain:		
 		picked=0;
-		for(i=0;i<8;i++)
+		for(i=0; i<DIM_SIZE[X]; i++)
 			checked[i]=0;
 		
-		size3=ba_request->size;
-		
 		for (i=0; i<BA_SYSTEM_DIMENSIONS; i++) {
 			total_sz *= DIM_SIZE[i];
 			geo[i] = 1;
 		}
-	
+
 		sz = 1;
-		size3=ba_request->size;
 		picked=0;
 	tryagain:	
-		if(size3!=ba_request->size)
-			size2=size3;
-		else
-			size2=ba_request->size;
+		size2 = ba_request->size;
 		//messedup:
-
 		for (i=picked; i<BA_SYSTEM_DIMENSIONS; i++) { 
-			if(size2<=1) 
+			if(size2 <= 1) 
 				break;
-			sz = size2%DIM_SIZE[i];
+
+			sz = size2 % DIM_SIZE[i];
 			if(!sz) {
 				geo[i] = DIM_SIZE[i];	
 				size2 /= DIM_SIZE[i];
-			} else if (size2 > DIM_SIZE[i]){
-				for(i2=(DIM_SIZE[i]-1);i2>1;i2--) {
+			} else if (size2 > DIM_SIZE[i]) {
+				for(i2=(DIM_SIZE[i]-1); i2 > 1; i2--) {
 					/* go through each number to see if 
 					   the size is divisible by a smaller
 					   number that is 
 					   good in the other dims. */
 					if (!(size2%i2) && !checked[i2]) {
 						size2 /= i2;
-									
+
 						if(i==0)
 							checked[i2]=1;
 							
-						if(i2<DIM_SIZE[i]) 
+						if(i2<DIM_SIZE[i]) {
 							geo[i] = i2;
-						else {
+						} else {
 							goto tryagain;
 						}
 						if((i2-1)!=1 && 
@@ -657,20 +659,21 @@ extern int new_ba_request(ba_request_t* ba_request)
 				   run.  
 				*/
 				if(i2==1) {
-					error("Can't make a block of "
-					      "%d into a cube.",
-					      ba_request->size);
-					return 0;
+					if(!list_count(
+						   ba_request->elongate_geos))
+						error("Can't make a block of "
+						      "%d into a cube.",
+						      ba_request->size);
+					goto endit;
 /* 					ba_request->size +=1; */
 /* 					goto startagain; */
 				}
-						
 			} else {
 				geo[i] = sz;	
 				break;
 			}					
 		}
-		
+
 		if((geo[X]*geo[Y]) <= DIM_SIZE[Y]) {
 			ba_request->geometry[X] = 1;
 			ba_request->geometry[Y] = geo[X] * geo[Y];
@@ -689,7 +692,10 @@ extern int new_ba_request(ba_request_t* ba_request)
 				    ba_request->rotate);		
 	
 		}
-		if((geo[X]/2) <= DIM_SIZE[Y]) {
+
+		/* Make sure geo[X] is even and then see if we can get
+		   it into the Y or Z dim. */
+		if(!(geo[X]%2) && ((geo[X]/2) <= DIM_SIZE[Y])) {
 			if(geo[Y] == 1) {
 				ba_request->geometry[Y] = geo[X]/2;
 				messed_with = 1;
@@ -746,9 +752,15 @@ extern int new_ba_request(ba_request_t* ba_request)
 			}
 		}
 		
-		_append_geo(geo, 
-			    ba_request->elongate_geos, 
-			    ba_request->rotate);
+		if((geo[X]*geo[Y]*geo[Z]) == ba_request->size)
+			_append_geo(geo,
+				    ba_request->elongate_geos,
+				    ba_request->rotate);
+		else
+			error("%d I was just trying to add a geo of %d%d%d "
+			      "while I am trying to request %d midplanes",
+			       __LINE__, geo[X], geo[Y], geo[Z],
+			      ba_request->size);
 	
 		/* see if We can find a cube or square root of the 
 		   size to make an easy cube */
@@ -771,18 +783,22 @@ extern int new_ba_request(ba_request_t* ba_request)
 				else
 					goto endit;
 				
-			_append_geo(geo, 
-				    ba_request->elongate_geos, 
-				    ba_request->rotate);
+			if((geo[X]*geo[Y]*geo[Z]) == ba_request->size)
+				_append_geo(geo,
+					    ba_request->elongate_geos,
+					    ba_request->rotate);
+			else
+				error("%d I was just trying to add "
+				      "a geo of %d%d%d "
+				      "while I am trying to request "
+				      "%d midplanes",
+				      __LINE__, geo[X], geo[Y], geo[Z],
+				      ba_request->size);			
 		} 
 	}
 	
 endit:
-	itr = list_iterator_create(ba_request->elongate_geos);
-	geo_ptr = list_next(itr);
-	list_iterator_destroy(itr);
-	
-	if(geo_ptr == NULL)
+	if(!(geo_ptr = list_peek(ba_request->elongate_geos)))
 		return 0;
 
 	ba_request->elongate_count++;
@@ -1036,12 +1052,15 @@ node_info_error:
 					  % (HOSTLIST_BASE * HOSTLIST_BASE))
 					/ HOSTLIST_BASE;
 				end[Z] = (number % HOSTLIST_BASE);
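+				/* step past the three coordinate digits
+				   just parsed */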
+				j += 3;
+
 				DIM_SIZE[X] = MAX(DIM_SIZE[X], end[X]);
 				DIM_SIZE[Y] = MAX(DIM_SIZE[Y], end[Y]);
 				DIM_SIZE[Z] = MAX(DIM_SIZE[Z], end[Z]);
-				break;
-			}
-				
+
+				if(node->nodenames[j] != ',')
+					break;
+			}
 		}
 		if ((DIM_SIZE[X]==0) && (DIM_SIZE[Y]==0) && (DIM_SIZE[Z]==0)) 
 			info("are you sure you only have 1 midplane? %s",
@@ -1204,7 +1223,7 @@ extern void ba_fini()
 	bridge_fini();
 #endif
 	_delete_ba_system();
-//	debug2("pa system destroyed");
+//	debug3("pa system destroyed");
 }
 
 
@@ -1294,7 +1313,7 @@ extern int copy_node_path(List nodes, List *dest_nodes)
 		list_iterator_destroy(itr2);
 	
 		if(!new_ba_node) {
-			debug2("adding %c%c%c as a new node",
+			debug3("adding %c%c%c as a new node",
 			       alpha_num[ba_node->coord[X]], 
 			       alpha_num[ba_node->coord[Y]],
 			       alpha_num[ba_node->coord[Z]]);
@@ -1489,7 +1508,7 @@ extern int check_and_set_node_list(List nodes)
 			[ba_node->coord[Y]]
 			[ba_node->coord[Z]];
 		if(ba_node->used && curr_ba_node->used) {
-			debug3("I have already been to "
+			debug4("I have already been to "
 			       "this node %c%c%c",
 			       alpha_num[ba_node->coord[X]], 
 			       alpha_num[ba_node->coord[Y]],
@@ -1512,7 +1531,7 @@ extern int check_and_set_node_list(List nodes)
 				   && curr_ba_switch->int_wire[j].used
 					&& j != curr_ba_switch->
 				   int_wire[j].port_tar) {
-					debug3("%c%c%c dim %d port %d "
+					debug4("%c%c%c dim %d port %d "
 					       "is already in use to %d",
 					       alpha_num[ba_node->coord[X]], 
 					       alpha_num[ba_node->coord[Y]],
@@ -1615,7 +1634,7 @@ extern char *set_bg_block(List results, int *start,
 		if(ba_node->letter == '.') {
 			ba_node->letter = letters[color_count%62];
 			ba_node->color = colors[color_count%6];
-			debug3("count %d setting letter = %c "
+			debug4("count %d setting letter = %c "
 			       "color = %d",
 			       color_count,
 			       ba_node->letter,
@@ -2173,20 +2192,36 @@ extern int *find_bp_loc(char* bp_id)
 			check[4] = '\0';
 		}
 	}
+
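+	/* validate the digits of an Rxyz-style rack-midplane id */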
+	if((check[1] < '0' || check[1] > '9')
+	   || (check[2] < '0' || check[2] > '9')
+	   || (check[3] < '0' || check[3] > '9')) {
+		error("%s is not a valid Rack-Midplane (i.e. R000)", bp_id);
+		goto cleanup;
+	}
+
 #else
 	if(check[3] != '-') {
 		xfree(check);
 		check = xstrdup_printf("R%c%c-M%c",
 				       bp_id[1], bp_id[2], bp_id[3]);
 	}
-#endif
 
+	if((check[1] < '0' || check[1] > '9')
+	   || (check[2] < '0' || check[2] > '9')
+	   || (check[5] < '0' || check[5] > '9')) {
+		error("%s is not a valid Rack-Midplane (i.e. R00-M0)", bp_id);
+		goto cleanup;
+	}
+#endif
+
 	itr = list_iterator_create(bp_map_list);
 	while ((bp_map = list_next(itr)))  
 		if (!strcasecmp(bp_map->bp_id, check)) 
 			break;	/* we found it */
 	list_iterator_destroy(itr);
 
+cleanup:
 	xfree(check);
 
 	if(bp_map != NULL)
@@ -2213,11 +2248,22 @@ extern char *find_bp_rack_mid(char* xyz)
 	len -= 3;
 	if(len<0)
 		return NULL;
+
+	if((xyz[len] < '0' || xyz[len] > '9')
+	   || (xyz[len+1] < '0' || xyz[len+1] > '9')
+	   || (xyz[len+2] < '0' || xyz[len+2] > '9')) {
+		error("%s is not a valid Location (i.e. 000)", xyz);
+		return NULL;
+	}
+
 	number = xstrntol(&xyz[X]+len, NULL,
 			  BA_SYSTEM_DIMENSIONS, HOSTLIST_BASE);
 	coord[X] = number / (HOSTLIST_BASE * HOSTLIST_BASE);
 	coord[Y] = (number % (HOSTLIST_BASE * HOSTLIST_BASE)) / HOSTLIST_BASE;
 	coord[Z] = (number % HOSTLIST_BASE);
+
 	if(!bp_map_list) {
 		if(set_bp_map() == -1)
 			return NULL;
@@ -2259,7 +2305,7 @@ extern int load_block_wiring(char *bg_block_id)
 	ba_switch_t *ba_switch = NULL; 
 	int *geo = NULL;
 	
-	debug2("getting info for block %s\n", bg_block_id);
+	debug3("getting info for block %s\n", bg_block_id);
 	
 	if ((rc = bridge_get_block(bg_block_id,  &block_ptr)) != STATUS_OK) {
 		error("bridge_get_block(%s): %s", 
@@ -2275,7 +2321,7 @@ extern int load_block_wiring(char *bg_block_id)
 		return SLURM_ERROR;
 	} 
 	if(!switch_cnt) {
-		debug3("no switch_cnt");
+		debug4("no switch_cnt");
 		if ((rc = bridge_get_data(block_ptr, 
 					  RM_PartitionFirstBP, 
 					  &curr_bp)) 
@@ -2348,7 +2394,7 @@ extern int load_block_wiring(char *bg_block_id)
 			      bg_err_str(rc));
 			return SLURM_ERROR;
 		}
-		debug2("switch id = %s dim %d conns = %d", 
+		debug3("switch id = %s dim %d conns = %d", 
 		       switchid, dim, cnt);
 		ba_switch = &ba_system_ptr->
 			grid[geo[X]][geo[Y]][geo[Z]].axis_switch[dim];
@@ -2421,7 +2467,7 @@ extern int load_block_wiring(char *bg_block_id)
 				ba_system_ptr->grid[geo[X]][geo[Y]][geo[Z]].
 					used = true;		
 			}
-			debug3("connection going from %d -> %d",
+			debug4("connection going from %d -> %d",
 			      curr_conn.p1, curr_conn.p2);
 			
 			if(ba_switch->int_wire[curr_conn.p1].used) {
@@ -2483,7 +2529,7 @@ extern List get_and_set_block_wiring(char *bg_block_id)
 	List results = list_create(destroy_ba_node);
 	ListIterator itr = NULL;
 	
-	debug2("getting info for block %s\n", bg_block_id);
+	debug3("getting info for block %s\n", bg_block_id);
 	
 	if ((rc = bridge_get_block(bg_block_id,  &block_ptr)) != STATUS_OK) {
 		error("bridge_get_block(%s): %s", 
@@ -2499,7 +2545,7 @@ extern List get_and_set_block_wiring(char *bg_block_id)
 		goto end_it;
 	} 
 	if(!switch_cnt) {
-		debug3("no switch_cnt");
+		debug4("no switch_cnt");
 		if ((rc = bridge_get_data(block_ptr, 
 					  RM_PartitionFirstBP, 
 					  &curr_bp)) 
@@ -2578,7 +2624,7 @@ extern List get_and_set_block_wiring(char *bg_block_id)
 			      bg_err_str(rc));
 			goto end_it;
 		}
-		debug2("switch id = %s dim %d conns = %d", 
+		debug3("switch id = %s dim %d conns = %d", 
 		       switchid, dim, cnt);
 		
 		itr = list_iterator_create(results);
@@ -2665,7 +2711,7 @@ extern List get_and_set_block_wiring(char *bg_block_id)
 				}
 				ba_node->used = true;		
 			}
-			debug3("connection going from %d -> %d",
+			debug4("connection going from %d -> %d",
 			      curr_conn.p1, curr_conn.p2);
 			
 			if(ba_switch->int_wire[curr_conn.p1].used) {
@@ -2890,13 +2936,13 @@ static int _append_geo(int *geometry, List geos, int rotate)
 		
 	}
 	list_iterator_destroy(itr);
-	
+
 	if(geo_ptr == NULL) { 
 		geo = xmalloc(sizeof(int)*BA_SYSTEM_DIMENSIONS);
 		geo[X] = geometry[X];
 		geo[Y] = geometry[Y];
 		geo[Z] = geometry[Z];
-		debug3("adding geo %c%c%c",
+		debug4("adding geo %c%c%c",
 		       alpha_num[geo[X]], alpha_num[geo[Y]],
 		       alpha_num[geo[Z]]);
 		list_append(geos, geo);
@@ -2960,7 +3006,7 @@ static int _fill_in_coords(List results, List start_list,
 					continue;
 
 				if (!_node_used(ba_node, geometry[X])) {
-					debug3("here Adding %c%c%c",
+					debug4("here Adding %c%c%c",
 					       alpha_num[ba_node->coord[X]],
 					       alpha_num[ba_node->coord[Y]],
 					       alpha_num[ba_node->coord[Z]]);
@@ -3064,7 +3110,7 @@ static int _copy_the_path(List nodes, ba_switch_t *curr_switch,
 	node_curr = curr_switch->ext_wire[0].node_tar;
 	node_tar = curr_switch->ext_wire[port_tar].node_tar;
 	if(mark_switch->int_wire[source].used)
-		debug2("setting dim %d %c%c%c %d-> %c%c%c %d",
+		debug3("setting dim %d %c%c%c %d-> %c%c%c %d",
 		       dim,
 		       alpha_num[node_curr[X]],
 		       alpha_num[node_curr[Y]],
@@ -3092,7 +3138,7 @@ static int _copy_the_path(List nodes, ba_switch_t *curr_switch,
 	   && node_curr[Z] == node_tar[Z]) {
 		/* We are going to the same node! this should never
 		   happen */
-		debug4("something bad happened!! "
+		debug5("something bad happened!! "
 		       "we are on %c%c%c and are going to it "
 		       "from port %d - > %d", 
 		       alpha_num[node_curr[X]],
@@ -3132,7 +3178,7 @@ static int _copy_the_path(List nodes, ba_switch_t *curr_switch,
 					       [mark_node_tar[Z]]);
 			_new_ba_node(ba_node, mark_node_tar, false);
 			list_push(nodes, ba_node);
-			debug3("haven't seen %c%c%c adding it",
+			debug4("haven't seen %c%c%c adding it",
 			       alpha_num[ba_node->coord[X]], 
 			       alpha_num[ba_node->coord[Y]],
 			       alpha_num[ba_node->coord[Z]]);
@@ -3158,7 +3204,7 @@ static int _find_yz_path(ba_node_t *ba_node, int *first,
 
 	for(i2=1;i2<=2;i2++) {
 		if(geometry[i2] > 1) {
-			debug3("%d node %c%c%c port 2 -> ",
+			debug4("%d node %c%c%c port 2 -> ",
 			       i2,
 			       alpha_num[ba_node->coord[X]],
 			       alpha_num[ba_node->coord[Y]],
@@ -3166,7 +3212,7 @@ static int _find_yz_path(ba_node_t *ba_node, int *first,
 							       
 			dim_curr_switch = &ba_node->axis_switch[i2];
 			if(dim_curr_switch->int_wire[2].used) {
-				debug4("returning here");
+				debug5("returning here");
 				return 0;
 			}
 							
@@ -3175,16 +3221,16 @@ static int _find_yz_path(ba_node_t *ba_node, int *first,
 			next_node = &ba_system_ptr->
 				grid[node_tar[X]][node_tar[Y]][node_tar[Z]];
 			dim_next_switch = &next_node->axis_switch[i2];
-			debug3("%c%c%c port 5",
+			debug4("%c%c%c port 5",
 			       alpha_num[next_node->coord[X]],
 			       alpha_num[next_node->coord[Y]],
 			       alpha_num[next_node->coord[Z]]);
 							  
 			if(dim_next_switch->int_wire[5].used) {
-				debug2("returning here 2");
+				debug3("returning here 2");
 				return 0;
 			}
-			debug4("%d %d %d %d",i2, node_tar[i2],
+			debug5("%d %d %d %d",i2, node_tar[i2],
 			       first[i2], geometry[i2]);
 
 			/* Here we need to see where we are in
@@ -3201,7 +3247,7 @@ static int _find_yz_path(ba_node_t *ba_node, int *first,
 				count = (node_tar[i2]-first[i2]);
 
 			if(count == geometry[i2]) {
-				debug4("found end of me %c%c%c",
+				debug5("found end of me %c%c%c",
 				       alpha_num[node_tar[X]],
 				       alpha_num[node_tar[Y]],
 				       alpha_num[node_tar[Z]]);
@@ -3224,7 +3270,7 @@ static int _find_yz_path(ba_node_t *ba_node, int *first,
 								PASS_FOUND_Z;
 					}
 					while(node_tar[i2] != first[i2]) {
-						debug3("on dim %d at %d "
+						debug4("on dim %d at %d "
 						       "looking for %d",
 						       i2,
 						       node_tar[i2],
@@ -3232,7 +3278,7 @@ static int _find_yz_path(ba_node_t *ba_node, int *first,
 						
 						if(dim_curr_switch->
 						   int_wire[2].used) {
-							debug3("returning "
+							debug4("returning "
 							       "here 3");
 							return 0;
 						} 
@@ -3262,7 +3308,7 @@ static int _find_yz_path(ba_node_t *ba_node, int *first,
 							axis_switch[i2];
 					}
 									
-					debug3("back to first on dim %d "
+					debug4("back to first on dim %d "
 					       "at %d looking for %d",
 					       i2,
 					       node_tar[i2],
@@ -3333,7 +3379,7 @@ static int _find_yz_path(ba_node_t *ba_node, int *first,
 			*/
 
 			dim_curr_switch = &ba_node->axis_switch[i2];
-			debug3("%d node %c%c%c port 0 -> 1",
+			debug4("%d node %c%c%c port 0 -> 1",
 			       i2,
 			       alpha_num[ba_node->coord[X]],
 			       alpha_num[ba_node->coord[Y]],
@@ -3459,7 +3505,7 @@ static int _reset_the_path(ba_switch_t *curr_switch, int source,
 		      port_tar);
 		return 0;
 	}
-	debug4("from %c%c%c %d %d -> %c%c%c %d",
+	debug5("from %c%c%c %d %d -> %c%c%c %d",
 	       alpha_num[node_curr[X]],
 	       alpha_num[node_curr[Y]],
 	       alpha_num[node_curr[Z]],
@@ -3472,7 +3518,7 @@ static int _reset_the_path(ba_switch_t *curr_switch, int source,
 	if(node_curr[X] == node_tar[X]
 	   && node_curr[Y] == node_tar[Y]
 	   && node_curr[Z] == node_tar[Z]) {
-		debug4("%d something bad happened!!", dim);
+		debug5("%d something bad happened!!", dim);
 		return 0;
 	}
 	next_switch = &ba_system_ptr->
@@ -3633,7 +3679,7 @@ start_again:
 		x = startx-1;
 	while(x!=startx) {
 		x++;
-		debug3("finding %c%c%c try %d",
+		debug4("finding %c%c%c try %d",
 		       alpha_num[ba_request->geometry[X]],
 #ifdef HAVE_3D
 		       alpha_num[ba_request->geometry[Y]],
@@ -3643,7 +3689,7 @@ start_again:
 #ifdef HAVE_3D
 	new_node:
 #endif
-		debug2("starting at %c%c%c",
+		debug3("starting at %c%c%c",
 		       alpha_num[start[X]]
 #ifdef HAVE_3D
 		       , alpha_num[start[Y]],
@@ -3660,7 +3706,7 @@ start_again:
 			;
 
 		if (!_node_used(ba_node, ba_request->geometry[X])) {
-			debug3("trying this node %c%c%c %c%c%c %d",
+			debug4("trying this node %c%c%c %c%c%c %d",
 			       alpha_num[start[X]],
 			       alpha_num[start[Y]],
 			       alpha_num[start[Z]],
@@ -3745,7 +3791,7 @@ static bool _node_used(ba_node_t* ba_node, int x_size)
 	
 	/* if we've used this node in another block already */
 	if (!ba_node || ba_node->used) {
-		debug3("node %c%c%c used", 
+		debug4("node %c%c%c used", 
 		       alpha_num[ba_node->coord[X]],
 		       alpha_num[ba_node->coord[Y]],
 		       alpha_num[ba_node->coord[Z]]);
@@ -3767,7 +3813,7 @@ static bool _node_used(ba_node_t* ba_node, int x_size)
 		   other they must be connected to the other ports.
 		*/
 		if(ba_switch->int_wire[3].used && ba_switch->int_wire[5].used) {
-			debug3("switch full in the X dim on node %c%c%c!",
+			debug4("switch full in the X dim on node %c%c%c!",
 			       alpha_num[ba_node->coord[X]],
 			       alpha_num[ba_node->coord[Y]],
 			       alpha_num[ba_node->coord[Z]]);
@@ -3952,7 +3998,7 @@ static int _set_external_wires(int dim, int count, ba_node_t* source,
 			       _port_enum(from_port),
 			       _port_enum(to_port));	
 		
-		debug2("dim %d from %c%c%c %d -> %c%c%c %d",
+		debug3("dim %d from %c%c%c %d -> %c%c%c %d",
 		       dim,
 		       alpha_num[source->coord[X]],
 		       alpha_num[source->coord[Y]],
@@ -4076,6 +4122,57 @@ static int _set_external_wires(int dim, int count, ba_node_t* source,
 			      count, DIM_SIZE[X]);
 			break;
 		}
+	} else if(DIM_SIZE[X] == 9) {
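+		/* explicit X-dimension wiring for a 9-midplane system */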
+		switch(count) {
+		case 0:
+		case 4:
+			/* 0 and 4th Node */
+			/* nothing */
+		case 5:
+		case 6:
+		case 7:
+			/*already handled below */
+			break;
+		case 1:
+			/* 1st Node */
+			target = &ba_system_ptr->grid[7]
+				[source->coord[Y]]
+				[source->coord[Z]];
+			/* 4->3 of 7th and back */
+			_switch_config(source, target, dim, 4, 3);
+			_switch_config(target, source, dim, 4, 3);
+			break;
+		case 2:
+			/* 2nd Node */
+			target = &ba_system_ptr->grid[6]
+				[source->coord[Y]]
+				[source->coord[Z]];
+			/* 4->3 of 6th and back */
+			_switch_config(source, target, dim, 4, 3);
+			_switch_config(target, source, dim, 4, 3);
+			break;
+		case 3:
+			/* 3rd Node */
+			target = &ba_system_ptr->grid[5]
+				[source->coord[Y]]
+				[source->coord[Z]];
+			/* 4->3 of 5th and back */
+			_switch_config(source, target, dim, 4, 3);
+			_switch_config(target, source, dim, 4, 3);
+			break;
+		case 8:
+			/* 8th Node */
+			target = &ba_system_ptr->grid[0]
+				[source->coord[Y]]
+				[source->coord[Z]];
+			/* 4->3 of 0th */
+			_switch_config(source, target, dim, 4, 3);
+			break;
+		default:
+			fatal("got %d for a count on a %d X-dim system",
+			      count, DIM_SIZE[X]);
+			break;
+		}
 	} else if(DIM_SIZE[X] == 13) {
 		int temp_num = 0;
 
@@ -4099,7 +4196,7 @@ static int _set_external_wires(int dim, int count, ba_node_t* source,
 		case 5:
 			/* get the node count - 1 then subtract it
 			 * from 12 to get the new target and then go
-			 * from 3->4 and back again
+			 * from 4->3 and back again
 			 */
 			temp_num = 12 - (count - 1);
 			if(temp_num < 5) 
@@ -4109,10 +4206,10 @@ static int _set_external_wires(int dim, int count, ba_node_t* source,
 			target = &ba_system_ptr->grid[temp_num]
 				[source->coord[Y]]
 				[source->coord[Z]];
-			/* 3->4 */
-			_switch_config(source, target, dim, 3, 4);
-			/* and back 3->4 */
-			_switch_config(target, source, dim, 3, 4);
+			/* 4->3 */
+			_switch_config(source, target, dim, 4, 3);
+			/* and back 4->3 */
+			_switch_config(target, source, dim, 4, 3);
 			break;
 		case 7:
 			/* 7th Node */
@@ -4167,7 +4264,7 @@ static char *_set_internal_wires(List nodes, int size, int conn_type)
 			 alpha_num[ba_node[count]->coord[X]],
 			 alpha_num[ba_node[count]->coord[Y]],
 			 alpha_num[ba_node[count]->coord[Z]]);
-		debug3("name = %s", temp_name);
+		debug4("name = %s", temp_name);
 		count++;
 		hostlist_push(hostlist, temp_name);
 	}
@@ -4184,7 +4281,7 @@ static char *_set_internal_wires(List nodes, int size, int conn_type)
 			if(ba_node[i]->letter == '.') {
 				ba_node[i]->letter = letters[color_count%62];
 				ba_node[i]->color = colors[color_count%6];
-				debug3("count %d setting letter = %c "
+				debug4("count %d setting letter = %c "
 				       "color = %d",
 				       color_count,
 				       ba_node[i]->letter,
@@ -4281,7 +4378,7 @@ static int _find_x_path(List results, ba_node_t *ba_node,
 		return 0;
 	}			
 	
-	debug3("Algo(%d) found - %d", algo, found);
+	debug4("Algo(%d) found - %d", algo, found);
 
 	/* Check the 2 ports we can leave though in ports_to_try */
 	for(i=0;i<2;i++) {
@@ -4321,7 +4418,7 @@ static int _find_x_path(List results, ba_node_t *ba_node,
 			   already been before */
 			itr = list_iterator_create(results);
 			while((next_node = list_next(itr))) {
-				debug3("Algo(%d) looking at %c%c%c and %c%c%c",
+				debug4("Algo(%d) looking at %c%c%c and %c%c%c",
 				       algo,
 				       alpha_num[next_node->coord[X]],
 				       alpha_num[next_node->coord[Y]],
@@ -4338,7 +4435,7 @@ static int _find_x_path(List results, ba_node_t *ba_node,
 			}
 			list_iterator_destroy(itr);
 			if(not_first && found < DIM_SIZE[X]) {
-				debug2("Algo(%d) already been there before",
+				debug3("Algo(%d) already been there before",
 				       algo);
 				not_first = 0;
 				continue;
@@ -4355,17 +4452,17 @@ static int _find_x_path(List results, ba_node_t *ba_node,
 			next_switch = &next_node->axis_switch[X];
 
  			if((conn_type == SELECT_MESH) && (found == (x_size))) {
-				debug2("Algo(%d) we found the end of the mesh",
+				debug3("Algo(%d) we found the end of the mesh",
 				       algo);
 				return 1;
 			}
-			debug3("Algo(%d) Broke = %d Found = %d x_size = %d",
+			debug4("Algo(%d) Broke = %d Found = %d x_size = %d",
 			       algo, broke, found, x_size);
 
 			if(broke && (found == x_size)) {
 				goto found_path;
 			} else if(found == x_size) {
-				debug2("Algo(%d) finishing the torus!", algo);
+				debug3("Algo(%d) finishing the torus!", algo);
 
 				if(deny_pass && (*deny_pass & PASS_DENY_X)) {
 					info("we don't allow passthroughs 1");
@@ -4383,10 +4480,11 @@ static int _find_x_path(List results, ba_node_t *ba_node,
 				else
 					path = list_create(_delete_path_list);
 				
-				_finish_torus(curr_switch, 0, X, 0, start);
+				_finish_torus(results,
+					      curr_switch, 0, X, 0, start);
 
 				if(best_count < BEST_COUNT_INIT) {
-					debug2("Algo(%d) Found a best path "
+					debug3("Algo(%d) Found a best path "
 					       "with %d steps.",
 					       algo, best_count);
 					_set_best_path();
@@ -4401,7 +4499,7 @@ static int _find_x_path(List results, ba_node_t *ba_node,
 
 			if (!_node_used(next_node, x_size)) {
 #ifdef HAVE_BG
-				debug2("Algo(%d) found %d looking at %c%c%c "
+				debug3("Algo(%d) found %d looking at %c%c%c "
 				       "%d going to %c%c%c %d",
 				       algo,
 				       found,
@@ -4427,7 +4525,7 @@ static int _find_x_path(List results, ba_node_t *ba_node,
 				list_iterator_destroy(itr);
 				if(!check_node) {
 #ifdef HAVE_BG
-					debug2("Algo(%d) add %c%c%c",
+					debug3("Algo(%d) add %c%c%c",
 					       algo,
 					       alpha_num[next_node->coord[X]],
 					       alpha_num[next_node->coord[Y]],
@@ -4436,7 +4534,7 @@ static int _find_x_path(List results, ba_node_t *ba_node,
 					list_append(results, next_node);
 				} else {
 #ifdef HAVE_BG
-					debug2("Algo(%d) Hey this is already "
+					debug3("Algo(%d) Hey this is already "
 					       "added %c%c%c",
 					       algo,
 					       alpha_num[node_tar[X]],
@@ -4457,7 +4555,7 @@ static int _find_x_path(List results, ba_node_t *ba_node,
 				} else {
 				found_path:
 #ifdef HAVE_BG
-					debug2("Algo(%d) added node %c%c%c "
+					debug3("Algo(%d) added node %c%c%c "
 					       "%d %d -> %c%c%c %d %d",
 					       algo,
 					       alpha_num[ba_node->coord[X]],
@@ -4498,11 +4596,11 @@ static int _find_x_path(List results, ba_node_t *ba_node,
 	}
 
 	if(algo == BLOCK_ALGO_FIRST) {
-		debug2("Algo(%d) couldn't find path", algo);
+		debug3("Algo(%d) couldn't find path", algo);
 		return 0;
 	} else if(algo == BLOCK_ALGO_SECOND) {
 #ifdef HAVE_BG
-		debug2("Algo(%d) looking for the next free node "
+		debug3("Algo(%d) looking for the next free node "
 		       "starting at %c%c%c",
 		       algo,
 		       alpha_num[ba_node->coord[X]],
@@ -4523,7 +4621,7 @@ static int _find_x_path(List results, ba_node_t *ba_node,
 		_find_next_free_using_port_2(curr_switch, 0, results, X, 0);
 		
 		if(best_count < BEST_COUNT_INIT) {
-			debug2("Algo(%d) yes found next free %d", algo,
+			debug3("Algo(%d) yes found next free %d", algo,
 			       best_count);
 			node_tar = _set_best_path();
 
@@ -4543,7 +4641,7 @@ static int _find_x_path(List results, ba_node_t *ba_node,
 			next_switch = &next_node->axis_switch[X];
 			
 #ifdef HAVE_BG
-			debug2("Algo(%d) found %d looking at %c%c%c "
+			debug3("Algo(%d) found %d looking at %c%c%c "
 			       "going to %c%c%c %d",
 			       algo, found,
 			       alpha_num[ba_node->coord[X]],
@@ -4564,12 +4662,12 @@ static int _find_x_path(List results, ba_node_t *ba_node,
 				found--;
 				_reset_the_path(curr_switch, 0, 1, X);
 				_remove_node(results, next_node->coord);
-				debug2("Algo(%d) couldn't finish "
+				debug3("Algo(%d) couldn't finish "
 				       "the path off this one", algo);
 			}
 		} 
 		
-		debug2("Algo(%d) couldn't find path", algo);
+		debug3("Algo(%d) couldn't find path", algo);
 		return 0;
 	}
 
@@ -4590,7 +4688,7 @@ static int _remove_node(List results, int *node_tar)
 		if(node_tar[X] == ba_node->coord[X] 
 		   && node_tar[Y] == ba_node->coord[Y] 
 		   && node_tar[Z] == ba_node->coord[Z]) {
-			debug2("removing %c%c%c from list",
+			debug3("removing %c%c%c from list",
 			       alpha_num[node_tar[X]],
 			       alpha_num[node_tar[Y]],
 			       alpha_num[node_tar[Z]]);
@@ -4599,7 +4697,7 @@ static int _remove_node(List results, int *node_tar)
 		}
 #else
 		if(node_tar[X] == ba_node->coord[X]) {
-			debug2("removing %d from list",
+			debug3("removing %d from list",
 			       node_tar[X]);
 			list_remove (itr);
 			break;
@@ -4669,7 +4767,7 @@ static int _find_next_free_using_port_2(ba_switch_t *curr_switch,
 	   .used) {
 		
 #ifdef HAVE_BG
-		debug2("this one not found %c%c%c",
+		debug3("this one not found %c%c%c",
 		       alpha_num[node_tar[X]],
 		       alpha_num[node_tar[Y]],
 		       alpha_num[node_tar[Z]]);
@@ -4730,6 +4828,7 @@ static int _find_next_free_using_port_2(ba_switch_t *curr_switch,
 		}
 		list_iterator_destroy(itr);
 			
+		/* check to see if wire 0 is used with this port */
 		if(curr_switch->
 		   ext_wire[port_to_try].node_tar[X]
 		   == curr_switch->ext_wire[0].node_tar[X]  
@@ -4767,7 +4866,7 @@ static int _find_next_free_using_port_2(ba_switch_t *curr_switch,
 					dim, count);
 			while((temp_switch = list_pop(path)) != path_add){
 				xfree(temp_switch);
-				debug3("something here 1");
+				debug4("something here 1");
 			}
 		}
 	}
@@ -4793,7 +4892,8 @@ return_0:
  * to apply this path to the main system (ba_system_ptr)
  */
 
-static int _finish_torus(ba_switch_t *curr_switch, int source_port,
+static int _finish_torus(List results,
+			 ba_switch_t *curr_switch, int source_port,
 			 int dim, int count, int *start)
 {
 	ba_switch_t *next_switch = NULL;
@@ -4890,6 +4990,8 @@ static int _finish_torus(ba_switch_t *curr_switch, int source_port,
 				}
 			}
 			list_iterator_destroy(itr);
+
+			/* check to see if wire 0 is used with this port */
 			if((curr_switch->
 			    ext_wire[ports_to_try[i]].node_tar[X] ==
 			    curr_switch->ext_wire[0].node_tar[X] &&
@@ -4901,12 +5003,50 @@ static int _finish_torus(ba_switch_t *curr_switch, int source_port,
 			    curr_switch->ext_wire[0].node_tar[Z])) {
 				continue;
 			}
+
 			if(!used) {
+				ba_node_t *next_node = NULL;
 				port_tar = curr_switch->
 					ext_wire[ports_to_try[i]].port_tar;
 				node_tar = curr_switch->
 					ext_wire[ports_to_try[i]].node_tar;
-				
+
+				/* Check to see if we are going to a place we
+				   have already been; even though this may
+				   be possible electrically, the underlying
+				   infrastructure may not allow it. */
+				itr = list_iterator_create(results);
+				while((next_node = list_next(itr))) {
+					debug4("finishing_torus: "
+					       "looking at %c%c%c and %c%c%c",
+					       alpha_num[next_node->coord[X]],
+					       alpha_num[next_node->coord[Y]],
+					       alpha_num[next_node->coord[Z]],
+					       alpha_num[node_tar[X]],
+					       alpha_num[node_tar[Y]],
+					       alpha_num[node_tar[Z]]);
+					if((node_tar[X] == next_node->coord[X]) 
+					   && (node_tar[Y] 
+					       == next_node->coord[Y])
+					   && (node_tar[Z] 
+					       == next_node->coord[Z])) {
+						break;
+					}				
+				}
+				list_iterator_destroy(itr);
+				if(next_node) {
+					debug3("finishing_torus: "
+					       "Can't finish torus with "
+					       "%c%c%c we already were there.",
+					       alpha_num[next_node->coord[X]],
+					       alpha_num[next_node->coord[Y]],
+					       alpha_num[next_node->coord[Z]]);
+					continue;
+				}
+
 				next_switch = &ba_system_ptr->grid[node_tar[X]]
 #ifdef HAVE_3D
 					[node_tar[Y]]
@@ -4918,12 +5058,12 @@ static int _finish_torus(ba_switch_t *curr_switch, int source_port,
 				count++;
 				path_add->out = ports_to_try[i];
 				list_push(path, path_add);
-				_finish_torus(next_switch, port_tar, 
+				_finish_torus(results, next_switch, port_tar,
 					      dim, count, start);
 				while((temp_switch = list_pop(path))
 				      != path_add){
 					xfree(temp_switch);
-					debug3("something here 3");
+					debug4("something here 3");
 				}
 			}
 		}
@@ -4955,7 +5095,7 @@ static int *_set_best_path()
 			debug2("got a passthrough in X");
 		}
 #ifdef HAVE_3D
-		debug3("mapping %c%c%c %d->%d",
+		debug4("mapping %c%c%c %d->%d",
 		       alpha_num[path_switch->geometry[X]],
 		       alpha_num[path_switch->geometry[Y]],
 		       alpha_num[path_switch->geometry[Z]],
@@ -5016,189 +5156,3 @@ static void _destroy_geo(void *object)
 	int *geo_ptr = (int *)object;
 	xfree(geo_ptr);
 }
-
-//#define BUILD_EXE
-#ifdef BUILD_EXE
-/** */
-int main(int argc, char** argv)
-{
-	ba_request_t *request = (ba_request_t*) xmalloc(sizeof(ba_request_t)); 
-	log_options_t log_opts = LOG_OPTS_INITIALIZER;
-	int debug_level = 6;
-	node_info_msg_t *new_node_ptr = NULL;
-
-	List results;
-//	List results2;
-//	int i,j;
-	log_opts.stderr_level  = debug_level;
-	log_opts.logfile_level = debug_level;
-	log_opts.syslog_level  = debug_level;
-	
-	log_alter(log_opts, LOG_DAEMON, 
-		  "/dev/null");
-	
-	DIM_SIZE[X]=0;
-	DIM_SIZE[Y]=0;
-	DIM_SIZE[Z]=0;
-	while (slurm_load_node((time_t) NULL, &new_node_ptr, SHOW_ALL)) { 
-		
-		sleep(10);	/* keep trying to reconnect */
-	}
-	
-	ba_init(new_node_ptr);
-	init_wires(NULL);
-						
-	results = list_create(NULL);
-	request->geometry[0] = 1;
-	request->geometry[1] = 1;
-	request->geometry[2] = 1;
-	request->start[0] = 6;
-	request->start[1] = 3;
-	request->start[2] = 2;
-	request->start_req = 1;
-//	request->size = 1;
-	request->rotate = 0;
-	request->elongate = 0;
-	request->conn_type = SELECT_TORUS;
-	new_ba_request(request);
-	print_ba_request(request);
-	if(!allocate_block(request, results)) {
-       		debug("couldn't allocate %c%c%c",
-		       request->geometry[0],
-		       request->geometry[1],
-		       request->geometry[2]);
-	}
-	list_destroy(results);
-
-	results = list_create(NULL);
-	request->geometry[0] = 2;
-	request->geometry[1] = 4;
-	request->geometry[2] = 1;
-	request->start[0] = 3;
-	request->start[1] = 0;
-	request->start[2] = 2;
-	request->start_req = 1;
-//	request->size = 16;
-	request->rotate = 0;
-	request->elongate = 0;
-	request->conn_type = SELECT_TORUS;
-	new_ba_request(request);
-	print_ba_request(request);
-	if(!allocate_block(request, results)) {
-       		debug("couldn't allocate %c%c%c",
-		       alpha_num[request->geometry[0]],
-		       alpha_num[request->geometry[1]],
-		       alpha_num[request->geometry[2]]);
-	}
-	list_destroy(results);
-
-	results = list_create(NULL);
-	request->geometry[0] = 2;
-	request->geometry[1] = 1;
-	request->geometry[2] = 4;
-	request->start[0] = 5;
-	request->start[1] = 2;
-	request->start[2] = 0;
-	request->start_req = 1;
-	request->rotate = 0;
-	request->elongate = 0;
-	request->conn_type = SELECT_TORUS;
-	new_ba_request(request);
-	print_ba_request(request);
-	if(!allocate_block(request, results)) {
-       		debug("couldn't allocate %c%c%c",
-		       alpha_num[request->geometry[0]],
-		       alpha_num[request->geometry[1]],
-		       alpha_num[request->geometry[2]]);
-	}
-	list_destroy(results);
-	
-/* 	results = list_create(NULL); */
-/* 	request->geometry[0] = 4; */
-/* 	request->geometry[1] = 4; */
-/* 	request->geometry[2] = 4; */
-/* 	//request->size = 2; */
-/* 	request->conn_type = SELECT_TORUS; */
-/* 	new_ba_request(request); */
-/* 	print_ba_request(request); */
-/* 	if(!allocate_block(request, results)) { */
-/*        		printf("couldn't allocate %c%c%c\n", */
-/* 		       request->geometry[0], */
-/* 		       request->geometry[1], */
-/* 		       request->geometry[2]); */
-/* 	} */
-
-/* 	results = list_create(NULL); */
-/* 	request->geometry[0] = 1; */
-/* 	request->geometry[1] = 4; */
-/* 	request->geometry[2] = 4; */
-/* 	//request->size = 2; */
-/* 	request->conn_type = SELECT_TORUS; */
-/* 	new_ba_request(request); */
-/* 	print_ba_request(request); */
-/* 	if(!allocate_block(request, results)) { */
-/*        		printf("couldn't allocate %c%c%c\n", */
-/* 		       request->geometry[0], */
-/* 		       request->geometry[1], */
-/* 		       request->geometry[2]); */
-/* 	} */
-	
-	int dim,j;
-	int x,y,z;
-	int startx=0;
-	int starty=0;
-	int startz=0;
-	int endx=DIM_SIZE[X];
-	int endy=1;//DIM_SIZE[Y];
-	int endz=1;//DIM_SIZE[Z];
-
-	for(x=startx;x<endx;x++) {
-		for(y=starty;y<endy;y++) {
-			for(z=startz;z<endz;z++) {
-				ba_node_t *curr_node = 
-					&(ba_system_ptr->grid[x][y][z]);
-				info("Node %c%c%c Used = %d Letter = %c",
-				     alpha_num[x],alpha_num[y],alpha_num[z],
-				     curr_node->used,
-				     curr_node->letter);
-				for(dim=0;dim<1;dim++) {
-					info("Dim %d",dim);
-					ba_switch_t *wire =
-						&curr_node->axis_switch[dim];
-					for(j=0;j<NUM_PORTS_PER_NODE;j++)
-						info("\t%d -> %d -> %c%c%c %d "
-						     "Used = %d",
-						     j, wire->int_wire[j].
-						     port_tar,
-						     alpha_num[wire->ext_wire[
-							     wire->int_wire[j].
-							     port_tar].
-							       node_tar[X]],
-						     alpha_num[wire->ext_wire[
-							     wire->int_wire[j].
-							     port_tar].
-						     node_tar[Y]],
-						     alpha_num[wire->ext_wire[
-							     wire->int_wire[j].
-							     port_tar].
-						     node_tar[Z]],
-						     wire->ext_wire[
-							     wire->int_wire[j].
-							     port_tar].
-						     port_tar,
-						     wire->int_wire[j].used);
-				}
-			}
-		}
-	}
-	/* list_destroy(results); */
-
-/* 	ba_fini(); */
-
-/* 	delete_ba_request(request); */
-	
-	return 0;
-}
-
-
-#endif
diff --git a/src/plugins/select/bluegene/block_allocator/block_allocator.h b/src/plugins/select/bluegene/block_allocator/block_allocator.h
index 45aea50ca0f16e7946f6cabad8da7745395a1b6b..61e62e059fb206825cddd9d2414b6270c10454bb 100644
--- a/src/plugins/select/bluegene/block_allocator/block_allocator.h
+++ b/src/plugins/select/bluegene/block_allocator/block_allocator.h
@@ -7,7 +7,8 @@
  *  Written by Dan Phung <phung4@llnl.gov>, Danny Auble <da@llnl.gov>
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -272,6 +273,7 @@ extern int DIM_SIZE[BA_SYSTEM_DIMENSIONS]; /* how many midplanes in
 extern s_p_options_t bg_conf_file_options[]; /* used to parse the
 					      * bluegene.conf file. */
 extern uint16_t ba_deny_pass;
+extern ba_system_t *ba_system_ptr;
 
 /* Translate a state enum to a readable string */
 extern char *bg_block_state_string(rm_partition_state_t state);
@@ -314,13 +316,9 @@ extern void destroy_ba_node(void *ptr);
  * IN/OUT - geometry: requested/returned geometry of block
  * IN - linuximage: LinuxImage for this block if not default
  * IN - mloaderimage: MLoaderImage for this block if not default
- * IN - nodecards: Number of nodecards in each block in request only
- *      used of small block allocations.
  * OUT - passthroughs: if there were passthroughs used in the
  *       generation of the block.
  * IN - procs: Number of real processors requested
- * IN - quarters: Number of midplane quarters in each block in request only
- *      used of small block allocations.
  * IN - RamDiskimage: RamDiskImage for this block if not default
  * IN - rotate: if true, allows rotation of block during fit
  * OUT - save_name: hostlist of midplanes used in block
diff --git a/src/plugins/select/bluegene/block_allocator/bridge_linker.c b/src/plugins/select/bluegene/block_allocator/bridge_linker.c
index c2f90061d2a05f8e38f66f71237b68fd3596311d..43ef5210fdfd9dd1ce8a1b6eb11c7e83c60aafa3 100644
--- a/src/plugins/select/bluegene/block_allocator/bridge_linker.c
+++ b/src/plugins/select/bluegene/block_allocator/bridge_linker.c
@@ -7,7 +7,8 @@
  *  Written by Dan Phung <phung4@llnl.gov>, Danny Auble <da@llnl.gov>
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/plugins/select/bluegene/block_allocator/bridge_linker.h b/src/plugins/select/bluegene/block_allocator/bridge_linker.h
index 8c4a00cd8423fec0c4d651ad8a828df2a71817b0..6ec9bf8bc5da1c8dfe14c070647311406b80ddbf 100644
--- a/src/plugins/select/bluegene/block_allocator/bridge_linker.h
+++ b/src/plugins/select/bluegene/block_allocator/bridge_linker.h
@@ -7,7 +7,8 @@
  *  Written by Dan Phung <phung4@llnl.gov>, Danny Auble <da@llnl.gov>
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/plugins/select/bluegene/block_allocator/wire_test.c b/src/plugins/select/bluegene/block_allocator/wire_test.c
new file mode 100644
index 0000000000000000000000000000000000000000..29a720d7c19f0af9bc47e359c6c67775de33ec5d
--- /dev/null
+++ b/src/plugins/select/bluegene/block_allocator/wire_test.c
@@ -0,0 +1,201 @@
+/*****************************************************************************\
+ *  wire_test.c - used to debug and test wires on any given system.  
+ *
+ *  $Id: wire_test.c 17495 2009-05-14 16:49:52Z da $
+ *****************************************************************************
+ *  Copyright (C) 2004 Lawrence Livermore National Security.
+ *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+ *  Written by Danny Auble <da@llnl.gov>
+ *  
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
+ *  
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission 
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and 
+ *  distribute linked combinations including the two. You must obey the GNU 
+ *  General Public License in all respects for all of the code used other than 
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this 
+ *  exception to your version of the file(s), but you are not obligated to do 
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in 
+ *  the program, then also delete it here.
+ *  
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *  
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#if HAVE_CONFIG_H
+#  include "config.h"
+#endif
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <math.h>
+#include "block_allocator.h"
+#include "src/common/uid.h"
+#include "src/common/timers.h"
+
+/** */
+int main(int argc, char** argv)
+{
+	ba_request_t *request = (ba_request_t*) xmalloc(sizeof(ba_request_t)); 
+	log_options_t log_opts = LOG_OPTS_INITIALIZER;
+	int debug_level = 5;
+
+	List results;
+//	List results2;
+//	int i,j;
+	log_opts.stderr_level  = debug_level;
+	log_opts.logfile_level = debug_level;
+	log_opts.syslog_level  = debug_level;
+	
+	log_alter(log_opts, LOG_DAEMON, 
+		  "/dev/null");
+	
+	DIM_SIZE[X]=0;
+	DIM_SIZE[Y]=0;
+	DIM_SIZE[Z]=0;
+
+	slurm_conf_reinit(NULL);
+	ba_init(NULL);
+	init_wires(NULL);
+		
+	/* [010x831] */
+/* 	results = list_create(NULL); */
+/* 	request->geometry[0] = 9; */
+/* 	request->geometry[1] = 3; */
+/* 	request->geometry[2] = 2; */
+/* 	request->start[0] = 0; */
+/* 	request->start[1] = 1; */
+/* 	request->start[2] = 0; */
+/* 	request->start_req = 1; */
+/* //	request->size = 16; */
+/* 	request->rotate = 0; */
+/* 	request->elongate = 0; */
+/* 	request->conn_type = SELECT_TORUS; */
+/* 	new_ba_request(request); */
+/* 	print_ba_request(request); */
+/* 	if(!allocate_block(request, results)) { */
+/*        		debug("couldn't allocate %c%c%c", */
+/* 		       alpha_num[request->geometry[0]], */
+/* 		       alpha_num[request->geometry[1]], */
+/* 		       alpha_num[request->geometry[2]]); */
+/* 	} */
+/* 	list_destroy(results); */
+
+/* 	/\* [001x801] *\/ */
+/* 	results = list_create(NULL); */
+/* 	request->geometry[0] = 9; */
+/* 	request->geometry[1] = 1; */
+/* 	request->geometry[2] = 1; */
+/* 	request->start[0] = 0; */
+/* 	request->start[1] = 0; */
+/* 	request->start[2] = 1; */
+/* 	request->start_req = 1; */
+/* //	request->size = 1; */
+/* 	request->rotate = 0; */
+/* 	request->elongate = 0; */
+/* 	request->conn_type = SELECT_TORUS; */
+/* 	new_ba_request(request); */
+/* 	print_ba_request(request); */
+/* 	if(!allocate_block(request, results)) { */
+/*        		debug("couldn't allocate %c%c%c", */
+/* 		       request->geometry[0], */
+/* 		       request->geometry[1], */
+/* 		       request->geometry[2]); */
+/* 	} */
+/* 	list_destroy(results); */
+
+	/* 7x4x2 torus request, rotation/elongation allowed */
+	results = list_create(NULL);
+	request->geometry[0] = 7;
+	request->geometry[1] = 4;
+	request->geometry[2] = 2;
+	request->start[0] = 0;
+	request->start[1] = 0;
+	request->start[2] = 0;
+	request->start_req = 0;
+//	request->size = 1;
+	request->rotate = 1;
+	request->elongate = 1;
+	request->conn_type = SELECT_TORUS;
+	new_ba_request(request);
+	print_ba_request(request);
+	if(!allocate_block(request, results)) {
+       		debug("couldn't allocate %c%c%c",
+		       alpha_num[request->geometry[0]],
+		       alpha_num[request->geometry[1]],
+		       alpha_num[request->geometry[2]]);
+	}
+	list_destroy(results);
+
+	
+	int dim,j;
+	int x,y,z;
+	int startx=0;
+	int starty=0;
+	int startz=0;
+	int endx=DIM_SIZE[X];
+	int endy=1;//DIM_SIZE[Y];
+	int endz=1;//DIM_SIZE[Z];
+
+	for(x=startx;x<endx;x++) {
+		for(y=starty;y<endy;y++) {
+			for(z=startz;z<endz;z++) {
+				ba_node_t *curr_node = 
+					&(ba_system_ptr->grid[x][y][z]);
+				info("Node %c%c%c Used = %d Letter = %c",
+				     alpha_num[x],alpha_num[y],alpha_num[z],
+				     curr_node->used,
+				     curr_node->letter);
+				for(dim=0;dim<1;dim++) {
+					info("Dim %d",dim);
+					ba_switch_t *wire =
+						&curr_node->axis_switch[dim];
+					for(j=0;j<NUM_PORTS_PER_NODE;j++)
+						info("\t%d -> %d -> %c%c%c %d "
+						     "Used = %d",
+						     j, wire->int_wire[j].
+						     port_tar,
+						     alpha_num[wire->ext_wire[
+							     wire->int_wire[j].
+							     port_tar].
+							       node_tar[X]],
+						     alpha_num[wire->ext_wire[
+							     wire->int_wire[j].
+							     port_tar].
+						     node_tar[Y]],
+						     alpha_num[wire->ext_wire[
+							     wire->int_wire[j].
+							     port_tar].
+						     node_tar[Z]],
+						     wire->ext_wire[
+							     wire->int_wire[j].
+							     port_tar].
+						     port_tar,
+						     wire->int_wire[j].used);
+				}
+			}
+		}
+	}
+	/* list_destroy(results); */
+
+/* 	ba_fini(); */
+
+/* 	delete_ba_request(request); */
+	
+	return 0;
+}
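
Editor's note: distilled from the harness above, this is the minimal
call sequence wire_test.c exercises against the block allocator.  Like
the harness itself it is not standalone; it must be linked against the
block_allocator objects and SLURM's common libraries:

#include "block_allocator.h"

static int try_allocate(void)
{
	/* xmalloc zero-fills, so unset request fields default to 0 */
	ba_request_t *request = xmalloc(sizeof(ba_request_t));
	List results = list_create(NULL);
	int rc;

	slurm_conf_reinit(NULL);	/* read slurm.conf defaults */
	ba_init(NULL);			/* build the midplane grid */
	init_wires(NULL);		/* populate switch wiring */

	request->geometry[0] = 1;	/* illustrative 1x1x1 torus */
	request->geometry[1] = 1;
	request->geometry[2] = 1;
	request->conn_type = SELECT_TORUS;
	new_ba_request(request);

	rc = allocate_block(request, results);
	list_destroy(results);
	return rc;
}
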
diff --git a/src/plugins/select/bluegene/plugin/Makefile.in b/src/plugins/select/bluegene/plugin/Makefile.in
index a862654eebba3f305d436b2d595f26bb20d10785..aca3aceb39e658e30bc40261b0220521cdf229ce 100644
--- a/src/plugins/select/bluegene/plugin/Makefile.in
+++ b/src/plugins/select/bluegene/plugin/Makefile.in
@@ -46,14 +46,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -176,6 +180,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
diff --git a/src/plugins/select/bluegene/plugin/bg_block_info.c b/src/plugins/select/bluegene/plugin/bg_block_info.c
index 5bc20ac1551d7fa567d3c0dc8da2c93d9bf6d0de..114334712df40f567969a9b84a01b90be6424350 100644
--- a/src/plugins/select/bluegene/plugin/bg_block_info.c
+++ b/src/plugins/select/bluegene/plugin/bg_block_info.c
@@ -1,14 +1,15 @@
 /*****************************************************************************\
  *  bg_block_info.c - bluegene block information from the db2 database.
  *
- *  $Id: bg_block_info.c 17202 2009-04-09 16:56:23Z da $
+ *  $Id: bg_block_info.c 17534 2009-05-19 00:58:46Z da $
  *****************************************************************************
  *  Copyright (C) 2004-2006 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Danny Auble <da@llnl.gov>
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -90,7 +91,7 @@ static int _block_is_deallocating(bg_record_t *bg_record)
 	if(bg_record->modifying)
 		return SLURM_SUCCESS;
 	
-	user_name = xstrdup(bg_slurm_user_name);
+	user_name = xstrdup(bg_conf->slurm_user_name);
 	if(remove_all_users(bg_record->bg_block_id, NULL) 
 	   == REMOVE_USER_ERR) {
 		error("Something happened removing "
@@ -136,9 +137,10 @@ static int _block_is_deallocating(bg_record_t *bg_record)
 		bg_record->target_name = xstrdup(bg_record->user_name);
 	}
 
-	if(remove_from_bg_list(bg_job_block_list, bg_record) == SLURM_SUCCESS) 
+	if(remove_from_bg_list(bg_lists->job_running, bg_record)
+	   == SLURM_SUCCESS) 
 		num_unused_cpus += bg_record->cpu_cnt;			       
-	remove_from_bg_list(bg_booted_block_list, bg_record);
+	remove_from_bg_list(bg_lists->booted, bg_record);
 
 	xfree(user_name);
 			
@@ -174,7 +176,7 @@ extern int block_ready(struct job_record *job_ptr)
 	rc = select_g_get_jobinfo(job_ptr->select_jobinfo,
 				  SELECT_DATA_BLOCK_ID, &block_id);
 	if (rc == SLURM_SUCCESS) {
-		bg_record = find_bg_record_in_list(bg_list, block_id);
+		bg_record = find_bg_record_in_list(bg_lists->main, block_id);
 		slurm_mutex_lock(&block_state_mutex);
 		
 		if(bg_record) {
@@ -189,7 +191,7 @@ extern int block_ready(struct job_record *job_ptr)
 			else
 				rc = READY_JOB_ERROR;	/* try again */
 		} else {
-			error("block_ready: block %s not in bg_list.",
+			error("block_ready: block %s not in bg_lists->main.",
 			      block_id);
 			rc = READY_JOB_FATAL;	/* fatal error */
 		}
@@ -213,10 +215,9 @@ extern void pack_block(bg_record_t *bg_record, Buf buffer)
 	pack16((uint16_t)bg_record->conn_type, buffer);
 #ifdef HAVE_BGL
 	pack16((uint16_t)bg_record->node_use, buffer);	
-	pack16((uint16_t)0, buffer);	
-	pack16((uint16_t)0, buffer);	
 #endif
 	pack32((uint32_t)bg_record->node_cnt, buffer);
+	pack32((uint32_t)bg_record->job_running, buffer);
 	pack_bit_fmt(bg_record->bitmap, buffer);
 	pack_bit_fmt(bg_record->ionode_bitmap, buffer);
 #ifdef HAVE_BGL
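
Editor's note: pack_block now packs job_running as a 32-bit field and
drops the two 16-bit pads, so whatever unpacks this buffer must change
in lock step -- pack and unpack have to agree on both field order and
field width.  A standalone toy of that contract (host byte order and
memcpy stand in for SLURM's real Buf/pack16/pack32 API):

#include <assert.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
	unsigned char buf[16], *p = buf;
	uint16_t conn_type = 1;			/* packed first, 16 bits */
	uint32_t node_cnt = 512, job_running = 42;

	memcpy(p, &conn_type, 2);   p += 2;
	memcpy(p, &node_cnt, 4);    p += 4;
	memcpy(p, &job_running, 4);		/* new 32-bit field */

	uint16_t ct; uint32_t nc, jr;
	p = buf;
	memcpy(&ct, p, 2); p += 2;	/* unpack mirrors pack exactly */
	memcpy(&nc, p, 4); p += 4;
	memcpy(&jr, p, 4);
	assert(ct == 1 && nc == 512 && jr == 42);
	return 0;
}
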
@@ -248,18 +249,18 @@ extern int update_block_list()
 	if(!kill_job_list)
 		kill_job_list = list_create(_destroy_kill_struct);
 
-	if(!bg_list) 
+	if(!bg_lists->main) 
 		return updated;
 	
 	slurm_mutex_lock(&block_state_mutex);
-	itr = list_iterator_create(bg_list);
+	itr = list_iterator_create(bg_lists->main);
 	while ((bg_record = (bg_record_t *) list_next(itr)) != NULL) {
 		if(!bg_record->bg_block_id)
 			continue;
 		name = bg_record->bg_block_id;
 		if ((rc = bridge_get_block_info(name, &block_ptr)) 
 		    != STATUS_OK) {
-			if(bluegene_layout_mode == LAYOUT_DYNAMIC) {
+			if(bg_conf->layout_mode == LAYOUT_DYNAMIC) {
 				switch(rc) {
 				case INCONSISTENT_DATA:
 					debug2("got inconsistent data when "
@@ -301,8 +302,8 @@ extern int update_block_list()
 			updated = 1;
 		}
 #else
-		if((bg_record->node_cnt < bluegene_bp_node_cnt) 
-		   || (bluegene_bp_node_cnt == bluegene_nodecard_node_cnt)) {
+		if((bg_record->node_cnt < bg_conf->bp_node_cnt) 
+		   || (bg_conf->bp_node_cnt == bg_conf->nodecard_node_cnt)) {
 			char *mode = NULL;
 			uint16_t conn_type = SELECT_SMALL;
 			if ((rc = bridge_get_data(block_ptr,
@@ -404,12 +405,12 @@ extern int update_block_list()
 			else if(bg_record->state == RM_PARTITION_CONFIGURING) 
 				bg_record->boot_state = 1;
 			else if(bg_record->state == RM_PARTITION_FREE) {
-				if(remove_from_bg_list(bg_job_block_list, 
+				if(remove_from_bg_list(bg_lists->job_running, 
 						       bg_record) 
 				   == SLURM_SUCCESS) {
 					num_unused_cpus += bg_record->cpu_cnt;
 				}
-				remove_from_bg_list(bg_booted_block_list,
+				remove_from_bg_list(bg_lists->booted,
 						    bg_record);
 			} 
 			updated = 1;
@@ -445,7 +446,7 @@ extern int update_block_list()
 					freeit->jobid = bg_record->job_running;
 					list_push(kill_job_list, freeit);
 					if(remove_from_bg_list(
-						   bg_job_block_list, 
+						   bg_lists->job_running, 
 						   bg_record) 
 					   == SLURM_SUCCESS) {
 						num_unused_cpus += 
@@ -455,7 +456,7 @@ extern int update_block_list()
 					error("block %s in an error "
 					      "state while booting.",
 					      bg_record->bg_block_id);
-				remove_from_bg_list(bg_booted_block_list,
+				remove_from_bg_list(bg_lists->booted,
 						    bg_record);
 				trigger_block_error();
 				break;
@@ -495,14 +496,14 @@ extern int update_block_list()
 					bg_record->boot_state = 0;
 					bg_record->boot_count = 0;
 					if(remove_from_bg_list(
-						   bg_job_block_list, 
+						   bg_lists->job_running, 
 						   bg_record) 
 					   == SLURM_SUCCESS) {
 						num_unused_cpus += 
 							bg_record->cpu_cnt;
 					} 
 					remove_from_bg_list(
-						bg_booted_block_list,
+						bg_lists->booted,
 						bg_record);
 				}
 				break;
@@ -576,11 +577,11 @@ extern int update_freeing_block_list()
 	bg_record_t *bg_record = NULL;
 	ListIterator itr = NULL;
 	
-	if(!bg_freeing_list) 
+	if(!bg_lists->freeing) 
 		return updated;
 	
 	slurm_mutex_lock(&block_state_mutex);
-	itr = list_iterator_create(bg_freeing_list);
+	itr = list_iterator_create(bg_lists->freeing);
 	while ((bg_record = (bg_record_t *) list_next(itr)) != NULL) {
 		if(!bg_record->bg_block_id)
 			continue;
@@ -588,7 +589,7 @@ extern int update_freeing_block_list()
 		name = bg_record->bg_block_id;
 		if ((rc = bridge_get_block_info(name, &block_ptr)) 
 		    != STATUS_OK) {
-			if(bluegene_layout_mode == LAYOUT_DYNAMIC) {
+			if(bg_conf->layout_mode == LAYOUT_DYNAMIC) {
 				switch(rc) {
 				case INCONSISTENT_DATA:
 					debug2("got inconsistent data when "
diff --git a/src/plugins/select/bluegene/plugin/bg_block_info.h b/src/plugins/select/bluegene/plugin/bg_block_info.h
index d50db0d0953bb0715af2d15861082bcc564d1d1e..9d20a11b64973519eb080b55bf13091db32bbca5 100644
--- a/src/plugins/select/bluegene/plugin/bg_block_info.h
+++ b/src/plugins/select/bluegene/plugin/bg_block_info.h
@@ -6,7 +6,8 @@
  *  Written by Danny Auble <da@llnl.gov>
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/plugins/select/bluegene/plugin/bg_boot_time.h b/src/plugins/select/bluegene/plugin/bg_boot_time.h
index dd53c74c36c8502ff1c6ee5a66fcd631691c4fe4..d1cd399b7dd50a65389374f58ac6740e7146cfb9 100644
--- a/src/plugins/select/bluegene/plugin/bg_boot_time.h
+++ b/src/plugins/select/bluegene/plugin/bg_boot_time.h
@@ -7,7 +7,8 @@
  *  Written by Morris Jette <jette1@llnl.gov>
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/plugins/select/bluegene/plugin/bg_job_place.c b/src/plugins/select/bluegene/plugin/bg_job_place.c
index abd4c48af6a2d02e8c278ad5e93dcf20f13616cc..0174a675a08943b69f916f63528588c4370b4d87 100644
--- a/src/plugins/select/bluegene/plugin/bg_job_place.c
+++ b/src/plugins/select/bluegene/plugin/bg_job_place.c
@@ -1,15 +1,15 @@
 /*****************************************************************************\
  *  bg_job_place.c - blue gene job placement (e.g. base block selection)
  *  functions.
- *
- *  $Id: bg_job_place.c 17205 2009-04-09 17:24:11Z da $ 
  *****************************************************************************
  *  Copyright (C) 2004-2007 The Regents of the University of California.
+ *  Copyright (C) 2008 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Dan Phung <phung4@llnl.gov> and Morris Jette <jette1@llnl.gov>
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -46,8 +46,6 @@
 #include "bluegene.h"
 #include "dynamic_block.h"
 
-#ifdef HAVE_BG 
-
 #define _DEBUG 0
 #define MAX_GROUPS 128
 
@@ -63,8 +61,8 @@ pthread_mutex_t create_dynamic_mutex = PTHREAD_MUTEX_INITIALIZER;
 pthread_mutex_t job_list_test_mutex = PTHREAD_MUTEX_INITIALIZER;
 
 /* This list is for the test_job_list function because we will be
- * adding and removing blocks off the bg_job_block_list and don't want
- * to ruin that list in submit_job it should = bg_job_block_list
+ * adding and removing blocks from bg_lists->job_running and don't want
+ * to ruin that list. In submit_job it should equal bg_lists->job_running;
  * otherwise it should be a copy of that list.
  */
 List job_block_test_list = NULL;
@@ -99,7 +97,6 @@ static int _check_for_booted_overlapping_blocks(
 	bool test_only);
 static int _dynamically_request(List block_list, int *blocks_added,
 				ba_request_t *request,
-				bitstr_t* slurm_block_bitmap,
 				char *user_req_nodes);
 static int _find_best_block_match(List block_list, int *blocks_added,
 				  struct job_record* job_ptr,
@@ -284,7 +281,7 @@ static int _check_images(struct job_record* job_ptr,
 			     SELECT_DATA_BLRTS_IMAGE, blrtsimage);
 	
 	if (*blrtsimage) {
-		allow = _test_image_perms(*blrtsimage, bg_blrtsimage_list, 
+		allow = _test_image_perms(*blrtsimage, bg_conf->blrts_list, 
 					  job_ptr);
 		if (!allow) {
 			error("User %u:%u is not allowed to use BlrtsImage %s",
@@ -298,7 +295,7 @@ static int _check_images(struct job_record* job_ptr,
 	select_g_get_jobinfo(job_ptr->select_jobinfo,
 			     SELECT_DATA_LINUX_IMAGE, linuximage);
 	if (*linuximage) {
-		allow = _test_image_perms(*linuximage, bg_linuximage_list, 
+		allow = _test_image_perms(*linuximage, bg_conf->linux_list, 
 					  job_ptr);
 		if (!allow) {
 			error("User %u:%u is not allowed to use LinuxImage %s",
@@ -310,7 +307,8 @@ static int _check_images(struct job_record* job_ptr,
 	select_g_get_jobinfo(job_ptr->select_jobinfo,
 			     SELECT_DATA_MLOADER_IMAGE, mloaderimage);
 	if (*mloaderimage) {
-		allow = _test_image_perms(*mloaderimage, bg_mloaderimage_list, 
+		allow = _test_image_perms(*mloaderimage,
+					  bg_conf->mloader_list, 
 					  job_ptr);
 		if(!allow) {
 			error("User %u:%u is not allowed "
@@ -324,7 +322,8 @@ static int _check_images(struct job_record* job_ptr,
 	select_g_get_jobinfo(job_ptr->select_jobinfo,
 			     SELECT_DATA_RAMDISK_IMAGE, ramdiskimage);
 	if (*ramdiskimage) {
-		allow = _test_image_perms(*ramdiskimage, bg_ramdiskimage_list, 
+		allow = _test_image_perms(*ramdiskimage,
+					  bg_conf->ramdisk_list, 
 					  job_ptr);
 		if(!allow) {
 			error("User %u:%u is not allowed "
@@ -372,9 +371,9 @@ static bg_record_t *_find_matching_block(List block_list,
 			continue;
 		} else if((bg_record->job_running != NO_JOB_RUNNING) 
 			  && (bg_record->job_running != job_ptr->job_id)
-			  && (bluegene_layout_mode == LAYOUT_DYNAMIC 
+			  && (bg_conf->layout_mode == LAYOUT_DYNAMIC 
 			      || (!test_only 
-				  && bluegene_layout_mode != LAYOUT_DYNAMIC))) {
+				  && bg_conf->layout_mode != LAYOUT_DYNAMIC))) {
 			debug("block %s in use by %s job %d", 
 			      bg_record->bg_block_id,
 			      bg_record->user_name,
@@ -478,7 +477,16 @@ static bg_record_t *_find_matching_block(List block_list,
 					continue;			
 				} 
 				goto good_conn_type;
-			} 
+			} else if(bg_record->conn_type >= SELECT_SMALL) {
+				/* Since the cpu count has already
+				   been checked, we are looking for a
+				   block in a range that includes both
+				   small and regular blocks, so this
+				   conn_type is acceptable.
+				*/
+				goto good_conn_type;				
+			}
+			
 #endif
 			debug("bg block %s conn-type not usable asking for %s "
 			      "bg_record is %s", 
@@ -537,7 +545,7 @@ static int _check_for_booted_overlapping_blocks(
 	int overlap = 0;
 
 	 /* this test only is for actually picking a block not testing */
-	if(test_only && bluegene_layout_mode == LAYOUT_DYNAMIC)
+	if(test_only && bg_conf->layout_mode == LAYOUT_DYNAMIC)
 		return rc;
 
 	/* Make sure no other blocks are under this block 
@@ -604,7 +612,7 @@ static int _check_for_booted_overlapping_blocks(
 			 * overlapping that we could avoid freeing if
 			 * we choose something else
 			 */
-			if(bluegene_layout_mode == LAYOUT_OVERLAP
+			if(bg_conf->layout_mode == LAYOUT_OVERLAP
 			   && ((overlap_check == 0 && bg_record->state 
 				!= RM_PARTITION_READY)
 			       || (overlap_check == 1 && found_record->state 
@@ -636,7 +644,8 @@ static int _check_for_booted_overlapping_blocks(
 					      found_record->job_running,
 					      found_record->bg_block_id);
 				
-				if(bluegene_layout_mode == LAYOUT_DYNAMIC) {
+				if(bg_conf->layout_mode == LAYOUT_DYNAMIC) {
+					List temp_list = list_create(NULL);
 					/* this will remove and
 					 * destroy the memory for
 					 * bg_record
@@ -649,44 +658,35 @@ static int _check_for_booted_overlapping_blocks(
 						found_record =
 							bg_record->original;
 						remove_from_bg_list(
-							bg_list, found_record);
+							bg_lists->main,
+							found_record);
 					} else {
 						debug("looking for original");
 						found_record =
 							find_and_remove_org_from_bg_list(
-								bg_list,
+								bg_lists->main,
 								bg_record);
 					}
-					destroy_bg_record(bg_record);
+
 					if(!found_record) {
-						/* There may be a bug
-						   here where on a real
-						   system we don't go
-						   destroy this block
-						   in the real system.
-						   If that is the case we
-						   need to add the
-						   bg_record to the
-						   free_block_list
-						   instead of destroying
-						   it like above.
-						*/ 
-						debug("This record wasn't "
-						      "found in the bg_list, "
+						debug("This record %s wasn't "
+						      "found in the "
+						      "bg_lists->main, "
 						      "no big deal, it "
-						      "probably wasn't added");
-						//rc = SLURM_ERROR;
-					} else {
-						debug("removing the block "
-						      "from the system");
-						List temp_list =
-							list_create(NULL);
-						list_push(temp_list, 
-							  found_record);
-						num_block_to_free++;
-						free_block_list(temp_list);
-						list_destroy(temp_list);
-					}
+						      "probably wasn't added",
+						      bg_record->bg_block_id);
+						found_record = bg_record;
+					} else
+						destroy_bg_record(bg_record);
+					
+					debug("removing the block %s "
+					      "from the system",
+					      bg_record->bg_block_id);
+					
+					list_push(temp_list, found_record);
+					free_block_list(temp_list);
+					list_destroy(temp_list);
+					
 					slurm_mutex_unlock(&block_state_mutex);
 				} 
 				rc = 1;
@@ -708,7 +708,6 @@ static int _check_for_booted_overlapping_blocks(
 
 static int _dynamically_request(List block_list, int *blocks_added,
 				ba_request_t *request,
-				bitstr_t* slurm_block_bitmap,
 				char *user_req_nodes)
 {
 	List list_of_lists = NULL;
@@ -727,10 +726,10 @@ static int _dynamically_request(List block_list, int *blocks_added,
 		list_append(list_of_lists, job_block_test_list);
 	else {
 		list_append(list_of_lists, block_list);
-		if(job_block_test_list == bg_job_block_list &&
-		   list_count(block_list) != list_count(bg_booted_block_list)) {
-			list_append(list_of_lists, bg_booted_block_list);
-			if(list_count(bg_booted_block_list) 
+		if(job_block_test_list == bg_lists->job_running &&
+		   list_count(block_list) != list_count(bg_lists->booted)) {
+			list_append(list_of_lists, bg_lists->booted);
+			if(list_count(bg_lists->booted) 
 			   != list_count(job_block_test_list)) 
 				list_append(list_of_lists, job_block_test_list);
 		} else if(list_count(block_list) 
@@ -758,7 +757,7 @@ static int _dynamically_request(List block_list, int *blocks_added,
 					destroy_bg_record(bg_record);
 				else {
 					if(job_block_test_list 
-					   == bg_job_block_list) {
+					   == bg_lists->job_running) {
 						if(configure_block(bg_record)
 						   == SLURM_ERROR) {
 							destroy_bg_record(
@@ -848,7 +847,7 @@ static int _find_best_block_match(List block_list,
 
 	if(!total_cpus)
 		total_cpus = DIM_SIZE[X] * DIM_SIZE[Y] * DIM_SIZE[Z] 
-			* procs_per_node;
+			* bg_conf->procs_per_bp;
 
 	if(req_nodes > max_nodes) {
 		error("can't run this job max bps is %u asking for %u",
@@ -930,13 +929,13 @@ static int _find_best_block_match(List block_list,
 				tmp_record->bg_block_list =
 					list_create(destroy_ba_node);
 				
-				len += strlen(bg_slurm_node_prefix)+1;
+				len += strlen(bg_conf->slurm_node_prefix)+1;
 				tmp_record->nodes = xmalloc(len);
 				
 				snprintf(tmp_record->nodes,
 					 len,
 					 "%s%s", 
-					 bg_slurm_node_prefix, 
+					 bg_conf->slurm_node_prefix, 
 					 tmp_nodes+i);
 				
 			
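
Editor's note: a worked instance of the total_cpus computation above,
with illustrative sizes (an 8x4x4 midplane grid and 1024 procs per
midplane -- values invented for the example, not taken from any real
bluegene.conf):

#include <stdio.h>

int main(void)
{
	int dim_size[3] = {8, 4, 4};	/* DIM_SIZE[X], [Y], [Z] */
	int procs_per_bp = 1024;	/* bg_conf->procs_per_bp */

	/* 8 * 4 * 4 * 1024 = 131072 cpus machine-wide */
	printf("total_cpus = %d\n",
	       dim_size[0] * dim_size[1] * dim_size[2] * procs_per_bp);
	return 0;
}
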
@@ -1000,7 +999,7 @@ static int _find_best_block_match(List block_list,
 	 *  need to set a max_procs if given
 	 */
 	if(max_procs == (uint32_t)NO_VAL) 
-		max_procs = max_nodes * procs_per_node;
+		max_procs = max_nodes * bg_conf->procs_per_bp;
 	
 	while(1) {
 		/* Here we are creating a list of all the blocks that
@@ -1008,7 +1007,7 @@ static int _find_best_block_match(List block_list,
 		 * works we will have can look and see the earliest
 		 * the job can start.  This doesn't apply to Dynamic mode.
 		 */ 
-		if(test_only && bluegene_layout_mode != LAYOUT_DYNAMIC) 
+		if(test_only && bg_conf->layout_mode != LAYOUT_DYNAMIC) 
 			overlapped_list = list_create(NULL);
 		
 		bg_record = _find_matching_block(block_list, 
@@ -1021,7 +1020,7 @@ static int _find_best_block_match(List block_list,
 						 overlapped_list,
 						 test_only);
 		if(!bg_record && test_only
-		   && bluegene_layout_mode != LAYOUT_DYNAMIC
+		   && bg_conf->layout_mode != LAYOUT_DYNAMIC
 		   && list_count(overlapped_list)) {
 			ListIterator itr =
 				list_iterator_create(overlapped_list);
@@ -1035,7 +1034,7 @@ static int _find_best_block_match(List block_list,
 			list_iterator_destroy(itr);
 		}
 		
-		if(test_only && bluegene_layout_mode != LAYOUT_DYNAMIC)
+		if(test_only && bg_conf->layout_mode != LAYOUT_DYNAMIC)
 			list_destroy(overlapped_list);
 
 		/* set the bitmap and do other allocation activities */
@@ -1080,18 +1079,17 @@ static int _find_best_block_match(List block_list,
 
 		/* all these assume that the *bg_record is NULL */
 
-		if(bluegene_layout_mode == LAYOUT_OVERLAP
+		if(bg_conf->layout_mode == LAYOUT_OVERLAP
 		   && !test_only && overlap_check < 2) {
 			overlap_check++;
 			continue;
 		}
 		
-		if(create_try || bluegene_layout_mode != LAYOUT_DYNAMIC)
+		if(create_try || bg_conf->layout_mode != LAYOUT_DYNAMIC)
 			goto no_match;
 		
 		if((rc = _dynamically_request(block_list, blocks_added,
 					      &request, 
-					      slurm_block_bitmap, 
 					      job_ptr->details->req_nodes))
 		   == SLURM_SUCCESS) {
 			create_try = 1;
@@ -1104,7 +1102,7 @@ static int _find_best_block_match(List block_list,
 			List job_list = NULL;
 			debug("trying with empty machine");
 			slurm_mutex_lock(&block_state_mutex);
-			if(job_block_test_list == bg_job_block_list) 
+			if(job_block_test_list == bg_lists->job_running) 
 				job_list = copy_bg_list(job_block_test_list);
 			else
 				job_list = job_block_test_list;
@@ -1134,12 +1132,7 @@ static int _find_best_block_match(List block_list,
 						debug2("taking off (%s) "
 						       "which is in an error "
 						       "state",
-						       bg_record->job_running,
-						       bg_record->bg_block_id,
-						       bg_record->job_ptr->
-						       start_time,
-						       bg_record->job_ptr->
-						       end_time);
+						       bg_record->bg_block_id);
 				} else 
 					/* This means we didn't have
 					   any jobs to take off
@@ -1186,7 +1179,8 @@ static int _find_best_block_match(List block_list,
 					destroy_bg_record(bg_record);
 				}
 					
-				if(job_block_test_list != bg_job_block_list) {
+				if(job_block_test_list 
+				   != bg_lists->job_running) {
 					list_append(block_list,
 						    (*found_bg_record));
 					while((bg_record = 
@@ -1208,7 +1202,7 @@ static int _find_best_block_match(List block_list,
 				break;
 			}
 
-			if(job_block_test_list == bg_job_block_list) 
+			if(job_block_test_list == bg_lists->job_running) 
 				list_destroy(job_list);
 
 			goto end_it;
@@ -1267,7 +1261,63 @@ static int _sync_block_lists(List full_list, List incomp_list)
 	return count;
 }
 
-#endif // HAVE_BG
+static void _build_select_struct(struct job_record *job_ptr, bitstr_t *bitmap)
+{
+	int i, j, k;
+	int first_bit, last_bit;
+	uint32_t node_cpus, total_cpus = 0, node_cnt;
+	select_job_res_t select_ptr;
+
+	if (job_ptr->select_job) {
+		error("_build_select_struct: already have select_job");
+		free_select_job_res(&job_ptr->select_job);
+	}
+
+	node_cnt = bit_set_count(bitmap);
+	job_ptr->select_job = select_ptr = create_select_job_res();
+	select_ptr->cpu_array_reps = xmalloc(sizeof(uint32_t) * node_cnt);
+	select_ptr->cpu_array_value = xmalloc(sizeof(uint16_t) * node_cnt);
+	select_ptr->cpus = xmalloc(sizeof(uint16_t) * node_cnt);
+	select_ptr->cpus_used = xmalloc(sizeof(uint16_t) * node_cnt);
+	select_ptr->nhosts = node_cnt;
+	select_ptr->node_bitmap = bit_copy(bitmap);
+	if (select_ptr->node_bitmap == NULL)
+		fatal("bit_copy malloc failure");
+	select_ptr->nprocs = job_ptr->num_procs;
+	if (build_select_job_res(select_ptr, (void *)node_record_table_ptr, 1))
+		error("_build_select_struct: build_select_job_res: %m");
+
+	if (job_ptr->num_procs <= bg_conf->procs_per_bp)
+		node_cpus = job_ptr->num_procs;
+	else
+		node_cpus = bg_conf->procs_per_bp;
+
+	first_bit = bit_ffs(bitmap);
+	last_bit  = bit_fls(bitmap);
+	for (i=first_bit, j=0, k=-1; i<=last_bit; i++) {
+		if (!bit_test(bitmap, i))
+			continue;
+
+		select_ptr->cpus[j] = node_cpus;
+		if ((k == -1) || 
+		    (select_ptr->cpu_array_value[k] != node_cpus)) {
+			select_ptr->cpu_array_cnt++;
+			select_ptr->cpu_array_reps[++k] = 1;
+			select_ptr->cpu_array_value[k] = node_cpus;
+		} else
+			select_ptr->cpu_array_reps[k]++;
+		total_cpus += node_cpus;
+
+		if (set_select_job_res_node(select_ptr, j))
+			error("_build_select_struct: "
+			      "set_select_job_res_node: %m");
+		j++;
+	}
+	if (select_ptr->nprocs != total_cpus) {
+		error("_build_select_struct: nprocs mismatch %u != %u",
+		      select_ptr->nprocs, total_cpus);
+	}
+}
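
Editor's note: _build_select_struct compresses the per-node cpu counts
with a run-length encoding -- equal consecutive values collapse into a
single (cpu_array_value[k], cpu_array_reps[k]) pair and cpu_array_cnt
counts the runs.  A standalone sketch of that encoding (array sizes
and cpu counts are illustrative, not SLURM API):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t cpus[] = {512, 512, 512, 256, 256, 512};
	int n = sizeof(cpus) / sizeof(cpus[0]);
	uint16_t value[6];
	uint32_t reps[6];
	int j, k = -1;

	for (j = 0; j < n; j++) {
		if ((k == -1) || (value[k] != cpus[j])) {
			value[++k] = cpus[j];	/* start a new run */
			reps[k] = 1;
		} else
			reps[k]++;		/* extend current run */
	}
	/* prints: 512 x 3, 256 x 2, 512 x 1 */
	for (j = 0; j <= k; j++)
		printf("%u cpus x %u nodes\n",
		       (unsigned)value[j], (unsigned)reps[j]);
	return 0;
}
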
 
 /*
  * Try to find resources for a given job request
@@ -1286,7 +1336,6 @@ extern int submit_job(struct job_record *job_ptr, bitstr_t *slurm_block_bitmap,
 		      uint32_t req_nodes, int mode)
 {
 	int rc = SLURM_SUCCESS;
-#ifdef HAVE_BG
 	bg_record_t* bg_record = NULL;
 	char buf[100];
 	uint16_t conn_type = (uint16_t)NO_VAL;
@@ -1302,16 +1351,16 @@ extern int submit_job(struct job_record *job_ptr, bitstr_t *slurm_block_bitmap,
 	else	
 		return EINVAL;	/* something not yet supported */
 
-	if(bluegene_layout_mode == LAYOUT_DYNAMIC)
+	if(bg_conf->layout_mode == LAYOUT_DYNAMIC)
 		slurm_mutex_lock(&create_dynamic_mutex);
 
-	job_block_test_list = bg_job_block_list;
+	job_block_test_list = bg_lists->job_running;
 	
 	select_g_get_jobinfo(job_ptr->select_jobinfo,
 			     SELECT_DATA_CONN_TYPE, &conn_type);
 	if(conn_type == SELECT_NAV) {
 		uint32_t max_procs = (uint32_t)NO_VAL;
-		if(bluegene_bp_node_cnt == bluegene_nodecard_node_cnt)
+		if(bg_conf->bp_node_cnt == bg_conf->nodecard_node_cnt)
 			conn_type = SELECT_SMALL;
 		else if(min_nodes > 1) {
 			conn_type = SELECT_TORUS;
@@ -1324,7 +1373,7 @@ extern int submit_job(struct job_record *job_ptr, bitstr_t *slurm_block_bitmap,
 			select_g_get_jobinfo(job_ptr->select_jobinfo,
 					     SELECT_DATA_MAX_PROCS,
 					     &max_procs);
-			if((max_procs > procs_per_node)
+			if((max_procs > bg_conf->procs_per_bp)
 			   || (max_procs == NO_VAL))
 				conn_type = SELECT_TORUS;
 			else
@@ -1362,7 +1411,7 @@ extern int submit_job(struct job_record *job_ptr, bitstr_t *slurm_block_bitmap,
 	debug2("RamDiskIoLoadImage=%s", buf);
 #endif	
 	slurm_mutex_lock(&block_state_mutex);
-	block_list = copy_bg_list(bg_list);
+	block_list = copy_bg_list(bg_lists->main);
 	slurm_mutex_unlock(&block_state_mutex);
 	
 	list_sort(block_list, (ListCmpF)_bg_record_sort_aval_dec);
@@ -1450,28 +1499,30 @@ extern int submit_job(struct job_record *job_ptr, bitstr_t *slurm_block_bitmap,
 /* 						     SELECT_DATA_CONN_TYPE,  */
 /* 						     &tmp16); */
 			}
+			if (mode == SELECT_MODE_RUN_NOW) {
+				_build_select_struct(job_ptr, 
+						     slurm_block_bitmap);
+			}
 		} else {
 			error("we got a success, but no block back");
 		}
 	}
 
-	if(bluegene_layout_mode == LAYOUT_DYNAMIC) {		
+	if(bg_conf->layout_mode == LAYOUT_DYNAMIC) {		
 		slurm_mutex_lock(&block_state_mutex);
 		if(blocks_added) 
-			_sync_block_lists(block_list, bg_list);		
+			_sync_block_lists(block_list, bg_lists->main);		
 		slurm_mutex_unlock(&block_state_mutex);
 		slurm_mutex_unlock(&create_dynamic_mutex);
 	}
 
 	list_destroy(block_list);
-#endif
 	return rc;
 }
 
 extern int test_job_list(List req_list)
 {
 	int rc = SLURM_SUCCESS;
-#ifdef HAVE_BG
 	bg_record_t* bg_record = NULL;
 	bg_record_t* new_record = NULL;
 	char buf[100];
@@ -1484,13 +1535,13 @@ extern int test_job_list(List req_list)
 
 	slurm_mutex_lock(&job_list_test_mutex);
 	
-	if(bluegene_layout_mode == LAYOUT_DYNAMIC)
+	if(bg_conf->layout_mode == LAYOUT_DYNAMIC)
 		slurm_mutex_lock(&create_dynamic_mutex);
 
-	job_block_test_list = copy_bg_list(bg_job_block_list);
+	job_block_test_list = copy_bg_list(bg_lists->job_running);
 
 	slurm_mutex_lock(&block_state_mutex);
-	block_list = copy_bg_list(bg_list);
+	block_list = copy_bg_list(bg_lists->main);
 	slurm_mutex_unlock(&block_state_mutex);
 
 	itr = list_iterator_create(req_list);
@@ -1520,7 +1571,7 @@ extern int test_job_list(List req_list)
 					will_run->job_ptr->select_jobinfo,
 					SELECT_DATA_MAX_PROCS,
 					&max_procs);
-				if((max_procs > procs_per_node)
+				if((max_procs > bg_conf->procs_per_bp)
 				   || (max_procs == NO_VAL))
 					conn_type = SELECT_TORUS;
 				else
@@ -1640,17 +1691,17 @@ extern int test_job_list(List req_list)
 /* 						SELECT_DATA_BLOCK_ID, */
 /* 						"unassigned"); */
 /* 					if(will_run->job_ptr->num_procs */
-/* 					   < bluegene_bp_node_cnt  */
+/* 					   < bg_conf->bp_node_cnt  */
 /* 					   && will_run->job_ptr->num_procs */
 /* 					   > 0) { */
-/* 						i = procs_per_node/ */
+/* 						i = bg_conf->procs_per_bp/ */
 /* 							will_run->job_ptr-> */
 /* 							num_procs; */
 /* 						debug2("divide by %d", i); */
 /* 					} else  */
 /* 						i = 1; */
 /* 					will_run->min_nodes *=  */
-/* 						bluegene_bp_node_cnt/i; */
+/* 						bg_conf->bp_node_cnt/i; */
 /* 					select_g_set_jobinfo( */
 /* 						will_run->job_ptr-> */
 /* 						select_jobinfo, */
@@ -1708,7 +1759,7 @@ extern int test_job_list(List req_list)
 	}
 	list_iterator_destroy(itr);
 
-	if(bluegene_layout_mode == LAYOUT_DYNAMIC) 		
+	if(bg_conf->layout_mode == LAYOUT_DYNAMIC) 		
 		slurm_mutex_unlock(&create_dynamic_mutex);
 	
 
@@ -1716,6 +1767,5 @@ extern int test_job_list(List req_list)
 	list_destroy(job_block_test_list);
 	
 	slurm_mutex_unlock(&job_list_test_mutex);
-#endif
 	return rc;
 }
diff --git a/src/plugins/select/bluegene/plugin/bg_job_place.h b/src/plugins/select/bluegene/plugin/bg_job_place.h
index 448698e58323df3c24389482c684e7363b88d383..a50a454c5ee5703f6c50c828abdf85e236289ef2 100644
--- a/src/plugins/select/bluegene/plugin/bg_job_place.h
+++ b/src/plugins/select/bluegene/plugin/bg_job_place.h
@@ -7,7 +7,8 @@
  *  Written by Dan Phung <phung4@llnl.gov> et. al.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/plugins/select/bluegene/plugin/bg_job_run.c b/src/plugins/select/bluegene/plugin/bg_job_run.c
index 800a6813d9d831407eb44d945d7f20bee99ab060..317a1ca48a3b7b5d5455e228ce32ba4d568e5931 100644
--- a/src/plugins/select/bluegene/plugin/bg_job_run.c
+++ b/src/plugins/select/bluegene/plugin/bg_job_run.c
@@ -2,14 +2,15 @@
  *  bg_job_run.c - blue gene job execution (e.g. initiation and termination) 
  *  functions.
  *
- *  $Id: bg_job_run.c 17202 2009-04-09 16:56:23Z da $ 
+ *  $Id: bg_job_run.c 17529 2009-05-18 18:43:42Z da $ 
  *****************************************************************************
  *  Copyright (C) 2004-2006 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -160,7 +161,6 @@ static int _remove_job(db_job_id_t job_id)
 		if(job_state == RM_JOB_TERMINATED)
 			return STATUS_OK;
 		else if(job_state == RM_JOB_DYING) {
-			/* start sending sigkills for the last 5 tries */
 			if(count > MAX_POLL_RETRIES) 
 				error("Job %d isn't dying, trying for "
 				      "%d seconds", job_id,
 				      count*POLL_INTERVAL);
@@ -215,14 +215,14 @@ static int _reset_block(bg_record_t *bg_record)
 		/* remove user from list */		
 		
 		if(bg_record->target_name) {
-			if(strcmp(bg_record->target_name, bg_slurm_user_name)) {
+			if(strcmp(bg_record->target_name, bg_conf->slurm_user_name)) {
 				xfree(bg_record->target_name);
 				bg_record->target_name = 
-					xstrdup(bg_slurm_user_name);
+					xstrdup(bg_conf->slurm_user_name);
 			}
 			update_block_user(bg_record, 1);
 		} else {
-			bg_record->target_name = xstrdup(bg_slurm_user_name);
+			bg_record->target_name = xstrdup(bg_conf->slurm_user_name);
 		}	
 		
 			
@@ -230,7 +230,7 @@ static int _reset_block(bg_record_t *bg_record)
 		bg_record->boot_count = 0;
 		
 		last_bg_update = time(NULL);
-		if(remove_from_bg_list(bg_job_block_list, bg_record) 
+		if(remove_from_bg_list(bg_lists->job_running, bg_record) 
 		   == SLURM_SUCCESS) {
 			num_unused_cpus += bg_record->cpu_cnt;
 		}
@@ -262,7 +262,8 @@ static void _sync_agent(bg_update_t *bg_update_ptr)
 {
 	bg_record_t * bg_record = NULL;
 	
-	bg_record = find_bg_record_in_list(bg_list, bg_update_ptr->bg_block_id);
+	bg_record = find_bg_record_in_list(bg_lists->main,
+					   bg_update_ptr->bg_block_id);
 	if(!bg_record) {
 		error("No block %s", bg_update_ptr->bg_block_id);
 		return;
@@ -272,12 +273,12 @@ static void _sync_agent(bg_update_t *bg_update_ptr)
 	bg_record->job_running = bg_update_ptr->job_ptr->job_id;
 	bg_record->job_ptr = bg_update_ptr->job_ptr;
 
-	if(!block_ptr_exist_in_list(bg_job_block_list, bg_record)) {
-		list_push(bg_job_block_list, bg_record);
+	if(!block_ptr_exist_in_list(bg_lists->job_running, bg_record)) {
+		list_push(bg_lists->job_running, bg_record);
 		num_unused_cpus -= bg_record->cpu_cnt;
 	}
-	if(!block_ptr_exist_in_list(bg_booted_block_list, bg_record)) 
-		list_push(bg_booted_block_list, bg_record);
+	if(!block_ptr_exist_in_list(bg_lists->booted, bg_record)) 
+		list_push(bg_lists->booted, bg_record);
 	slurm_mutex_unlock(&block_state_mutex);
 
 	if(bg_record->state == RM_PARTITION_READY) {
@@ -325,10 +326,11 @@ static void _start_agent(bg_update_t *bg_update_ptr)
 
 	slurm_mutex_lock(&job_start_mutex);
 		
-	bg_record = find_bg_record_in_list(bg_list, bg_update_ptr->bg_block_id);
+	bg_record = find_bg_record_in_list(bg_lists->main, 
+					   bg_update_ptr->bg_block_id);
 
 	if(!bg_record) {
-		error("block %s not found in bg_list",
+		error("block %s not found in bg_lists->main",
 		      bg_update_ptr->bg_block_id);
 		/* wait for the slurmd to begin 
 		   the batch script, slurm_fail_job() 
@@ -370,7 +372,7 @@ static void _start_agent(bg_update_t *bg_update_ptr)
 	
 	delete_list = list_create(NULL);
 	slurm_mutex_lock(&block_state_mutex);
-	itr = list_iterator_create(bg_list);
+	itr = list_iterator_create(bg_lists->main);
 	while ((found_record = list_next(itr))) {
 		if ((!found_record) || (bg_record == found_record))
 			continue;
@@ -400,16 +402,13 @@ static void _start_agent(bg_update_t *bg_update_ptr)
 		       found_record->bg_block_id, 
 		       bg_record->bg_block_id);
 		list_push(delete_list, found_record);
-		if(bluegene_layout_mode == LAYOUT_DYNAMIC) {
+		if(bg_conf->layout_mode == LAYOUT_DYNAMIC) 
 			list_remove(itr);
-		}
-		num_block_to_free++;
 	}		
 	list_iterator_destroy(itr);
 
 	if(requeue_job) {
-		num_block_to_free = 0;
-		num_block_freed = 0;
+		num_block_to_free = num_block_freed = 0;
 		list_destroy(delete_list);
 
 		_reset_block(bg_record);
@@ -446,8 +445,7 @@ static void _start_agent(bg_update_t *bg_update_ptr)
 	}
 	/* Zero out the values here because we are done with them and
 	   they will be ready for the next job */
-	num_block_to_free = 0;
-	num_block_freed = 0;
+	num_block_to_free = num_block_freed = 0;
 	
 	slurm_mutex_lock(&block_state_mutex);
 	if(bg_record->job_running <= NO_JOB_RUNNING) {
@@ -637,7 +635,7 @@ static void _start_agent(bg_update_t *bg_update_ptr)
 	} else if (bg_record->state == RM_PARTITION_CONFIGURING) {
 		bg_record->boot_state = 1;		
 	}
-	
+
 	if(bg_record->job_running <= NO_JOB_RUNNING) {
 		slurm_mutex_unlock(&job_start_mutex);
 		debug("job %u finished during the start of the boot "
@@ -672,7 +670,7 @@ static void _start_agent(bg_update_t *bg_update_ptr)
 		   incase the fail job isn't ran */
 		(void) slurm_fail_job(bg_record->job_running);
 		slurm_mutex_lock(&block_state_mutex);
-		if (remove_from_bg_list(bg_job_block_list, bg_record)
+		if (remove_from_bg_list(bg_lists->job_running, bg_record)
 		    == SLURM_SUCCESS) {
 			num_unused_cpus += bg_record->cpu_cnt;
 		}
@@ -710,7 +708,8 @@ static void _term_agent(bg_update_t *bg_update_ptr)
 	}
 	
 			
-	if ((rc = bridge_get_data(job_list, RM_JobListSize, &jobs)) != STATUS_OK) {
+	if ((rc = bridge_get_data(job_list, RM_JobListSize, &jobs)) 
+	    != STATUS_OK) {
 		error("bridge_get_data(RM_JobListSize): %s", bg_err_str(rc));
 		jobs = 0;
 	}
@@ -778,7 +777,8 @@ static void _term_agent(bg_update_t *bg_update_ptr)
 #endif
 	
 	/* remove the block's users */
-	bg_record = find_bg_record_in_list(bg_list, bg_update_ptr->bg_block_id);
+	bg_record = find_bg_record_in_list(bg_lists->main,
+					   bg_update_ptr->bg_block_id);
 	if(bg_record) {
 		debug("got the record %s user is %s",
 		      bg_record->bg_block_id,
@@ -804,9 +804,9 @@ static void _term_agent(bg_update_t *bg_update_ptr)
 		
 		slurm_mutex_unlock(&block_state_mutex);
 		
-	} else if (bluegene_layout_mode == LAYOUT_DYNAMIC) {
+	} else if (bg_conf->layout_mode == LAYOUT_DYNAMIC) {
 		debug2("Hopefully we are destroying this block %s "
-		       "since it isn't in the bg_list",
+		       "since it isn't in the bg_lists->main",
 		       bg_update_ptr->bg_block_id);
 	} else {
 		error("Could not find block %s previously assigned to job.  "
@@ -894,8 +894,9 @@ static void _block_op(bg_update_t *bg_update_ptr)
 		slurm_mutex_unlock(&agent_cnt_mutex);
 		return;
 	}
 	agent_cnt++;
+
 	slurm_mutex_unlock(&agent_cnt_mutex);
 	/* spawn an agent */
 	slurm_attr_init(&attr_agent);
 	if (pthread_attr_setdetachstate(&attr_agent, 
@@ -925,8 +926,8 @@ static List _get_all_blocks(void)
 	if (!ret_list)
 		fatal("malloc error");
 
-	if(bg_list) {
-		itr = list_iterator_create(bg_list);
+	if(bg_lists->main) {
+		itr = list_iterator_create(bg_lists->main);
 		while ((block_ptr = (bg_record_t *) list_next(itr))) {
 			if ((block_ptr->user_name == NULL)
 			    ||  (block_ptr->user_name[0] == '\0')
@@ -941,7 +942,7 @@ static List _get_all_blocks(void)
 		}
 		list_iterator_destroy(itr);
 	} else {
-		error("_get_all_blocks: no bg_list");
+		error("_get_all_blocks: no bg_lists->main");
 	}
 
 	return ret_list;
@@ -1040,7 +1041,8 @@ extern int start_job(struct job_record *job_ptr)
 			     &(bg_update_ptr->reboot));
 #ifdef HAVE_BGL
 	if(!bg_update_ptr->blrtsimage) {
-		bg_update_ptr->blrtsimage = xstrdup(default_blrtsimage);
+		bg_update_ptr->blrtsimage =
+			xstrdup(bg_conf->default_blrtsimage);
 		select_g_set_jobinfo(job_ptr->select_jobinfo,
 				     SELECT_DATA_BLRTS_IMAGE, 
 				     bg_update_ptr->blrtsimage);
@@ -1055,7 +1057,8 @@ extern int start_job(struct job_record *job_ptr)
 			     SELECT_DATA_LINUX_IMAGE, 
 			     &(bg_update_ptr->linuximage));
 	if(!bg_update_ptr->linuximage) {
-		bg_update_ptr->linuximage = xstrdup(default_linuximage);
+		bg_update_ptr->linuximage =
+			xstrdup(bg_conf->default_linuximage);
 		select_g_set_jobinfo(job_ptr->select_jobinfo,
 				     SELECT_DATA_LINUX_IMAGE, 
 				     bg_update_ptr->linuximage);
@@ -1064,7 +1067,8 @@ extern int start_job(struct job_record *job_ptr)
 			     SELECT_DATA_MLOADER_IMAGE, 
 			     &(bg_update_ptr->mloaderimage));
 	if(!bg_update_ptr->mloaderimage) {
-		bg_update_ptr->mloaderimage = xstrdup(default_mloaderimage);
+		bg_update_ptr->mloaderimage = 
+			xstrdup(bg_conf->default_mloaderimage);
 		select_g_set_jobinfo(job_ptr->select_jobinfo,
 				     SELECT_DATA_MLOADER_IMAGE, 
 				     bg_update_ptr->mloaderimage);
@@ -1073,24 +1077,27 @@ extern int start_job(struct job_record *job_ptr)
 			     SELECT_DATA_RAMDISK_IMAGE, 
 			     &(bg_update_ptr->ramdiskimage));
 	if(!bg_update_ptr->ramdiskimage) {
-		bg_update_ptr->ramdiskimage = xstrdup(default_ramdiskimage);
+		bg_update_ptr->ramdiskimage =
+			xstrdup(bg_conf->default_ramdiskimage);
 		select_g_set_jobinfo(job_ptr->select_jobinfo,
 				     SELECT_DATA_RAMDISK_IMAGE, 
 				     bg_update_ptr->ramdiskimage);
 	}
 	bg_record = 
-		find_bg_record_in_list(bg_list, bg_update_ptr->bg_block_id);
+		find_bg_record_in_list(bg_lists->main, 
+				       bg_update_ptr->bg_block_id);
 	if (bg_record) {
 		slurm_mutex_lock(&block_state_mutex);
 		job_ptr->num_procs = bg_record->cpu_cnt;
+		job_ptr->total_procs = job_ptr->num_procs;
 		bg_record->job_running = bg_update_ptr->job_ptr->job_id;
 		bg_record->job_ptr = bg_update_ptr->job_ptr;
-		if(!block_ptr_exist_in_list(bg_job_block_list, bg_record)) {
-			list_push(bg_job_block_list, bg_record);
+		if(!block_ptr_exist_in_list(bg_lists->job_running, bg_record)) {
+			list_push(bg_lists->job_running, bg_record);
 			num_unused_cpus -= bg_record->cpu_cnt;
 		}
-		if(!block_ptr_exist_in_list(bg_booted_block_list, bg_record))
-			list_push(bg_booted_block_list, bg_record);
+		if(!block_ptr_exist_in_list(bg_lists->booted, bg_record))
+			list_push(bg_lists->booted, bg_record);
 		slurm_mutex_unlock(&block_state_mutex);
 	} else {
 		error("bg_record %s doesn't exist, requested for job (%d)", 
@@ -1258,11 +1265,11 @@ extern int boot_block(bg_record_t *bg_record)
 	int rc;	
 		
 	if ((rc = bridge_set_block_owner(bg_record->bg_block_id, 
-					 bg_slurm_user_name)) 
+					 bg_conf->slurm_user_name)) 
 	    != STATUS_OK) {
 		error("bridge_set_block_owner(%s,%s): %s", 
 		      bg_record->bg_block_id, 
-		      bg_slurm_user_name,
+		      bg_conf->slurm_user_name,
 		      bg_err_str(rc));
 		return SLURM_ERROR;
 	}	
@@ -1291,8 +1298,8 @@ extern int boot_block(bg_record_t *bg_record)
 	}
 	
 	slurm_mutex_lock(&block_state_mutex);
-	if(!block_ptr_exist_in_list(bg_booted_block_list, bg_record))
-		list_push(bg_booted_block_list, bg_record);
+	if(!block_ptr_exist_in_list(bg_lists->booted, bg_record))
+		list_push(bg_lists->booted, bg_record);
 	slurm_mutex_unlock(&block_state_mutex);
 	
 	rc = 0;
@@ -1317,8 +1324,8 @@ extern int boot_block(bg_record_t *bg_record)
 	slurm_mutex_unlock(&block_state_mutex);
 #else
 	slurm_mutex_lock(&block_state_mutex);
-	if(!block_ptr_exist_in_list(bg_booted_block_list, bg_record))
-		list_push(bg_booted_block_list, bg_record);
+	if(!block_ptr_exist_in_list(bg_lists->booted, bg_record))
+		list_push(bg_lists->booted, bg_record);
 	bg_record->state = RM_PARTITION_READY;
 	last_bg_update = time(NULL);
 	slurm_mutex_unlock(&block_state_mutex);				
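
The start_job() hunks above repeat one fallback per boot image: keep the image the job requested, otherwise duplicate the configured default out of bg_conf. A hedged helper distilling that shape (the helper itself is not in the source; xstrdup is SLURM's allocating strdup):

    /* Copy the requested image, or the configured default if the
     * job supplied none.  Illustrative only. */
    static char *_image_or_default(const char *requested, const char *dflt)
    {
            return xstrdup(requested ? requested : dflt);
    }

    /* e.g. bg_update_ptr->linuximage =
     *          _image_or_default(NULL, bg_conf->default_linuximage); */
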
diff --git a/src/plugins/select/bluegene/plugin/bg_job_run.h b/src/plugins/select/bluegene/plugin/bg_job_run.h
index 694df91afa3373136f754eefca3d5cd79502593c..d4e57bae836724d62af8598a657485702846cd9f 100644
--- a/src/plugins/select/bluegene/plugin/bg_job_run.h
+++ b/src/plugins/select/bluegene/plugin/bg_job_run.h
@@ -7,7 +7,8 @@
  *  Written by Morris Jette <jette1@llnl.gov>
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/plugins/select/bluegene/plugin/bg_record_functions.c b/src/plugins/select/bluegene/plugin/bg_record_functions.c
index e46ff618aeb3f63e2f597cd5631c2a2dd334d9b6..551c1538b01a6f04259342af2870431c83bd92eb 100644
--- a/src/plugins/select/bluegene/plugin/bg_record_functions.c
+++ b/src/plugins/select/bluegene/plugin/bg_record_functions.c
@@ -8,7 +8,8 @@
  *  Written by Danny Auble <da@llnl.gov>
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -44,10 +45,8 @@
 #include "src/slurmctld/locks.h"
 
 /* some local functions */
-#ifdef HAVE_BG
 static int  _addto_node_list(bg_record_t *bg_record, int *start, int *end);
 static int  _ba_node_cmpf_inc(ba_node_t *node_a, ba_node_t *node_b);
-#endif
 
 extern void print_bg_record(bg_record_t* bg_record)
 {
@@ -163,7 +162,6 @@ extern int block_ptr_exist_in_list(List my_list, bg_record_t *bg_record)
 
 extern void process_nodes(bg_record_t *bg_record, bool startup)
 {
-#ifdef HAVE_BG
 	int j=0, number;
 	int diff=0;
 	int largest_diff=-1;
@@ -182,7 +180,8 @@ extern void process_nodes(bg_record_t *bg_record, bool startup)
 		memset(&best_start, 0, sizeof(best_start));
 		bg_record->bp_count = 0;
 		if((bg_record->conn_type >= SELECT_SMALL) && (!startup))
-			error("We shouldn't be here there could be some "
+			error("process_nodes: "
+			      "We shouldn't be here; there could be some "
 			      "badness if we use this logic %s",
 			      bg_record->nodes);
 		while (bg_record->nodes[j] != '\0') {
@@ -217,7 +216,8 @@ extern void process_nodes(bg_record_t *bg_record, bool startup)
 					best_start[X] = start[X];
 					best_start[Y] = start[Y];
 					best_start[Z] = start[Z];
-					debug3("start is now %dx%dx%d",
+					debug3("process_nodes: "
+					       "start is now %dx%dx%d",
 					       best_start[X],
 					       best_start[Y],
 					       best_start[Z]);
@@ -250,7 +250,8 @@ extern void process_nodes(bg_record_t *bg_record, bool startup)
 					best_start[X] = start[X];
 					best_start[Y] = start[Y];
 					best_start[Z] = start[Z];
-					debug3("start is now %dx%dx%d",
+					debug3("process_nodes: "
+					       "start is now %dx%dx%d",
 					       best_start[X],
 					       best_start[Y],
 					       best_start[Z]);
@@ -272,7 +273,8 @@ extern void process_nodes(bg_record_t *bg_record, bool startup)
 		bg_record->start[X] = best_start[X];
 		bg_record->start[Y] = best_start[Y];
 		bg_record->start[Z] = best_start[Z];
-		debug2("start is %dx%dx%d",
+		debug2("process_nodes: "
+		       "start is %dx%dx%d",
 		       bg_record->start[X],
 		       bg_record->start[Y],
 		       bg_record->start[Z]);
@@ -291,7 +293,8 @@ extern void process_nodes(bg_record_t *bg_record, bool startup)
 	while ((ba_node = list_next(itr)) != NULL) {
 		if(!ba_node->used)
 			continue;
-		debug4("%c%c%c is included in this block",
+		debug4("process_nodes: "
+		       "%c%c%c is included in this block",
 		       alpha_num[ba_node->coord[X]],
 		       alpha_num[ba_node->coord[Y]],
 		       alpha_num[ba_node->coord[Z]]);
@@ -310,7 +313,8 @@ extern void process_nodes(bg_record_t *bg_record, bool startup)
 		}
 	}
 	list_iterator_destroy(itr);
-	debug3("geo = %c%c%c bp count is %d\n",
+	debug3("process_nodes: "
+	       "geo = %c%c%c bp count is %d\n",
 	       alpha_num[bg_record->geo[X]],
 	       alpha_num[bg_record->geo[Y]],
 	       alpha_num[bg_record->geo[Z]],
@@ -325,10 +329,9 @@ extern void process_nodes(bg_record_t *bg_record, bool startup)
 		    && (bg_record->geo[Z] == DIM_SIZE[Z])) {
 			bg_record->full_block = 1;	
 		}	
-	} else if(bg_record->node_cnt == bluegene_bp_node_cnt)
+	} else if(bg_record->node_cnt == bg_conf->bp_node_cnt)
 		bg_record->full_block = 1;
 	
-	
 /* #ifndef HAVE_BG_FILES */
 /* 	max_dim[X] = MAX(max_dim[X], end[X]); */
 /* 	max_dim[Y] = MAX(max_dim[Y], end[Y]); */
@@ -338,10 +341,10 @@ extern void process_nodes(bg_record_t *bg_record, bool startup)
 	if (node_name2bitmap(bg_record->nodes, 
 			     false, 
 			     &bg_record->bitmap)) {
-		fatal("1 Unable to convert nodes %s to bitmap", 
+		fatal("process_nodes: "
+		      "1 Unable to convert nodes %s to bitmap", 
 		      bg_record->nodes);
 	}
-#endif
 	return;
 }
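
With the HAVE_BG guards removed, process_nodes() now always finishes by turning the node-name expression into a bitmap. A sketch of that final step, assuming node_name2bitmap() from src/common/node_conf.h (a non-zero return means the conversion failed):

    bitstr_t *bitmap = NULL;

    if (node_name2bitmap(bg_record->nodes, false, &bitmap))
            fatal("unable to convert nodes %s to bitmap",
                  bg_record->nodes);
    /* bitmap now holds one bit per node named in bg_record->nodes */
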
 
@@ -524,7 +527,7 @@ extern int update_block_user(bg_record_t *bg_record, int set)
 	}
 	if(!bg_record->user_name) {
 		error("No user_name");
-		bg_record->user_name = xstrdup(bg_slurm_user_name);
+		bg_record->user_name = xstrdup(bg_conf->slurm_user_name);
 	}
 #ifdef HAVE_BG_FILES
 	int rc=0;	
@@ -538,7 +541,7 @@ extern int update_block_user(bg_record_t *bg_record, int set)
 			return -1;
 		} else if (rc == REMOVE_USER_NONE) {
 			if (strcmp(bg_record->target_name, 
-				   bg_slurm_user_name)) {
+				   bg_conf->slurm_user_name)) {
 				info("Adding user %s to Block %s",
 				     bg_record->target_name, 
 				     bg_record->bg_block_id);
@@ -601,7 +604,7 @@ extern void drain_as_needed(bg_record_t *bg_record, char *reason)
 		}
 		unlock_slurmctld(job_write_lock);
 		slurm_mutex_lock(&block_state_mutex);
-		if(remove_from_bg_list(bg_job_block_list, bg_record) 
+		if(remove_from_bg_list(bg_lists->job_running, bg_record) 
 		   == SLURM_SUCCESS) {
 			num_unused_cpus += bg_record->cpu_cnt;
 		}
@@ -609,7 +612,7 @@ extern void drain_as_needed(bg_record_t *bg_record, char *reason)
 	}
 
 	/* small blocks */
-	if(bg_record->cpu_cnt < procs_per_node) {
+	if(bg_record->cpu_cnt < bg_conf->procs_per_bp) {
 		debug2("small block");
 		goto end_it;
 	}
@@ -651,7 +654,7 @@ extern int set_ionodes(bg_record_t *bg_record, int io_start, int io_nodes)
 	if(!bg_record)
 		return SLURM_ERROR;
 	
-	bg_record->ionode_bitmap = bit_alloc(bluegene_numpsets);
+	bg_record->ionode_bitmap = bit_alloc(bg_conf->numpsets);
 	/* Set the correct ionodes being used in this block */
 	bit_nset(bg_record->ionode_bitmap, io_start, io_start+io_nodes);
 	bit_fmt(bitstring, BITSIZE, bg_record->ionode_bitmap);
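
set_ionodes() is a compact tour of SLURM's bitstring API: allocate a zeroed bitmap, set an inclusive range, then render it as a ranged string. A minimal sketch (BITSIZE is assumed to be the plugin's format-buffer size):

    char bitstring[BITSIZE];                        /* BITSIZE assumed */
    bitstr_t *b = bit_alloc(bg_conf->numpsets);     /* all bits clear */

    bit_nset(b, io_start, io_start + io_nodes);     /* inclusive range */
    bit_fmt(bitstring, BITSIZE, b);                 /* e.g. "0-3" */
    FREE_NULL_BITMAP(b);                            /* free and NULL */
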
@@ -669,18 +672,19 @@ extern int add_bg_record(List records, List used_nodes, blockreq_t *blockreq,
 	int i, len;
 	int small_count = 0;
 
+	xassert(bg_conf->slurm_user_name);
+
 	if(!records) {
 		fatal("add_bg_record: no records list given");
 	}
-	bg_record = (bg_record_t*) xmalloc(sizeof(bg_record_t));
-	
+	bg_record = (bg_record_t*) xmalloc(sizeof(bg_record_t));	
 	
-	bg_record->user_name = xstrdup(bg_slurm_user_name);
-	bg_record->target_name = xstrdup(bg_slurm_user_name);
+	bg_record->user_name = xstrdup(bg_conf->slurm_user_name);
+	bg_record->target_name = xstrdup(bg_conf->slurm_user_name);
 	
 	pw_uid = uid_from_string(bg_record->user_name);
 	if(pw_uid == (uid_t) -1) {
-		error("No such user: %s", bg_record->user_name);
+		error("add_bg_record: No such user: %s", bg_record->user_name);
 	} else {
 		bg_record->user_uid = pw_uid;
 	}
@@ -689,17 +693,18 @@ extern int add_bg_record(List records, List used_nodes, blockreq_t *blockreq,
 	if(used_nodes) {
 		if(copy_node_path(used_nodes, &bg_record->bg_block_list)
 		   == SLURM_ERROR)
-			error("couldn't copy the path for the allocation");
+			error("add_bg_record: "
+			      "couldn't copy the path for the allocation");
 		bg_record->bp_count = list_count(used_nodes);
 	}
 	/* bg_record->boot_state = 0; 	Implicit */
 	/* bg_record->state = 0;	Implicit */
 #ifdef HAVE_BGL
-	debug2("asking for %s %d %d %s", 
+	debug2("add_bg_record: asking for %s %d %d %s", 
 	       blockreq->block, blockreq->small32, blockreq->small128,
 	       convert_conn_type(blockreq->conn_type));
 #else
-	debug2("asking for %s %d %d %d %d %d %s", 
+	debug2("add_bg_record: asking for %s %d %d %d %d %d %s", 
 	       blockreq->block, blockreq->small256, 
 	       blockreq->small128, blockreq->small64,
 	       blockreq->small32, blockreq->small16, 
@@ -708,7 +713,7 @@ extern int add_bg_record(List records, List used_nodes, blockreq_t *blockreq,
 	/* Set the bitmap blank here if it is a full node we don't
 	   want anything set we also don't want the bg_record->ionodes set.
 	*/
-	bg_record->ionode_bitmap = bit_alloc(bluegene_numpsets);
+	bg_record->ionode_bitmap = bit_alloc(bg_conf->numpsets);
 
 	len = strlen(blockreq->block);
 	i=0;
@@ -721,12 +726,13 @@ extern int add_bg_record(List records, List used_nodes, blockreq_t *blockreq,
 	if(i<len) {
 		len -= i;
 		
-		len += strlen(bg_slurm_node_prefix)+1;
+		len += strlen(bg_conf->slurm_node_prefix)+1;
 		bg_record->nodes = xmalloc(len);
 		snprintf(bg_record->nodes, len, "%s%s", 
-			bg_slurm_node_prefix, blockreq->block+i);
+			bg_conf->slurm_node_prefix, blockreq->block+i);
 	} else 
-		fatal("BPs=%s is in a weird format", blockreq->block); 
+		fatal("add_bg_record: BPs=%s is in a weird format",
+		      blockreq->block); 
 	
 	process_nodes(bg_record, false);
 	
@@ -734,30 +740,32 @@ extern int add_bg_record(List records, List used_nodes, blockreq_t *blockreq,
 	bg_record->node_use = SELECT_COPROCESSOR_MODE;
 #endif
 	bg_record->conn_type = blockreq->conn_type;
-	bg_record->cpu_cnt = procs_per_node * bg_record->bp_count;
-	bg_record->node_cnt = bluegene_bp_node_cnt * bg_record->bp_count;
+	bg_record->cpu_cnt = bg_conf->procs_per_bp * bg_record->bp_count;
+	bg_record->node_cnt = bg_conf->bp_node_cnt * bg_record->bp_count;
 	bg_record->job_running = NO_JOB_RUNNING;
 
 #ifdef HAVE_BGL
 	if(blockreq->blrtsimage)
 		bg_record->blrtsimage = xstrdup(blockreq->blrtsimage);
 	else
-		bg_record->blrtsimage = xstrdup(default_blrtsimage);
+		bg_record->blrtsimage = xstrdup(bg_conf->default_blrtsimage);
 #endif
 	if(blockreq->linuximage)
 		bg_record->linuximage = xstrdup(blockreq->linuximage);
 	else
-		bg_record->linuximage = xstrdup(default_linuximage);
+		bg_record->linuximage = xstrdup(bg_conf->default_linuximage);
 
 	if(blockreq->mloaderimage)
 		bg_record->mloaderimage = xstrdup(blockreq->mloaderimage);
 	else
-		bg_record->mloaderimage = xstrdup(default_mloaderimage);
+		bg_record->mloaderimage =
+			xstrdup(bg_conf->default_mloaderimage);
 
 	if(blockreq->ramdiskimage)
 		bg_record->ramdiskimage = xstrdup(blockreq->ramdiskimage);
 	else
-		bg_record->ramdiskimage = xstrdup(default_ramdiskimage);
+		bg_record->ramdiskimage =
+			xstrdup(bg_conf->default_ramdiskimage);
 		
 	if(bg_record->conn_type != SELECT_SMALL) {
 		/* this needs to be an append so we keep things in the
@@ -766,66 +774,73 @@ extern int add_bg_record(List records, List used_nodes, blockreq_t *blockreq,
 		/* this isn't a correct list so we need to set it later for
 		   now we just used it to be the bp number */
 		if(!used_nodes) {
-			debug4("we didn't get a request list so we are "
+			debug4("add_bg_record: "
+			       "we didn't get a request list so we are "
 			       "destroying this bp list");
 			list_destroy(bg_record->bg_block_list);
 			bg_record->bg_block_list = NULL;
 		}
 	} else {
-		debug("adding a small block");
+		debug("add_bg_record: adding a small block");
 		if(no_check)
 			goto no_check;
 		/* if the ionode cnt for small32 is 0 then don't
 		   allow a sub quarter allocation 
 		*/
-		if(bluegene_nodecard_ionode_cnt < 2) {
-			if(!bluegene_nodecard_ionode_cnt && blockreq->small32) 
-				fatal("There is an error in your "
+		if(bg_conf->nodecard_ionode_cnt < 2) {
+			if(!bg_conf->nodecard_ionode_cnt && blockreq->small32) 
+				fatal("add_bg_record: "
+				      "There is an error in your "
 				      "bluegene.conf file.\n"
 				      "Can't create a 32 node block with "
 				      "Numpsets=%u. (Try setting it "
 				      "to at least 16)",
-				      bluegene_numpsets);
+				      bg_conf->numpsets);
 #ifndef HAVE_BGL
 			if(blockreq->small16) 
-				fatal("There is an error in your "
+				fatal("add_bg_record: "
+				      "There is an error in your "
 				      "bluegene.conf file.\n"
 				      "Can't create a 16 node block with "
 				      "Numpsets=%u. (Try setting it to "
 				      "at least 32)",
-				      bluegene_numpsets);
-			if((bluegene_io_ratio < 0.5) && blockreq->small64) 
-				fatal("There is an error in your "
+				      bg_conf->numpsets);
+			if((bg_conf->io_ratio < 0.5) && blockreq->small64) 
+				fatal("add_bg_record: "
+				      "There is an error in your "
 				      "bluegene.conf file.\n"
 				      "Can't create a 64 node block with "
 				      "Numpsets=%u. (Try setting it "
 				      "to at least 8)",
-				      bluegene_numpsets);
+				      bg_conf->numpsets);
 #endif
 		}
 
 #ifdef HAVE_BGL
 		if(blockreq->small32==0 && blockreq->small128==0) {
-			info("No specs given for this small block, "
+			info("add_bg_record: "
+			     "No specs given for this small block, "
 			     "I am spliting this block into 4 128CnBlocks");
 			blockreq->small128=4;
 		}		
 
-		i = (blockreq->small32*bluegene_nodecard_node_cnt) + 
-			(blockreq->small128*bluegene_quarter_node_cnt);
-		if(i != bluegene_bp_node_cnt)
-			fatal("There is an error in your bluegene.conf file.\n"
+		i = (blockreq->small32*bg_conf->nodecard_node_cnt) + 
+			(blockreq->small128*bg_conf->quarter_node_cnt);
+		if(i != bg_conf->bp_node_cnt)
+			fatal("add_bg_record: "
+			      "There is an error in your bluegene.conf file.\n"
 			      "I am unable to request %d nodes consisting of "
 			      "%u 32CnBlocks and\n%u 128CnBlocks in one "
 			      "base partition with %u nodes.", 
 			      i, blockreq->small32, blockreq->small128,
-			      bluegene_bp_node_cnt);
+			      bg_conf->bp_node_cnt);
 		small_count = blockreq->small32+blockreq->small128; 
 #else
 		if(!blockreq->small16 && !blockreq->small32 
 		   && !blockreq->small64 && !blockreq->small128 
 		   && !blockreq->small256) {
-			info("No specs given for this small block, "
+			info("add_bg_record: "
+			     "No specs given for this small block, "
 			     "I am spliting this block into 2 256CnBlocks");
 			blockreq->small256=2;
 		}		
@@ -835,8 +850,9 @@ extern int add_bg_record(List records, List used_nodes, blockreq_t *blockreq,
 			+ (blockreq->small64*64) 
 			+ (blockreq->small128*128)
 			+ (blockreq->small256*256);
-		if(i != bluegene_bp_node_cnt)
-			fatal("There is an error in your bluegene.conf file.\n"
+		if(i != bg_conf->bp_node_cnt)
+			fatal("add_bg_record: "
+			      "There is an error in your bluegene.conf file.\n"
 			      "I am unable to request %d nodes consisting of "
 			      "%u 16CNBlocks, %u 32CNBlocks,\n"
 			      "%u 64CNBlocks, %u 128CNBlocks, "
@@ -844,7 +860,7 @@ extern int add_bg_record(List records, List used_nodes, blockreq_t *blockreq,
 			      "in one base partition with %u nodes.", 
 			      i, blockreq->small16, blockreq->small32, 
 			      blockreq->small64, blockreq->small128, 
-			      blockreq->small256, bluegene_bp_node_cnt);
+			      blockreq->small256, bg_conf->bp_node_cnt);
 		small_count = blockreq->small16
 			+ blockreq->small32
 			+ blockreq->small64
@@ -872,7 +888,7 @@ extern int add_bg_record(List records, List used_nodes, blockreq_t *blockreq,
 extern int handle_small_record_request(List records, blockreq_t *blockreq,
 				       bg_record_t *bg_record, bitoff_t start)
 {
-	bitstr_t *ionodes = bit_alloc(bluegene_numpsets);
+	bitstr_t *ionodes = bit_alloc(bg_conf->numpsets);
 	int i=0, ionode_cnt = 0;
 	bg_record_t *found_record = NULL;
 
@@ -881,7 +897,7 @@ extern int handle_small_record_request(List records, blockreq_t *blockreq,
 	xassert(bg_record);
 
 	xassert(start >= 0);
-	xassert(start < bluegene_numpsets);
+	xassert(start < bg_conf->numpsets);
 
 #ifndef HAVE_BGL
 	for(i=0; i<blockreq->small16; i++) {
@@ -895,7 +911,7 @@ extern int handle_small_record_request(List records, blockreq_t *blockreq,
 		start++;
 	}
 #endif
-	if((ionode_cnt = bluegene_nodecard_ionode_cnt))
+	if((ionode_cnt = bg_conf->nodecard_ionode_cnt))
 		ionode_cnt--;
 	for(i=0; i<blockreq->small32; i++) {
 		bit_nset(ionodes, start, start+ionode_cnt);
@@ -909,7 +925,7 @@ extern int handle_small_record_request(List records, blockreq_t *blockreq,
 	}
 	
 #ifndef HAVE_BGL
-	if((ionode_cnt = bluegene_nodecard_ionode_cnt * 2))
+	if((ionode_cnt = bg_conf->nodecard_ionode_cnt * 2))
 		ionode_cnt--;
 	for(i=0; i<blockreq->small64; i++) {
 		bit_nset(ionodes, start, start+ionode_cnt);
@@ -922,7 +938,7 @@ extern int handle_small_record_request(List records, blockreq_t *blockreq,
 		start+=ionode_cnt+1;
 	}
 #endif
-	if((ionode_cnt = bluegene_quarter_ionode_cnt))
+	if((ionode_cnt = bg_conf->quarter_ionode_cnt))
 		ionode_cnt--;
 	for(i=0; i<blockreq->small128; i++) {
 		bit_nset(ionodes, start, start+ionode_cnt);
@@ -936,7 +952,7 @@ extern int handle_small_record_request(List records, blockreq_t *blockreq,
 	}
 
 #ifndef HAVE_BGL
-	if((ionode_cnt = bluegene_quarter_ionode_cnt * 2))
+	if((ionode_cnt = bg_conf->quarter_ionode_cnt * 2))
 		ionode_cnt--;
 	for(i=0; i<blockreq->small256; i++) {
 		bit_nset(ionodes, start, start+ionode_cnt);
@@ -987,14 +1003,15 @@ extern int down_nodecard(char *bp_name, bitoff_t io_start)
 	if(io_cnt == NO_VAL) {
 		io_cnt = 1;
 		/* Translate 1 nodecard count to ionode count */
-		if((io_cnt *= bluegene_io_ratio))
+		if((io_cnt *= bg_conf->io_ratio))
 			io_cnt--;
+
 		/* make sure we create something that is able to be
 		   created */
-		if(bluegene_smallest_block < bluegene_nodecard_node_cnt)
-			create_size = bluegene_nodecard_node_cnt;
+		if(bg_conf->smallest_block < bg_conf->nodecard_node_cnt)
+			create_size = bg_conf->nodecard_node_cnt;
 		else
-			create_size = bluegene_smallest_block;
+			create_size = bg_conf->smallest_block;
 	}
 
 	node_ptr = find_node_record(bp_name);
@@ -1003,6 +1020,16 @@ extern int down_nodecard(char *bp_name, bitoff_t io_start)
 		       bp_name);
 		return EINVAL;
 	}
+
+	/* sanity check: make sure we don't core on these bits when
+	   we set them below. */
+	if(io_start >= bg_conf->numpsets 
+	   || (io_start+io_cnt) >= bg_conf->numpsets) {
+		debug("io %d-%d not configured on this "
+		      "system, only %d ionodes per midplane",
+		      io_start, io_start+io_cnt, bg_conf->numpsets);
+		return EINVAL;
+	}
 	bp_bit = (node_ptr - node_record_table_ptr);
 	
 	memset(&blockreq, 0, sizeof(blockreq_t));
@@ -1010,21 +1037,21 @@ extern int down_nodecard(char *bp_name, bitoff_t io_start)
 	blockreq.conn_type = SELECT_SMALL;
 	blockreq.block = bp_name;
 
-	debug3("here setting %d of %d and %d-%d of %d",
+	debug3("here setting node %d of %d and ionodes %d-%d of %d",
 	       bp_bit, node_record_count, io_start, 
-	       io_start+io_cnt, bluegene_numpsets);
+	       io_start+io_cnt, bg_conf->numpsets);
 
 	memset(&tmp_record, 0, sizeof(bg_record_t));
 	tmp_record.bp_count = 1;
-	tmp_record.node_cnt = bluegene_nodecard_node_cnt;
+	tmp_record.node_cnt = bg_conf->nodecard_node_cnt;
 	tmp_record.bitmap = bit_alloc(node_record_count);
 	bit_set(tmp_record.bitmap, bp_bit);
 
-	tmp_record.ionode_bitmap = bit_alloc(bluegene_numpsets);
+	tmp_record.ionode_bitmap = bit_alloc(bg_conf->numpsets);
 	bit_nset(tmp_record.ionode_bitmap, io_start, io_start+io_cnt);
 
 	slurm_mutex_lock(&block_state_mutex);
-	itr = list_iterator_create(bg_list);
+	itr = list_iterator_create(bg_lists->main);
 	while ((bg_record = list_next(itr))) {
 		if(!bit_test(bg_record->bitmap, bp_bit))
 			continue;
@@ -1036,7 +1063,7 @@ extern int down_nodecard(char *bp_name, bitoff_t io_start)
 			slurm_fail_job(bg_record->job_running);
 
 		/* mark every one of these in an error state */
-		if(bluegene_layout_mode != LAYOUT_DYNAMIC) {
+		if(bg_conf->layout_mode != LAYOUT_DYNAMIC) {
 			if(!delete_list)
 				delete_list = list_create(NULL);
 			list_append(delete_list, bg_record);
@@ -1048,8 +1075,12 @@ extern int down_nodecard(char *bp_name, bitoff_t io_start)
 		/* if the block is smaller than the create size just
 		   continue on.
 		*/
-		if(bg_record->node_cnt < create_size)
+		if(bg_record->node_cnt < create_size) {
+			if(!delete_list)
+				delete_list = list_create(NULL);
+			list_append(delete_list, bg_record);
 			continue;
+		}
 
 		if(!smallest_bg_record || 
 		   (smallest_bg_record->node_cnt > bg_record->node_cnt))
@@ -1058,7 +1089,7 @@ extern int down_nodecard(char *bp_name, bitoff_t io_start)
 	list_iterator_destroy(itr);
 	slurm_mutex_unlock(&block_state_mutex);
 	
-	if(bluegene_layout_mode != LAYOUT_DYNAMIC) {
+	if(bg_conf->layout_mode != LAYOUT_DYNAMIC) {
 		debug3("running non-dynamic mode");
 		if(delete_list) {
 			int cnt_set = 0;
@@ -1100,8 +1131,49 @@ extern int down_nodecard(char *bp_name, bitoff_t io_start)
 		goto cleanup;
 	} 
 
+	/* below is only for Dynamic mode */
 	
-	if(smallest_bg_record) {
+	if(delete_list) {
+		int cnt_set = 0;
+		bitstr_t *iobitmap = bit_alloc(bg_conf->numpsets);
+		/* don't lock here since it is handled inside
+		   the put_block_in_error_state
+		*/
+		itr = list_iterator_create(delete_list);
+		while ((bg_record = list_next(itr))) {
+			debug2("combining dynamic block %s "
+			       "(smaller than the create size)",
+			       bg_record->bg_block_id);
+			while(bg_record->job_running > NO_JOB_RUNNING)
+				sleep(1);
+
+			bit_or(iobitmap, bg_record->ionode_bitmap);
+			cnt_set++;
+		}
+		list_iterator_destroy(itr);
+		list_destroy(delete_list);
+		if(!cnt_set) {
+			FREE_NULL_BITMAP(iobitmap);
+			rc = SLURM_ERROR;
+			goto cleanup;
+		}
+		/* set the start to be the same as the start of the
+		   ionode_bitmap.  If no ionodes set (not a small
+		   block) set io_start = 0. */
+		if((io_start = bit_ffs(iobitmap)) == -1) {
+			io_start = 0;
+			if(create_size > bg_conf->nodecard_node_cnt) 
+				blockreq.small128 = 4;
+			else
+				blockreq.small32 = 16;
+		} else if(create_size <= bg_conf->nodecard_node_cnt) 
+			blockreq.small32 = 1;
+		else
+			/* this should never happen */
+			blockreq.small128 = 1;
+		
+		FREE_NULL_BITMAP(iobitmap);		
+	} else if(smallest_bg_record) {
 		debug2("smallest dynamic block is %s",
 		       smallest_bg_record->bg_block_id);
 		if(smallest_bg_record->state == RM_PARTITION_ERROR) {
@@ -1146,15 +1218,15 @@ extern int down_nodecard(char *bp_name, bitoff_t io_start)
 			break;
 		}
 
-		if(create_size != bluegene_nodecard_node_cnt) {
+		if(create_size != bg_conf->nodecard_node_cnt) {
 			blockreq.small128 = blockreq.small32 / 4;
 			blockreq.small32 = 0;
-		}
-		/* set the start to be the same as the start of the
-		   ionode_bitmap.  If no ionodes set (not a small
-		   block) set io_start = 0. */
-		if((io_start = bit_ffs(smallest_bg_record->ionode_bitmap))
-		   == -1)
+			io_start = 0;
+		} else if((io_start =
+			   bit_ffs(smallest_bg_record->ionode_bitmap)) == -1)
+			/* set the start to be the same as the start of the
+			   ionode_bitmap.  If no ionodes set (not a small
+			   block) set io_start = 0. */
 			io_start = 0;
 	} else {
 		switch(create_size) {
@@ -1208,13 +1280,12 @@ extern int down_nodecard(char *bp_name, bitoff_t io_start)
 	delete_list = list_create(NULL);
 	while((bg_record = list_pop(requests))) {
 		slurm_mutex_lock(&block_state_mutex);
-		itr = list_iterator_create(bg_list);
+		itr = list_iterator_create(bg_lists->main);
 		while((found_record = list_next(itr))) {
 			if(!blocks_overlap(bg_record, found_record))
 				continue;
 			list_push(delete_list, found_record);
 			list_remove(itr);
-			num_block_to_free++;
 		}
 		list_iterator_destroy(itr);
 		slurm_mutex_unlock(&block_state_mutex);
@@ -1232,7 +1303,7 @@ extern int down_nodecard(char *bp_name, bitoff_t io_start)
 		      bg_record->bg_block_id);
 		print_bg_record(bg_record);
 		slurm_mutex_lock(&block_state_mutex);
-		list_append(bg_list, bg_record);
+		list_append(bg_lists->main, bg_record);
 		slurm_mutex_unlock(&block_state_mutex);
 		if(bit_overlap(bg_record->ionode_bitmap, 
 			       tmp_record.ionode_bitmap)) {
@@ -1247,7 +1318,7 @@ extern int down_nodecard(char *bp_name, bitoff_t io_start)
 	slurm_mutex_lock(&block_state_mutex);
 	free_block_list(delete_list);
 	list_destroy(delete_list);
-	sort_bg_record_inc_size(bg_list);
+	sort_bg_record_inc_size(bg_lists->main);
 	slurm_mutex_unlock(&block_state_mutex);
 	last_bg_update = time(NULL);	
 
@@ -1279,7 +1350,7 @@ extern int up_nodecard(char *bp_name, bitstr_t *ionode_bitmap)
 	bp_bit = (node_ptr - node_record_table_ptr);
 	
 	slurm_mutex_lock(&block_state_mutex);
-	itr = list_iterator_create(bg_list);
+	itr = list_iterator_create(bg_lists->main);
 	while((bg_record = list_next(itr))) {
 		if(bg_record->job_running != BLOCK_ERROR_STATE)
 			continue;
@@ -1330,12 +1401,12 @@ extern int put_block_in_error_state(bg_record_t *bg_record, int state)
 	info("Setting Block %s to ERROR state.", bg_record->bg_block_id);
 	/* we add the block to these lists so we don't try to schedule
 	   on them. */
-	if(!block_ptr_exist_in_list(bg_job_block_list, bg_record)) {
-		list_push(bg_job_block_list, bg_record);
+	if(!block_ptr_exist_in_list(bg_lists->job_running, bg_record)) {
+		list_push(bg_lists->job_running, bg_record);
 		num_unused_cpus -= bg_record->cpu_cnt;
 	}
-	if(!block_ptr_exist_in_list(bg_booted_block_list, bg_record)) 
-		list_push(bg_booted_block_list, bg_record);
+	if(!block_ptr_exist_in_list(bg_lists->booted, bg_record)) 
+		list_push(bg_lists->booted, bg_record);
 	
 	slurm_mutex_lock(&block_state_mutex);
 	bg_record->job_running = state;
@@ -1343,8 +1414,8 @@ extern int put_block_in_error_state(bg_record_t *bg_record, int state)
 
 	xfree(bg_record->user_name);
 	xfree(bg_record->target_name);
-	bg_record->user_name = xstrdup(bg_slurm_user_name);
-	bg_record->target_name = xstrdup(bg_slurm_user_name);
+	bg_record->user_name = xstrdup(bg_conf->slurm_user_name);
+	bg_record->target_name = xstrdup(bg_conf->slurm_user_name);
 	
 	pw_uid = uid_from_string(bg_record->user_name);
 	if(pw_uid == (uid_t) -1) {
@@ -1355,7 +1426,6 @@ extern int put_block_in_error_state(bg_record_t *bg_record, int state)
 	slurm_mutex_unlock(&block_state_mutex);
 
 	trigger_block_error();
-	last_bg_update = time(NULL);
 
 	return SLURM_SUCCESS;
 }
@@ -1372,9 +1442,10 @@ extern int resume_block(bg_record_t *bg_record)
 	     "being in an error state.",
 	      bg_record->bg_block_id);
 
-	if(remove_from_bg_list(bg_job_block_list, bg_record) == SLURM_SUCCESS) 
+	if(remove_from_bg_list(bg_lists->job_running, bg_record)
+	   == SLURM_SUCCESS) 
 		num_unused_cpus += bg_record->cpu_cnt;
-	remove_from_bg_list(bg_booted_block_list, bg_record);
+	remove_from_bg_list(bg_lists->booted, bg_record);
 
 	bg_record->job_running = NO_JOB_RUNNING;
 	bg_record->state = RM_PARTITION_FREE;
@@ -1385,7 +1456,6 @@ extern int resume_block(bg_record_t *bg_record)
 
 /************************* local functions ***************************/
 
-#ifdef HAVE_BG
 static int _addto_node_list(bg_record_t *bg_record, int *start, int *end)
 {
 	int node_count=0;
@@ -1420,7 +1490,7 @@ static int _addto_node_list(bg_record_t *bg_record, int *start, int *end)
 				
 				snprintf(node_name_tmp, sizeof(node_name_tmp),
 					 "%s%c%c%c", 
-					 bg_slurm_node_prefix,
+					 bg_conf->slurm_node_prefix,
 					 alpha_num[x], alpha_num[y],
 					 alpha_num[z]);		
 				
@@ -1460,6 +1530,3 @@ static int _ba_node_cmpf_inc(ba_node_t *node_a, ba_node_t *node_b)
 }
 
 
-#endif //HAVE_BG
-
-
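
The record functions above all resolve the block owner the same way; a sketch of the lookup, assuming the SLURM 2.0-era uid_from_string() that returns (uid_t) -1 for an unknown name:

    uid_t pw_uid = uid_from_string(bg_record->user_name);

    if (pw_uid == (uid_t) -1)
            error("No such user: %s", bg_record->user_name);
    else
            bg_record->user_uid = pw_uid;
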
diff --git a/src/plugins/select/bluegene/plugin/bg_record_functions.h b/src/plugins/select/bluegene/plugin/bg_record_functions.h
index 1727e24fa6b960bf1b3ae427b1fec60be6487470..12bf010283c3ae7724a3b46141249653a09f0d67 100644
--- a/src/plugins/select/bluegene/plugin/bg_record_functions.h
+++ b/src/plugins/select/bluegene/plugin/bg_record_functions.h
@@ -8,7 +8,8 @@
  *  Written by Danny Auble <da@llnl.gov>
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/plugins/select/bluegene/plugin/bg_switch_connections.c b/src/plugins/select/bluegene/plugin/bg_switch_connections.c
index c8a382c28a81add8d376b71ece4973e11146538a..59f7d74e18b2f5456d15e94faaf0495bc1bce0b4 100644
--- a/src/plugins/select/bluegene/plugin/bg_switch_connections.c
+++ b/src/plugins/select/bluegene/plugin/bg_switch_connections.c
@@ -2,14 +2,15 @@
  *  bg_switch_connections.c - Blue Gene switch management functions, 
  *  establish switch connections
  *
- *  $Id: bg_switch_connections.c 17104 2009-04-01 17:20:31Z da $
+ *  $Id: bg_switch_connections.c 17483 2009-05-13 18:31:42Z da $
  *****************************************************************************
  *  Copyright (C) 2004 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Dan Phung <phung4@llnl.gov> and Danny Auble <da@llnl.gov>
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -221,7 +222,7 @@ static int _add_switch_conns(rm_switch_t* curr_switch,
 				break;	
 			}
 			conn[i].part_state = RM_PARTITION_READY;
-			debug2("adding %d -> %d", source, ba_conn->port_tar);
+			debug3("adding %d -> %d", source, ba_conn->port_tar);
 			list_push(conn_list, &conn[i]);
 		}
 	}
@@ -236,7 +237,7 @@ static int _add_switch_conns(rm_switch_t* curr_switch,
 			return SLURM_ERROR;
 		} 
 	} else {
-		debug("we got a switch with no connections");
+		debug2("we got a switch with no connections");
 		list_destroy(conn_list);
                 return SLURM_ERROR;
 	}
@@ -287,12 +288,12 @@ static int _used_switches(ba_node_t* ba_node)
 	int i = 0, j = 0, switch_count = 0;
 	int source = 0;
 	
-	debug4("checking node %c%c%c",
+	debug5("checking node %c%c%c",
 	       alpha_num[ba_node->coord[X]], 
 	       alpha_num[ba_node->coord[Y]], 
 	       alpha_num[ba_node->coord[Z]]);
 	for(i=0; i<BA_SYSTEM_DIMENSIONS; i++) {
-		debug4("dim %d", i);
+		debug5("dim %d", i);
 		ba_switch = &ba_node->axis_switch[i];
 		for(j=0; j<num_connections; j++) {
 			/* set the source port(-) to check */
@@ -314,7 +315,7 @@ static int _used_switches(ba_node_t* ba_node)
 			ba_conn = &ba_switch->int_wire[source];
 			if(ba_conn->used && ba_conn->port_tar != source) {
 				switch_count++;
-				debug4("used");
+				debug5("used");
 				break;
 			}
 		}
@@ -338,7 +339,7 @@ extern int configure_small_block(bg_record_t *bg_record)
 	rm_nodecard_t *ncard;
 	rm_nodecard_list_t *ncard_list = NULL;
 	int num, i;
-	int use_nc[bluegene_bp_nodecard_cnt];
+	int use_nc[bg_conf->bp_nodecard_cnt];
 	double nc_pos = 0;
 #endif
 	xassert(bg_record->ionode_bitmap);
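
Note that use_nc above becomes a C99 variable-length array once the nodecard count moves into bg_conf: the array is sized at run time from the loaded configuration rather than a link-time constant. Illustrative fragment:

    int use_nc[bg_conf->bp_nodecard_cnt];   /* one flag per nodecard */

    memset(use_nc, 0, sizeof(use_nc));      /* sizeof is valid on VLAs */
    use_nc[0] = 1;                          /* mark nodecard 0 used */

(The real function fills the flags while scanning the psets; the memset is shown only to make the sketch self-contained.)
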
@@ -348,7 +349,7 @@ extern int configure_small_block(bg_record_t *bg_record)
 		return SLURM_ERROR;
 	}
 /* 	info("configuring small block on ionodes %s out of %d ncs",  */
-/* 	     bg_record->ionodes, bluegene_bp_nodecard_cnt); */
+/* 	     bg_record->ionodes, bg_conf->bp_nodecard_cnt); */
 #ifdef HAVE_BG_FILES	
 	/* set that we are doing a small block */
 	if ((rc = bridge_set_data(bg_record->bg_block, RM_PartitionSmall, 
@@ -358,7 +359,7 @@ extern int configure_small_block(bg_record_t *bg_record)
 		      bg_err_str(rc));
 	}
 
-	num_ncards = bg_record->node_cnt/bluegene_nodecard_node_cnt;
+	num_ncards = bg_record->node_cnt/bg_conf->nodecard_node_cnt;
 	if(num_ncards < 1) {
 		num_ncards = 1;
 		sub_nodecard = 1;
@@ -367,11 +368,11 @@ extern int configure_small_block(bg_record_t *bg_record)
 
 	/* find out how many nodecards to get for each ionode */
 		
-	for(i = 0; i<bluegene_numpsets; i++) {
+	for(i = 0; i<bg_conf->numpsets; i++) {
 		if(bit_test(bg_record->ionode_bitmap, i)) {
-			if(bluegene_nc_ratio > 1) {
+			if(bg_conf->nc_ratio > 1) {
 				int j=0;
-				for(j=0; j<bluegene_nc_ratio; j++)
+				for(j=0; j<bg_conf->nc_ratio; j++)
 					use_nc[(int)nc_pos+j] = 1;
 			} else {
 				use_nc[(int)nc_pos] = 1;
@@ -379,7 +380,7 @@ extern int configure_small_block(bg_record_t *bg_record)
 					ionode_card = 1;
 			}
 		}
-		nc_pos += bluegene_nc_ratio;
+		nc_pos += bg_conf->nc_ratio;
 	}
 
 	if ((rc = bridge_set_data(bg_record->bg_block,
@@ -687,8 +688,8 @@ extern int configure_block_switches(bg_record_t * bg_record)
 		goto cleanup;
 	}
 #endif	
-	debug3("BP count %d", bg_record->bp_count);
-	debug3("switch count %d", bg_record->switch_count);
+	debug4("BP count %d", bg_record->bp_count);
+	debug4("switch count %d", bg_record->switch_count);
 
 	list_iterator_reset(itr);
 	while ((ba_node = list_next(itr))) {
@@ -700,13 +701,13 @@ extern int configure_block_switches(bg_record_t * bg_record)
 		}
 #endif
 		if(!ba_node->used) {
-			debug3("%c%c%c is a passthrough, "
+			debug4("%c%c%c is a passthrough, "
 			       "not including in request",
 			       alpha_num[ba_node->coord[X]], 
 			       alpha_num[ba_node->coord[Y]], 
 			       alpha_num[ba_node->coord[Z]]);
 		} else {
-			debug2("using node %c%c%c",
+			debug3("using node %c%c%c",
 			       alpha_num[ba_node->coord[X]], 
 			       alpha_num[ba_node->coord[Y]], 
 			       alpha_num[ba_node->coord[Z]]);
@@ -757,7 +758,7 @@ extern int configure_block_switches(bg_record_t * bg_record)
 			if(_add_switch_conns(coord_switch[i],
 					     &ba_node->axis_switch[i])
 			   == SLURM_SUCCESS) {
-				debug2("adding switch dim %d", i);
+				debug3("adding switch dim %d", i);
 				if (first_switch){
 					if ((rc = bridge_set_data(
 						     bg_record->bg_block,
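
The remaining hunks in this file only demote log levels (debug to debug2, debug2 to debug3, and so on), so per-switch wiring chatter needs a higher SlurmctldDebug setting before it prints. The calls assume SLURM's leveled logging API, roughly:

    debug2("we got a switch with no connections");  /* operator-visible */
    debug3("adding %d -> %d", source, port_tar);    /* wiring detail */
    debug5("dim %d", i);                            /* innermost tracing */

(Variable names are taken from the surrounding hunks.)
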
diff --git a/src/plugins/select/bluegene/plugin/block_sys.c b/src/plugins/select/bluegene/plugin/block_sys.c
index 6abd01ef7fccca3db41df11fbd42abf120cc1d60..0df90170554c67c506ef253396ba3ca9e97749da 100755
--- a/src/plugins/select/bluegene/plugin/block_sys.c
+++ b/src/plugins/select/bluegene/plugin/block_sys.c
@@ -1,14 +1,15 @@
 /*****************************************************************************\
  *  block_sys.c - component used for wiring up the blocks
  *
- *  $Id: block_sys.c 17162 2009-04-06 20:18:23Z da $
+ *  $Id: block_sys.c 17680 2009-06-02 21:27:50Z da $
  *****************************************************************************
  *  Copyright (C) 2004 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Dan Phung <phung4@llnl.gov> and Danny Auble <da@llnl.gov>
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -103,7 +104,7 @@ static void _pre_allocate(bg_record_t *bg_record)
 {
 #ifdef HAVE_BG_FILES
 	int rc;
-	int send_psets=bluegene_numpsets;
+	int send_psets=bg_conf->numpsets;
 
 #ifdef HAVE_BGL
 	if ((rc = bridge_set_data(bg_record->bg_block, RM_PartitionBlrtsImg,   
@@ -153,9 +154,9 @@ static void _pre_allocate(bg_record_t *bg_record)
 		error("bridge_set_data(RM_PartitionConnection)", 
 		      bg_err_str(rc));
 	
-	/* rc = bluegene_bp_node_cnt/bg_record->node_cnt; */
+	/* rc = bg_conf->bp_node_cnt/bg_record->node_cnt; */
 /* 	if(rc > 1) */
-/* 		send_psets = bluegene_numpsets/rc; */
+/* 		send_psets = bg_conf->numpsets/rc; */
 	
 	if ((rc = bridge_set_data(bg_record->bg_block, RM_PartitionPsetsPerBP, 
 				  &send_psets)) != STATUS_OK)
@@ -163,7 +164,7 @@ static void _pre_allocate(bg_record_t *bg_record)
 		      bg_err_str(rc));
 	
 	if ((rc = bridge_set_data(bg_record->bg_block, RM_PartitionUserName, 
-				  bg_slurm_user_name)) 
+				  bg_conf->slurm_user_name)) 
 	    != STATUS_OK)
 		error("bridge_set_data(RM_PartitionUserName)", bg_err_str(rc));
 	
@@ -223,11 +224,11 @@ static int _post_allocate(bg_record_t *bg_record)
 
 		
 		bg_record->target_name = 
-			xstrdup(bg_slurm_user_name);
+			xstrdup(bg_conf->slurm_user_name);
 
 		xfree(bg_record->user_name);
 		bg_record->user_name = 
-			xstrdup(bg_slurm_user_name);
+			xstrdup(bg_conf->slurm_user_name);
 		
 
 		my_uid = uid_from_string(bg_record->user_name);
@@ -381,7 +382,7 @@ extern int configure_block(bg_record_t *bg_record)
 #endif
 	_pre_allocate(bg_record);
 
-	if(bg_record->cpu_cnt < procs_per_node)
+	if(bg_record->cpu_cnt < bg_conf->procs_per_bp)
 		configure_small_block(bg_record);
 	else
 		configure_block_switches(bg_record);
@@ -394,7 +395,7 @@ extern int configure_block(bg_record_t *bg_record)
 /*
  * Download from MMCS the initial BG block information
  */
-int read_bg_blocks()
+int read_bg_blocks(List curr_block_list)
 {
 	int rc = SLURM_SUCCESS;
 
@@ -485,7 +486,7 @@ int read_bg_blocks()
 		/* New BG Block record */		
 		
 		bg_record = xmalloc(sizeof(bg_record_t));
-		list_push(bg_curr_block_list, bg_record);
+		list_push(curr_block_list, bg_record);
 		
 		bg_record->bg_block_id = xstrdup(tmp_char);
 		free(tmp_char);
@@ -505,7 +506,7 @@ int read_bg_blocks()
 			goto clean_up;
 
 		bg_record->node_cnt = bp_cnt;
-		bg_record->cpu_cnt = bluegene_proc_ratio * bg_record->node_cnt;
+		bg_record->cpu_cnt = bg_conf->proc_ratio * bg_record->node_cnt;
 #endif
 		bg_record->job_running = NO_JOB_RUNNING;
 		
@@ -592,7 +593,7 @@ int read_bg_blocks()
 			}
 #ifdef HAVE_BGL
 			/* Translate nodecard count to ionode count */
-			if((io_cnt = nc_cnt * bluegene_io_ratio))
+			if((io_cnt = nc_cnt * bg_conf->io_ratio))
 				io_cnt--;
 
 			nc_id = 0;
@@ -600,9 +601,9 @@ int read_bg_blocks()
 				_find_nodecard(block_ptr, &nc_id);
 			
 			bg_record->node_cnt = 
-				nc_cnt * bluegene_nodecard_node_cnt;
+				nc_cnt * bg_conf->nodecard_node_cnt;
 			bg_record->cpu_cnt =
-				bluegene_proc_ratio * bg_record->node_cnt;
+				bg_conf->proc_ratio * bg_record->node_cnt;
 
 			if ((rc = bridge_get_data(ncard, 
 						  RM_NodeCardQuarter, 
@@ -610,11 +611,11 @@ int read_bg_blocks()
 				error("bridge_get_data(CardQuarter): %d",rc);
 				goto clean_up;
 			}
-			io_start *= bluegene_quarter_ionode_cnt;
-			io_start += bluegene_nodecard_ionode_cnt * (nc_id%4);
+			io_start *= bg_conf->quarter_ionode_cnt;
+			io_start += bg_conf->nodecard_ionode_cnt * (nc_id%4);
 #else
 			/* Translate nodecard count to ionode count */
-			if((io_cnt = nc_cnt * bluegene_io_ratio))
+			if((io_cnt = nc_cnt * bg_conf->io_ratio))
 				io_cnt--;
 
 			if ((rc = bridge_get_data(ncard, 
@@ -632,8 +633,8 @@ int read_bg_blocks()
 			*/
 			nc_id = atoi((char*)tmp_char+1);
 			free(tmp_char);
-			io_start = nc_id * bluegene_io_ratio;
-			if(bg_record->node_cnt < bluegene_nodecard_node_cnt) {
+			io_start = nc_id * bg_conf->io_ratio;
+			if(bg_record->node_cnt < bg_conf->nodecard_node_cnt) {
 				rm_ionode_t *ionode;
 
 				/* figure out the ionode we are using */
@@ -679,9 +680,9 @@ int read_bg_blocks()
 			       bg_record->ionodes);
 		} else {
 #ifdef HAVE_BGL
-			bg_record->cpu_cnt = procs_per_node 
+			bg_record->cpu_cnt = bg_conf->procs_per_bp 
 				* bg_record->bp_count;
-			bg_record->node_cnt =  bluegene_bp_node_cnt
+			bg_record->node_cnt =  bg_conf->bp_node_cnt
 				* bg_record->bp_count;
 #endif
 			if ((rc = bridge_get_data(block_ptr, 
@@ -697,7 +698,7 @@ int read_bg_blocks()
 			   node we don't want anything set we also
 			   don't want the bg_record->ionodes set.
 			*/
-			bg_record->ionode_bitmap = bit_alloc(bluegene_numpsets);
+			bg_record->ionode_bitmap = bit_alloc(bg_conf->numpsets);
 		}		
 		
 		bg_record->bg_block_list =
@@ -757,7 +758,7 @@ int read_bg_blocks()
 			snprintf(node_name_tmp, 
 				 sizeof(node_name_tmp),
 				 "%s%c%c%c", 
-				 bg_slurm_node_prefix,
+				 bg_conf->slurm_node_prefix,
 				 alpha_num[coord[X]], alpha_num[coord[Y]],
 				 alpha_num[coord[Z]]);
 			
@@ -803,10 +804,10 @@ int read_bg_blocks()
 		/* We can stop processing information now since we
 		   don't need to rest of the information to decide if
 		   this is the correct block. */
-		if(bluegene_layout_mode == LAYOUT_DYNAMIC) {
+		if(bg_conf->layout_mode == LAYOUT_DYNAMIC) {
 			bg_record_t *tmp_record = xmalloc(sizeof(bg_record_t));
 			copy_bg_record(bg_record, tmp_record);
-			list_push(bg_list, tmp_record);
+			list_push(bg_lists->main, tmp_record);
 		}
 
 		if ((rc = bridge_get_data(block_ptr, RM_PartitionUsersNum,
@@ -818,9 +819,9 @@ int read_bg_blocks()
 			if(bp_cnt==0) {
 				
 				bg_record->user_name = 
-					xstrdup(bg_slurm_user_name);
+					xstrdup(bg_conf->slurm_user_name);
 				bg_record->target_name = 
-					xstrdup(bg_slurm_user_name);
+					xstrdup(bg_conf->slurm_user_name);
 				
 			} else {
 				user_name = NULL;
@@ -844,7 +845,7 @@ int read_bg_blocks()
 				if(!bg_record->boot_state) {
 					
 					bg_record->target_name = 
-						xstrdup(bg_slurm_user_name);
+						xstrdup(bg_conf->slurm_user_name);
 					
 				} else
 					bg_record->target_name = 
@@ -964,7 +965,7 @@ int read_bg_blocks()
 
 #endif
 
-extern int load_state_file(char *dir_name)
+extern int load_state_file(List curr_block_list, char *dir_name)
 {
 	int state_fd, i, j=0;
 	char *state_file = NULL;
@@ -995,7 +996,7 @@ extern int load_state_file(char *dir_name)
 		return SLURM_SUCCESS;
 	}
 
-	xassert(bg_curr_block_list);
+	xassert(curr_block_list);
 
 	state_file = xstrdup(dir_name);
 	xstrcat(state_file, "/block_state");
@@ -1064,13 +1065,13 @@ extern int load_state_file(char *dir_name)
 		 * everything else should have been set up already */
 		if(bg_info_record->state == RM_PARTITION_ERROR) {
 			if((bg_record = find_bg_record_in_list(
-				    bg_curr_block_list,
+				    curr_block_list,
 				    bg_info_record->bg_block_id)))
 				/* put_block_in_error_state should be
-				   called after the bg_list has been
+				   called after bg_lists->main has been
 				   made.  We can't call it here since
 				   this record isn't the record kept
-				   around in bg_list.
+				   around in bg_lists->main.
 				*/
 				bg_record->state = bg_info_record->state;
 		}
@@ -1100,7 +1101,7 @@ extern int load_state_file(char *dir_name)
 	removable_set_bps(non_usable_nodes);
 
 	node_bitmap = bit_alloc(node_record_count);	
-	ionode_bitmap = bit_alloc(bluegene_numpsets);	
+	ionode_bitmap = bit_alloc(bg_conf->numpsets);	
 	for (i=0; i<node_select_ptr->record_count; i++) {
 		bg_info_record = &(node_select_ptr->bg_info_array[i]);
 		
@@ -1125,10 +1126,10 @@ extern int load_state_file(char *dir_name)
 		j = 0;
 		while(bg_info_record->ionode_inx[j] >= 0) {
 			if (bg_info_record->ionode_inx[j+1]
-			    >= bluegene_numpsets) {
+			    >= bg_conf->numpsets) {
 				fatal("Job state recovered incompatible with "
 					"bluegene.conf. ionodes=%u state=%d",
-					bluegene_numpsets,
+					bg_conf->numpsets,
 					bg_info_record->ionode_inx[j+1]);
 			}
 			bit_nset(ionode_bitmap,
@@ -1146,22 +1147,22 @@ extern int load_state_file(char *dir_name)
 			xstrdup(bg_info_record->ionodes);
 		bg_record->ionode_bitmap = bit_copy(ionode_bitmap);
 		/* put_block_in_error_state should be
-		   called after the bg_list has been
+		   called after bg_lists->main has been
 		   made.  We can't call it here since
 		   this record isn't the record kept
-		   around in bg_list.
+		   around in bg_lists->main.
 		*/
 		bg_record->state = bg_info_record->state;
 		bg_record->job_running = NO_JOB_RUNNING;
 
-		bg_record->bp_count = bit_size(node_bitmap);
+		bg_record->bp_count = bit_set_count(node_bitmap);
 		bg_record->node_cnt = bg_info_record->node_cnt;
-		if(bluegene_bp_node_cnt > bg_record->node_cnt) {
-			ionodes = bluegene_bp_node_cnt 
+		if(bg_conf->bp_node_cnt > bg_record->node_cnt) {
+			ionodes = bg_conf->bp_node_cnt 
 				/ bg_record->node_cnt;
-			bg_record->cpu_cnt = procs_per_node / ionodes;
+			bg_record->cpu_cnt = bg_conf->procs_per_bp / ionodes;
 		} else {
-			bg_record->cpu_cnt = procs_per_node
+			bg_record->cpu_cnt = bg_conf->procs_per_bp
 				* bg_record->bp_count;
 		}
 #ifdef HAVE_BGL
@@ -1172,8 +1173,8 @@ extern int load_state_file(char *dir_name)
 
 		process_nodes(bg_record, true);
 
-		bg_record->target_name = xstrdup(bg_slurm_user_name);
-		bg_record->user_name = xstrdup(bg_slurm_user_name);
+		bg_record->target_name = xstrdup(bg_conf->slurm_user_name);
+		bg_record->user_name = xstrdup(bg_conf->slurm_user_name);
 			
 		my_uid = uid_from_string(bg_record->user_name);
 		if (my_uid == (uid_t) -1) {
@@ -1197,7 +1198,8 @@ extern int load_state_file(char *dir_name)
 		for(j=0; j<BA_SYSTEM_DIMENSIONS; j++) 
 			geo[j] = bg_record->geo[j];
 				
-		if(bluegene_layout_mode == LAYOUT_OVERLAP) {
+		if((bg_conf->layout_mode == LAYOUT_OVERLAP)
+		   || bg_record->full_block) {
 			reset_ba_system(false);
 			removable_set_bps(non_usable_nodes);
 		}
@@ -1218,7 +1220,7 @@ extern int load_state_file(char *dir_name)
 
 			
 		snprintf(temp, sizeof(temp), "%s%s",
-			 bg_slurm_node_prefix,
+			 bg_conf->slurm_node_prefix,
 			 name);
 		
 		xfree(name);
@@ -1237,11 +1239,11 @@ extern int load_state_file(char *dir_name)
 			
 		configure_block(bg_record);
 		blocks++;
-		list_push(bg_curr_block_list, bg_record);		
-		if(bluegene_layout_mode == LAYOUT_DYNAMIC) {
+		list_push(curr_block_list, bg_record);		
+		if(bg_conf->layout_mode == LAYOUT_DYNAMIC) {
 			bg_record_t *tmp_record = xmalloc(sizeof(bg_record_t));
 			copy_bg_record(bg_record, tmp_record);
-			list_push(bg_list, tmp_record);
+			list_push(bg_lists->main, tmp_record);
 		}
 	}
 
@@ -1249,7 +1251,7 @@ extern int load_state_file(char *dir_name)
 	FREE_NULL_BITMAP(ionode_bitmap);
 	FREE_NULL_BITMAP(node_bitmap);
 
-	sort_bg_record_inc_size(bg_curr_block_list);
+	sort_bg_record_inc_size(curr_block_list);
 	slurm_mutex_unlock(&block_state_mutex);
 		
 	info("Recovered %d blocks", blocks);
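
Both entry points in this file now take the destination list explicitly instead of pushing onto the old bg_curr_block_list global, so the caller decides which list a (re)read populates. A hypothetical caller, assuming destroy_bg_record as the element destructor and the usual state-save directory:

    List curr_block_list = list_create(destroy_bg_record);

    #ifdef HAVE_BG_FILES
            read_bg_blocks(curr_block_list);        /* live MMCS blocks */
    #endif
            load_state_file(curr_block_list,        /* overlay saved state */
                            slurmctld_conf.state_save_location);
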
diff --git a/src/plugins/select/bluegene/plugin/bluegene.c b/src/plugins/select/bluegene/plugin/bluegene.c
index 0378a615a75046e259cb8c06108ef45e90efed07..c7b64845e6d02ac30afd5edfab78259686794ea6 100644
--- a/src/plugins/select/bluegene/plugin/bluegene.c
+++ b/src/plugins/select/bluegene/plugin/bluegene.c
@@ -1,14 +1,15 @@
 /*****************************************************************************\
  *  bluegene.c - blue gene node configuration processing module. 
  *
- *  $Id: bluegene.c 17202 2009-04-09 16:56:23Z da $
+ *  $Id: bluegene.c 17605 2009-05-27 16:21:13Z da $
  *****************************************************************************
  *  Copyright (C) 2004 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Danny Auble <auble1@llnl.gov> et. al.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -46,47 +47,10 @@
 
 #define _DEBUG 0
 
-char* bg_conf = NULL;
-
 /* Global variables */
-List bg_list = NULL;			/* total list of bg_record entries */
-List bg_curr_block_list = NULL;  	/* current bg blocks in bluegene.conf*/
-List bg_job_block_list = NULL;  	/* jobs running in these blocks */
-List bg_booted_block_list = NULL;  	/* blocks that are booted */
-List bg_freeing_list = NULL;  	        /* blocks that being freed */
-
-#ifdef HAVE_BGL
-List bg_blrtsimage_list = NULL;
-#endif
-List bg_linuximage_list = NULL;
-List bg_mloaderimage_list = NULL;
-List bg_ramdiskimage_list = NULL;
-#ifdef HAVE_BGL
-char *default_blrtsimage = NULL;
-#endif
-List bg_valid_small32 = NULL;
-List bg_valid_small64 = NULL;
-List bg_valid_small128 = NULL;
-List bg_valid_small256 = NULL;
-char *default_linuximage = NULL;
-char *default_mloaderimage = NULL, *default_ramdiskimage = NULL;
-char *bridge_api_file = NULL; 
-char *bg_slurm_user_name = NULL;
-char *bg_slurm_node_prefix = NULL;
-bg_layout_t bluegene_layout_mode = NO_VAL;
-double bluegene_io_ratio = 0.0;
-double bluegene_nc_ratio = 0.0;
-uint32_t bluegene_smallest_block = 512;
-uint16_t bluegene_proc_ratio = 0;
-uint16_t bluegene_numpsets = 0;
-uint16_t bluegene_bp_node_cnt = 0;
-uint16_t bluegene_bp_nodecard_cnt = 0;
-uint16_t bluegene_quarter_node_cnt = 0;
-uint16_t bluegene_quarter_ionode_cnt = 0;
-uint16_t bluegene_nodecard_node_cnt = 0;
-uint16_t bluegene_nodecard_ionode_cnt = 0;
-uint16_t bridge_api_verb = 0;
 
+bg_config_t *bg_conf = NULL;
+bg_lists_t *bg_lists = NULL;
 bool agent_fini = false;
 time_t last_bg_update;
 pthread_mutex_t block_state_mutex = PTHREAD_MUTEX_INITIALIZER;
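
For orientation: the bg_config_t and bg_lists_t types used throughout these hunks replace the per-file globals deleted above. Their real definitions live in the plugin headers and are not part of this diff; the reconstruction below is inferred from the old globals' types and is only a reading aid:

    typedef struct {
            char       *slurm_user_name;     /* was bg_slurm_user_name */
            char       *slurm_node_prefix;   /* was bg_slurm_node_prefix */
            bg_layout_t layout_mode;         /* was bluegene_layout_mode */
            double      io_ratio;            /* was bluegene_io_ratio */
            double      nc_ratio;            /* was bluegene_nc_ratio */
            uint32_t    smallest_block;      /* was bluegene_smallest_block */
            uint16_t    proc_ratio;          /* was bluegene_proc_ratio */
            uint16_t    numpsets;            /* was bluegene_numpsets */
            uint16_t    bp_node_cnt;         /* was bluegene_bp_node_cnt */
            uint16_t    bp_nodecard_cnt;     /* was bluegene_bp_nodecard_cnt */
            uint16_t    quarter_node_cnt;    /* ... and likewise for the  */
            uint16_t    quarter_ionode_cnt;  /* remaining bluegene_*      */
            uint16_t    nodecard_node_cnt;   /* dimension counters        */
            uint16_t    nodecard_ionode_cnt;
            uint32_t    procs_per_bp;        /* was procs_per_node */
            char       *default_linuximage, *default_mloaderimage,
                       *default_ramdiskimage; /* plus blrtsimage on BGL */
            List        linux_list, mloader_list, ramdisk_list;
    } bg_config_t;

    typedef struct {
            List main;                       /* was bg_list */
            List job_running;                /* was bg_job_block_list */
            List booted;                     /* was bg_booted_block_list */
            List freeing;                    /* was bg_freeing_list */
    } bg_lists_t;
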
@@ -112,9 +76,14 @@ int max_dim[BA_SYSTEM_DIMENSIONS] = { 0 };
 #endif
 
 
+static void _destroy_bg_config(bg_config_t *bg_conf);
+static void _destroy_bg_lists(bg_lists_t *bg_lists);
+
 static void _set_bg_lists();
-static int  _validate_config_nodes(List *bg_found_block_list, char *dir);
-static int _delete_old_blocks(List bg_found_block_list);
+static int  _validate_config_nodes(List curr_block_list, 
+				   List found_block_list, char *dir);
+static int _delete_old_blocks(List curr_block_list, 
+			      List found_block_list);
 static char *_get_bg_conf(void);
 static int  _reopen_bridge_log(void);
 static void _destroy_bitmap(void *object);
@@ -122,6 +91,35 @@ static void _destroy_bitmap(void *object);
 /* Initialize all plugin variables */
 extern int init_bg(void)
 {
+	_set_bg_lists();
+
+	if(!bg_conf)
+		bg_conf = xmalloc(sizeof(bg_config_t));
+
+	xfree(bg_conf->slurm_user_name);
+	xfree(bg_conf->slurm_node_prefix);
+	slurm_conf_lock();
+	xassert(slurmctld_conf.slurm_user_name);
+	xassert(slurmctld_conf.node_prefix);
+	bg_conf->slurm_user_name = xstrdup(slurmctld_conf.slurm_user_name);
+	bg_conf->slurm_node_prefix = xstrdup(slurmctld_conf.node_prefix);
+	slurm_conf_unlock();	
+
+#ifdef HAVE_BGL
+	if(bg_conf->blrts_list)
+		list_destroy(bg_conf->blrts_list);
+	bg_conf->blrts_list = list_create(destroy_image);
+#endif
+	if(bg_conf->linux_list)
+		list_destroy(bg_conf->linux_list);
+	bg_conf->linux_list = list_create(destroy_image);
+	if(bg_conf->mloader_list)
+		list_destroy(bg_conf->mloader_list);
+	bg_conf->mloader_list = list_create(destroy_image);
+	if(bg_conf->ramdisk_list)
+		list_destroy(bg_conf->ramdisk_list);
+	bg_conf->ramdisk_list = list_create(destroy_image);	
+
 	ba_init(NULL);
 
 	info("BlueGene plugin loaded successfully");
@@ -144,74 +142,10 @@ extern void fini_bg(void)
 		pthread_cond_wait(&freed_cond, &freed_cnt_mutex);
 	if(destroy_cnt)
 		pthread_cond_wait(&destroy_cond, &freed_cnt_mutex);
-	
-	if (bg_list) {
-		list_destroy(bg_list);
-		bg_list = NULL;
-	}	
-	if (bg_curr_block_list) {
-		list_destroy(bg_curr_block_list);
-		bg_curr_block_list = NULL;
-	}	
-	if (bg_job_block_list) {
-		list_destroy(bg_job_block_list);
-		bg_job_block_list = NULL;
-		num_unused_cpus = 0;
-	}
-	if (bg_booted_block_list) {
-		list_destroy(bg_booted_block_list);
-		bg_booted_block_list = NULL;
-	}
-		
-#ifdef HAVE_BGL
-	if(bg_blrtsimage_list) {
-		list_destroy(bg_blrtsimage_list);
-		bg_blrtsimage_list = NULL;
-	}
-#endif	
-	if(bg_linuximage_list) {
-		list_destroy(bg_linuximage_list);
-		bg_linuximage_list = NULL;
-	}
-	
-	if(bg_mloaderimage_list) {
-		list_destroy(bg_mloaderimage_list);
-		bg_mloaderimage_list = NULL;
-	}
-
-	if(bg_ramdiskimage_list) {
-		list_destroy(bg_ramdiskimage_list);
-		bg_ramdiskimage_list = NULL;
-	}
-	
-	if(bg_valid_small32) {
-		list_destroy(bg_valid_small32);
-		bg_valid_small32 = NULL;
-	}
-	if(bg_valid_small64) {
-		list_destroy(bg_valid_small64);
-		bg_valid_small64 = NULL;
-	}
-	if(bg_valid_small128) {
-		list_destroy(bg_valid_small128);
-		bg_valid_small128 = NULL;
-	}
-	if(bg_valid_small256) {
-		list_destroy(bg_valid_small256);
-		bg_valid_small256 = NULL;
-	}
 
-#ifdef HAVE_BGL
-	xfree(default_blrtsimage);
-#endif
-	xfree(default_linuximage);
-	xfree(default_mloaderimage);
-	xfree(default_ramdiskimage);
-	xfree(bridge_api_file);
-	xfree(bg_conf);
-	xfree(bg_slurm_user_name);
-	xfree(bg_slurm_node_prefix);
-	
+	_destroy_bg_config(bg_conf);
+	_destroy_bg_lists(bg_lists);
+		
 	ba_fini();
 }
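
The bodies of _destroy_bg_config() and _destroy_bg_lists() are declared above but not shown in this diff. One plausible shape for the lists helper, assuming it simply releases what the struct owns (xfree() frees and NULLs its argument):

    static void _destroy_bg_lists(bg_lists_t *lists)
    {
            if (!lists)
                    return;
            if (lists->main)
                    list_destroy(lists->main);
            if (lists->job_running) {
                    list_destroy(lists->job_running);
                    num_unused_cpus = 0;    /* mirrors the old fini path */
            }
            if (lists->booted)
                    list_destroy(lists->booted);
            if (lists->freeing)
                    list_destroy(lists->freeing);
            xfree(lists);
    }
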
 
@@ -233,8 +167,8 @@ extern bool blocks_overlap(bg_record_t *rec_a, bg_record_t *rec_b)
 	if (!bit_overlap(rec_a->bitmap, rec_b->bitmap)) 
 		return false;
 
-	if((rec_a->node_cnt >= bluegene_bp_node_cnt)
-	   || (rec_b->node_cnt >= bluegene_bp_node_cnt))
+	if((rec_a->node_cnt >= bg_conf->bp_node_cnt)
+	   || (rec_b->node_cnt >= bg_conf->bp_node_cnt))
 		return true;
 	
 	if (!bit_overlap(rec_a->ionode_bitmap, rec_b->ionode_bitmap)) 
@@ -253,7 +187,7 @@ extern int remove_all_users(char *bg_block_id, char *user_name)
 
 	if ((rc = bridge_get_block(bg_block_id,  &block_ptr)) != STATUS_OK) {
 		if(rc == INCONSISTENT_DATA
-		   && bluegene_layout_mode == LAYOUT_DYNAMIC)
+		   && bg_conf->layout_mode == LAYOUT_DYNAMIC)
 			return REMOVE_USER_FOUND;
 			
 		error("bridge_get_block(%s): %s", 
@@ -299,7 +233,7 @@ extern int remove_all_users(char *bg_block_id, char *user_name)
 			error("No user was returned from database");
 			continue;
 		}
-		if(!strcmp(user, bg_slurm_user_name)) {
+		if(!strcmp(user, bg_conf->slurm_user_name)) {
 			free(user);
 			continue;
 		}
@@ -351,7 +285,7 @@ extern int set_block_user(bg_record_t *bg_record)
 		rc = SLURM_ERROR;
 	}	
 	xfree(bg_record->target_name);
-	bg_record->target_name = xstrdup(bg_slurm_user_name);
+	bg_record->target_name = xstrdup(bg_conf->slurm_user_name);
 
 	return rc;
 }
@@ -435,7 +369,7 @@ extern void *block_agent(void *args)
 					last_bg_update = now;
 				} else if(rc == -1)
 					error("Error with update_block_list");
-				if(bluegene_layout_mode == LAYOUT_DYNAMIC) {
+				if(bg_conf->layout_mode == LAYOUT_DYNAMIC) {
 					if((rc = update_freeing_block_list())
 					   == 1) {
 						last_bg_update = now;
@@ -577,8 +511,8 @@ extern int bg_free_block(bg_record_t *bg_record)
 		if (bg_record->state != NO_VAL
 		    && bg_record->state != RM_PARTITION_FREE 
 		    && bg_record->state != RM_PARTITION_DEALLOCATING) {
+			debug2("bridge_destroy %s", bg_record->bg_block_id);
 #ifdef HAVE_BG_FILES
-			debug2("bridge_destroy %s",bg_record->bg_block_id);
 			
 			rc = bridge_destroy_block(bg_record->bg_block_id);
 			if (rc != STATUS_OK) {
@@ -615,7 +549,7 @@ extern int bg_free_block(bg_record_t *bg_record)
 		slurm_mutex_unlock(&block_state_mutex);			
 		sleep(3);
 	}
-	remove_from_bg_list(bg_booted_block_list, bg_record);
+	remove_from_bg_list(bg_lists->booted, bg_record);
 	slurm_mutex_unlock(&block_state_mutex);			
 		
 	return SLURM_SUCCESS;
@@ -675,9 +609,9 @@ extern void *mult_destroy_block(void *args)
 	int rc;
 #endif
 	slurm_mutex_lock(&freed_cnt_mutex);
-	if ((bg_freeing_list == NULL) 
-	    && ((bg_freeing_list = list_create(destroy_bg_record)) == NULL))
-		fatal("malloc failure in bg_freeing_list");
+	if ((bg_lists->freeing == NULL) 
+	    && ((bg_lists->freeing = list_create(destroy_bg_record)) == NULL))
+		fatal("malloc failure in bg_lists->freeing");
 	slurm_mutex_unlock(&freed_cnt_mutex);
 	
 	/*
@@ -695,15 +629,15 @@ extern void *mult_destroy_block(void *args)
 			continue;
 		}
 		slurm_mutex_lock(&block_state_mutex);
-		remove_from_bg_list(bg_list, bg_record);
-		list_push(bg_freeing_list, bg_record);
+		remove_from_bg_list(bg_lists->main, bg_record);
+		list_push(bg_lists->freeing, bg_record);
 		
 		/* 
 		 * we are only sorting this so that when we send it to
 		 * a tool such as smap it will be in a nice order
 		 */
-		sort_bg_record_inc_size(bg_freeing_list);
-		if(remove_from_bg_list(bg_job_block_list, bg_record) 
+		sort_bg_record_inc_size(bg_lists->freeing);
+		if(remove_from_bg_list(bg_lists->job_running, bg_record) 
 		   == SLURM_SUCCESS) {
 			num_unused_cpus += bg_record->cpu_cnt;
 		}
@@ -719,7 +653,7 @@ extern void *mult_destroy_block(void *args)
 		}
 		debug2("done destroying");
 		slurm_mutex_lock(&block_state_mutex);
-		remove_from_bg_list(bg_freeing_list, bg_record);
+		remove_from_bg_list(bg_lists->freeing, bg_record);
 		slurm_mutex_unlock(&block_state_mutex);
 								
 #ifdef HAVE_BG_FILES
@@ -755,9 +689,9 @@ extern void *mult_destroy_block(void *args)
 	slurm_mutex_lock(&freed_cnt_mutex);
 	destroy_cnt--;
 	if(destroy_cnt == 0) {
-		if(bg_freeing_list) {
-			list_destroy(bg_freeing_list);
-			bg_freeing_list = NULL;
+		if(bg_lists->freeing) {
+			list_destroy(bg_lists->freeing);
+			bg_lists->freeing = NULL;
 		}
 		list_destroy(bg_destroy_block_list);
 		bg_destroy_block_list = NULL;
@@ -781,7 +715,7 @@ extern int free_block_list(List delete_list)
 		return SLURM_SUCCESS;
 
 	/* set up which list to push onto */
-	if(bluegene_layout_mode == LAYOUT_DYNAMIC) {
+	if(bg_conf->layout_mode == LAYOUT_DYNAMIC) {
 		block_list = &bg_destroy_block_list;
 		count = &destroy_cnt;
 	} else {
@@ -799,12 +733,12 @@ extern int free_block_list(List delete_list)
 		/* push block onto queue in a FIFO */
 		debug3("adding %s to be freed", found_record->bg_block_id);
 		if(!block_ptr_exist_in_list(*block_list, found_record)) {
+			num_block_to_free++;
 			if (list_push(*block_list, found_record) == NULL)
 				fatal("malloc failure in _block_op/list_push");
 		} else {
 			error("we had block %s already on the freeing list",
 			      found_record->bg_block_id);
-			num_block_to_free--;
 			continue;
 		}
 		/* already running MAX_AGENTS we don't really need more 
@@ -820,7 +754,7 @@ extern int free_block_list(List delete_list)
 			    PTHREAD_CREATE_DETACHED))
 			error("pthread_attr_setdetachstate error %m");
 		retries = 0;
-		if(bluegene_layout_mode == LAYOUT_DYNAMIC) {
+		if(bg_conf->layout_mode == LAYOUT_DYNAMIC) {
 			while (pthread_create(&thread_agent, 
 					      &attr_agent, 
 					      mult_destroy_block,
@@ -869,23 +803,25 @@ extern int read_bg_conf(void)
 	static time_t last_config_update = (time_t) 0;
 	struct stat config_stat;
 	ListIterator itr = NULL;
-	
+	char* bg_conf_file = NULL;
+
 	debug("Reading the bluegene.conf file");
 
 	/* check if config file has changed */
-	if (!bg_conf)
-		bg_conf = _get_bg_conf();
-	if (stat(bg_conf, &config_stat) < 0)
-		fatal("can't stat bluegene.conf file %s: %m", bg_conf);
+	bg_conf_file = _get_bg_conf();
+
+	if (stat(bg_conf_file, &config_stat) < 0)
+		fatal("can't stat bluegene.conf file %s: %m", bg_conf_file);
 	if (last_config_update) {
 		_reopen_bridge_log();
 		if(last_config_update == config_stat.st_mtime)
-			debug("%s unchanged", bg_conf);
+			debug("%s unchanged", bg_conf_file);
 		else {
 			info("Restart slurmctld for %s changes to take effect", 
-			     bg_conf);
+			     bg_conf_file);
 		}
 		last_config_update = config_stat.st_mtime; 
+		xfree(bg_conf_file);
 		return SLURM_SUCCESS;
 	}
 	last_config_update = config_stat.st_mtime; 
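
The stanza above is a parse-once guard: bluegene.conf is honored only on
the first call, and later calls just compare mtimes and warn that a
restart is required.  A standalone sketch of the idiom (names
hypothetical):

	#include <sys/stat.h>
	#include <time.h>

	static time_t last_mtime = 0;

	/* 1 = parse now (first call), 0 = skip, -1 = stat failed */
	static int _should_parse(const char *path)
	{
		struct stat st;

		if (stat(path, &st) < 0)
			return -1;
		if (last_mtime) {
			if (last_mtime != st.st_mtime)
				info("Restart slurmctld for %s changes "
				     "to take effect", path);
			last_mtime = st.st_mtime;
			return 0;
		}
		last_mtime = st.st_mtime;
		return 1;
	}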
@@ -894,338 +830,351 @@ extern int read_bg_conf(void)
 	/* bg_conf defined in bg_node_alloc.h */
 	tbl = s_p_hashtbl_create(bg_conf_file_options);
 	
-	if(s_p_parse_file(tbl, bg_conf) == SLURM_ERROR)
+	if(s_p_parse_file(tbl, bg_conf_file) == SLURM_ERROR)
 		fatal("something wrong with opening/reading bluegene "
 		      "conf file");
+	xfree(bg_conf_file);
 	
-	_set_bg_lists();	
 #ifdef HAVE_BGL
 	if (s_p_get_array((void ***)&image_array, 
 			  &count, "AltBlrtsImage", tbl)) {
 		for (i = 0; i < count; i++) {
-			list_append(bg_blrtsimage_list, image_array[i]);
+			list_append(bg_conf->blrts_list, image_array[i]);
 			image_array[i] = NULL;
 		}
 	}
-	if (!s_p_get_string(&default_blrtsimage, "BlrtsImage", tbl)) {
-		if(!list_count(bg_blrtsimage_list))
+	if (!s_p_get_string(&bg_conf->default_blrtsimage, "BlrtsImage", tbl)) {
+		if(!list_count(bg_conf->blrts_list))
 			fatal("BlrtsImage not configured "
 			      "in bluegene.conf");
-		itr = list_iterator_create(bg_blrtsimage_list);
+		itr = list_iterator_create(bg_conf->blrts_list);
 		image = list_next(itr);
 		image->def = true;
 		list_iterator_destroy(itr);
-		default_blrtsimage = xstrdup(image->name);
+		bg_conf->default_blrtsimage = xstrdup(image->name);
 		info("Warning: using %s as the default BlrtsImage.  "
 		     "If this isn't correct please set BlrtsImage",
-		     default_blrtsimage); 
+		     bg_conf->default_blrtsimage); 
 	} else {
-		debug3("default BlrtsImage %s", default_blrtsimage);
+		debug3("default BlrtsImage %s", bg_conf->default_blrtsimage);
 		image = xmalloc(sizeof(image_t));
-		image->name = xstrdup(default_blrtsimage);
+		image->name = xstrdup(bg_conf->default_blrtsimage);
 		image->def = true;
 		image->groups = NULL;
 		/* we want it to be first */
-		list_push(bg_blrtsimage_list, image);
+		list_push(bg_conf->blrts_list, image);
 	}
 		
 	if (s_p_get_array((void ***)&image_array, 
 			  &count, "AltLinuxImage", tbl)) {
 		for (i = 0; i < count; i++) {
-			list_append(bg_linuximage_list, image_array[i]);
+			list_append(bg_conf->linux_list, image_array[i]);
 			image_array[i] = NULL;
 		}
 	}
-	if (!s_p_get_string(&default_linuximage, "LinuxImage", tbl)) {
-		if(!list_count(bg_linuximage_list))
+	if (!s_p_get_string(&bg_conf->default_linuximage, "LinuxImage", tbl)) {
+		if(!list_count(bg_conf->linux_list))
 			fatal("LinuxImage not configured "
 			      "in bluegene.conf");
-		itr = list_iterator_create(bg_linuximage_list);
+		itr = list_iterator_create(bg_conf->linux_list);
 		image = list_next(itr);
 		image->def = true;
 		list_iterator_destroy(itr);
-		default_linuximage = xstrdup(image->name);
+		bg_conf->default_linuximage = xstrdup(image->name);
 		info("Warning: using %s as the default LinuxImage.  "
 		     "If this isn't correct please set LinuxImage",
-		     default_linuximage); 
+		     bg_conf->default_linuximage); 
 	} else {
-		debug3("default LinuxImage %s", default_linuximage);
+		debug3("default LinuxImage %s", bg_conf->default_linuximage);
 		image = xmalloc(sizeof(image_t));
-		image->name = xstrdup(default_linuximage);
+		image->name = xstrdup(bg_conf->default_linuximage);
 		image->def = true;
 		image->groups = NULL;
 		/* we want it to be first */
-		list_push(bg_linuximage_list, image);		
+		list_push(bg_conf->linux_list, image);		
 	}
 
 	if (s_p_get_array((void ***)&image_array, 
 			  &count, "AltRamDiskImage", tbl)) {
 		for (i = 0; i < count; i++) {
-			list_append(bg_ramdiskimage_list, image_array[i]);
+			list_append(bg_conf->ramdisk_list, image_array[i]);
 			image_array[i] = NULL;
 		}
 	}
-	if (!s_p_get_string(&default_ramdiskimage,
+	if (!s_p_get_string(&bg_conf->default_ramdiskimage,
 			    "RamDiskImage", tbl)) {
-		if(!list_count(bg_ramdiskimage_list))
+		if(!list_count(bg_conf->ramdisk_list))
 			fatal("RamDiskImage not configured "
 			      "in bluegene.conf");
-		itr = list_iterator_create(bg_ramdiskimage_list);
+		itr = list_iterator_create(bg_conf->ramdisk_list);
 		image = list_next(itr);
 		image->def = true;
 		list_iterator_destroy(itr);
-		default_ramdiskimage = xstrdup(image->name);
+		bg_conf->default_ramdiskimage = xstrdup(image->name);
 		info("Warning: using %s as the default RamDiskImage.  "
 		     "If this isn't correct please set RamDiskImage",
-		     default_ramdiskimage); 
+		     bg_conf->default_ramdiskimage); 
 	} else {
-		debug3("default RamDiskImage %s", default_ramdiskimage);
+		debug3("default RamDiskImage %s",
+		       bg_conf->default_ramdiskimage);
 		image = xmalloc(sizeof(image_t));
-		image->name = xstrdup(default_ramdiskimage);
+		image->name = xstrdup(bg_conf->default_ramdiskimage);
 		image->def = true;
 		image->groups = NULL;
 		/* we want it to be first */
-		list_push(bg_ramdiskimage_list, image);		
+		list_push(bg_conf->ramdisk_list, image);		
 	}
 #else
 
 	if (s_p_get_array((void ***)&image_array, 
 			  &count, "AltCnloadImage", tbl)) {
 		for (i = 0; i < count; i++) {
-			list_append(bg_linuximage_list, image_array[i]);
+			list_append(bg_conf->linux_list, image_array[i]);
 			image_array[i] = NULL;
 		}
 	}
-	if (!s_p_get_string(&default_linuximage, "CnloadImage", tbl)) {
-		if(!list_count(bg_linuximage_list))
+	if (!s_p_get_string(&bg_conf->default_linuximage, "CnloadImage", tbl)) {
+		if(!list_count(bg_conf->linux_list))
 			fatal("CnloadImage not configured "
 			      "in bluegene.conf");
-		itr = list_iterator_create(bg_linuximage_list);
+		itr = list_iterator_create(bg_conf->linux_list);
 		image = list_next(itr);
 		image->def = true;
 		list_iterator_destroy(itr);
-		default_linuximage = xstrdup(image->name);
+		bg_conf->default_linuximage = xstrdup(image->name);
 		info("Warning: using %s as the default CnloadImage.  "
 		     "If this isn't correct please set CnloadImage",
-		     default_linuximage); 
+		     bg_conf->default_linuximage); 
 	} else {
-		debug3("default CnloadImage %s", default_linuximage);
+		debug3("default CnloadImage %s", bg_conf->default_linuximage);
 		image = xmalloc(sizeof(image_t));
-		image->name = xstrdup(default_linuximage);
+		image->name = xstrdup(bg_conf->default_linuximage);
 		image->def = true;
 		image->groups = NULL;
 		/* we want it to be first */
-		list_push(bg_linuximage_list, image);		
+		list_push(bg_conf->linux_list, image);		
 	}
 
 	if (s_p_get_array((void ***)&image_array, 
 			  &count, "AltIoloadImage", tbl)) {
 		for (i = 0; i < count; i++) {
-			list_append(bg_ramdiskimage_list, image_array[i]);
+			list_append(bg_conf->ramdisk_list, image_array[i]);
 			image_array[i] = NULL;
 		}
 	}
-	if (!s_p_get_string(&default_ramdiskimage,
+	if (!s_p_get_string(&bg_conf->default_ramdiskimage,
 			    "IoloadImage", tbl)) {
-		if(!list_count(bg_ramdiskimage_list))
+		if(!list_count(bg_conf->ramdisk_list))
 			fatal("IoloadImage not configured "
 			      "in bluegene.conf");
-		itr = list_iterator_create(bg_ramdiskimage_list);
+		itr = list_iterator_create(bg_conf->ramdisk_list);
 		image = list_next(itr);
 		image->def = true;
 		list_iterator_destroy(itr);
-		default_ramdiskimage = xstrdup(image->name);
+		bg_conf->default_ramdiskimage = xstrdup(image->name);
 		info("Warning: using %s as the default IoloadImage.  "
 		     "If this isn't correct please set IoloadImage",
-		     default_ramdiskimage); 
+		     bg_conf->default_ramdiskimage); 
 	} else {
-		debug3("default IoloadImage %s", default_ramdiskimage);
+		debug3("default IoloadImage %s", bg_conf->default_ramdiskimage);
 		image = xmalloc(sizeof(image_t));
-		image->name = xstrdup(default_ramdiskimage);
+		image->name = xstrdup(bg_conf->default_ramdiskimage);
 		image->def = true;
 		image->groups = NULL;
 		/* we want it to be first */
-		list_push(bg_ramdiskimage_list, image);		
+		list_push(bg_conf->ramdisk_list, image);		
 	}
 
 #endif
 	if (s_p_get_array((void ***)&image_array, 
 			  &count, "AltMloaderImage", tbl)) {
 		for (i = 0; i < count; i++) {
-			list_append(bg_mloaderimage_list, image_array[i]);
+			list_append(bg_conf->mloader_list, image_array[i]);
 			image_array[i] = NULL;
 		}
 	}
-	if (!s_p_get_string(&default_mloaderimage,
+	if (!s_p_get_string(&bg_conf->default_mloaderimage,
 			    "MloaderImage", tbl)) {
-		if(!list_count(bg_mloaderimage_list))
+		if(!list_count(bg_conf->mloader_list))
 			fatal("MloaderImage not configured "
 			      "in bluegene.conf");
-		itr = list_iterator_create(bg_mloaderimage_list);
+		itr = list_iterator_create(bg_conf->mloader_list);
 		image = list_next(itr);
 		image->def = true;
 		list_iterator_destroy(itr);
-		default_mloaderimage = xstrdup(image->name);
+		bg_conf->default_mloaderimage = xstrdup(image->name);
 		info("Warning: using %s as the default MloaderImage.  "
 		     "If this isn't correct please set MloaderImage",
-		     default_mloaderimage); 
+		     bg_conf->default_mloaderimage); 
 	} else {
-		debug3("default MloaderImage %s", default_mloaderimage);
+		debug3("default MloaderImage %s",
+		       bg_conf->default_mloaderimage);
 		image = xmalloc(sizeof(image_t));
-		image->name = xstrdup(default_mloaderimage);
+		image->name = xstrdup(bg_conf->default_mloaderimage);
 		image->def = true;
 		image->groups = NULL;
 		/* we want it to be first */
-		list_push(bg_mloaderimage_list, image);		
+		list_push(bg_conf->mloader_list, image);		
 	}
 
 	if (!s_p_get_uint16(
-		    &bluegene_bp_node_cnt, "BasePartitionNodeCnt", tbl)) {
+		    &bg_conf->bp_node_cnt, "BasePartitionNodeCnt", tbl)) {
 		error("BasePartitionNodeCnt not configured in bluegene.conf "
 		      "defaulting to 512 as BasePartitionNodeCnt");
-		bluegene_bp_node_cnt = 512;
-		bluegene_quarter_node_cnt = 128;
+		bg_conf->bp_node_cnt = 512;
+		bg_conf->quarter_node_cnt = 128;
 	} else {
-		if(bluegene_bp_node_cnt<=0)
+		if(bg_conf->bp_node_cnt <= 0)
 			fatal("You should have more than 0 nodes "
 			      "per base partition");
 
-		bluegene_quarter_node_cnt = bluegene_bp_node_cnt/4;
+		bg_conf->quarter_node_cnt = bg_conf->bp_node_cnt/4;
 	}
-
-	/* select_p_node_init needs to be called before this to set
-	   this up correctly
-	*/
-	bluegene_proc_ratio = procs_per_node/bluegene_bp_node_cnt;
-	if(!bluegene_proc_ratio)
+	/* bg_conf->procs_per_bp should have already been set from the
+	 * node_init */
+	if(bg_conf->procs_per_bp < bg_conf->bp_node_cnt) {
+		fatal("For some reason we have only %u procs per bp, but "
+		      "have %u cnodes per bp.  You need at least the same "
+		      "number of procs as you have cnodes per bp.  "
+		      "Check the NodeName Procs= "
+		      "definition in the slurm.conf.", 
+		      bg_conf->procs_per_bp, bg_conf->bp_node_cnt); 
+	}
+	
+	bg_conf->proc_ratio = bg_conf->procs_per_bp/bg_conf->bp_node_cnt;
+	if(!bg_conf->proc_ratio)
 		fatal("We appear to have less than 1 proc on a cnode.  "
 		      "You specified %u for BasePartitionNodeCnt "
 		      "in the blugene.conf and %u procs "
 		      "for each node in the slurm.conf",
-		      bluegene_bp_node_cnt, procs_per_node);
+		      bg_conf->bp_node_cnt, bg_conf->procs_per_bp);
+	num_unused_cpus = 
+		DIM_SIZE[X] * DIM_SIZE[Y] * DIM_SIZE[Z] 
+		* bg_conf->procs_per_bp;
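
To make the arithmetic concrete, a worked example with purely
illustrative numbers (not taken from any real configuration):

	/* Hypothetical BG/P system: 512-cnode midplanes with 4 procs per
	 * cnode, arranged as a 4 x 4 x 2 torus of midplanes. */
	uint32_t procs_per_bp = 512 * 4;		/* 2048, slurm.conf */
	uint16_t bp_node_cnt  = 512;			/* bluegene.conf */
	uint16_t proc_ratio   = procs_per_bp / bp_node_cnt;	/* 4 */
	int num_unused_cpus   = 4 * 4 * 2 * procs_per_bp;	/* 65536 */

With these values both sanity checks above pass: procs_per_bp is at
least bp_node_cnt, and the ratio is a whole number of procs per cnode.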
 
 	if (!s_p_get_uint16(
-		    &bluegene_nodecard_node_cnt, "NodeCardNodeCnt", tbl)) {
+		    &bg_conf->nodecard_node_cnt, "NodeCardNodeCnt", tbl)) {
 		error("NodeCardNodeCnt not configured in bluegene.conf "
 		      "defaulting to 32 as NodeCardNodeCnt");
-		bluegene_nodecard_node_cnt = 32;
+		bg_conf->nodecard_node_cnt = 32;
 	}
 	
-	if(bluegene_nodecard_node_cnt<=0)
+	if(bg_conf->nodecard_node_cnt<=0)
 		fatal("You should have more than 0 nodes per nodecard");
 
-	bluegene_bp_nodecard_cnt = 
-		bluegene_bp_node_cnt / bluegene_nodecard_node_cnt;
+	bg_conf->bp_nodecard_cnt = 
+		bg_conf->bp_node_cnt / bg_conf->nodecard_node_cnt;
 
-	if (!s_p_get_uint16(&bluegene_numpsets, "Numpsets", tbl))
+	if (!s_p_get_uint16(&bg_conf->numpsets, "Numpsets", tbl))
 		fatal("Warning: Numpsets not configured in bluegene.conf");
 
-	if(bluegene_numpsets) {
+	if(bg_conf->numpsets) {
 		bitstr_t *tmp_bitmap = NULL;
 		int small_size = 1;
 
 		/* THIS IS A HACK TO MAKE A 1 NODECARD SYSTEM WORK */
-		if(bluegene_bp_node_cnt == bluegene_nodecard_node_cnt) {
-			bluegene_quarter_ionode_cnt = 2;
-			bluegene_nodecard_ionode_cnt = 2;
+		if(bg_conf->bp_node_cnt == bg_conf->nodecard_node_cnt) {
+			bg_conf->quarter_ionode_cnt = 2;
+			bg_conf->nodecard_ionode_cnt = 2;
 		} else {
-			bluegene_quarter_ionode_cnt = bluegene_numpsets/4;
-			bluegene_nodecard_ionode_cnt =
-				bluegene_quarter_ionode_cnt/4;
+			bg_conf->quarter_ionode_cnt = bg_conf->numpsets/4;
+			bg_conf->nodecard_ionode_cnt =
+				bg_conf->quarter_ionode_cnt/4;
 		}
 			
 		/* How many nodecards per ionode */
-		bluegene_nc_ratio = 
-			((double)bluegene_bp_node_cnt 
-			 / (double)bluegene_nodecard_node_cnt) 
-			/ (double)bluegene_numpsets;
+		bg_conf->nc_ratio = 
+			((double)bg_conf->bp_node_cnt 
+			 / (double)bg_conf->nodecard_node_cnt) 
+			/ (double)bg_conf->numpsets;
 		/* How many ionodes per nodecard */
-		bluegene_io_ratio = 
-			(double)bluegene_numpsets /
-			((double)bluegene_bp_node_cnt 
-			 / (double)bluegene_nodecard_node_cnt);
-		//info("got %f %f", bluegene_nc_ratio, bluegene_io_ratio);
+		bg_conf->io_ratio = 
+			(double)bg_conf->numpsets /
+			((double)bg_conf->bp_node_cnt 
+			 / (double)bg_conf->nodecard_node_cnt);
+		//info("got %f %f", bg_conf->nc_ratio, bg_conf->io_ratio);
 		/* figure out the smallest block we can have on the
 		   system */
 #ifdef HAVE_BGL
-		if(bluegene_io_ratio >= 2)
-			bluegene_smallest_block=32;
+		if(bg_conf->io_ratio >= 1)
+			bg_conf->smallest_block=32;
 		else
-			bluegene_smallest_block=128;
+			bg_conf->smallest_block=128;
 #else
-		if(bluegene_io_ratio >= 2)
-			bluegene_smallest_block=16;
-		else if(bluegene_io_ratio == 1)
-			bluegene_smallest_block=32;
-		else if(bluegene_io_ratio == .5)
-			bluegene_smallest_block=64;
-		else if(bluegene_io_ratio == .25)
-			bluegene_smallest_block=128;
-		else if(bluegene_io_ratio == .125)
-			bluegene_smallest_block=256;
+		if(bg_conf->io_ratio >= 2)
+			bg_conf->smallest_block=16;
+		else if(bg_conf->io_ratio == 1)
+			bg_conf->smallest_block=32;
+		else if(bg_conf->io_ratio == .5)
+			bg_conf->smallest_block=64;
+		else if(bg_conf->io_ratio == .25)
+			bg_conf->smallest_block=128;
+		else if(bg_conf->io_ratio == .125)
+			bg_conf->smallest_block=256;
 		else {
 			error("unknown ioratio %f.  Can't figure out "
 			      "smallest block size, setting it to midplane");
-			bluegene_smallest_block=512;
+			bg_conf->smallest_block=512;
 		}
 #endif
 		debug("Smallest block possible on this system is %u",
-		      bluegene_smallest_block);
+		      bg_conf->smallest_block);
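
A worked pass through the ratios, again with hypothetical values:

	/* Hypothetical: 512-cnode midplane, 32-cnode nodecards,
	 * Numpsets=32 in bluegene.conf. */
	double nc_ratio = (512.0 / 32.0) / 32.0;  /* 0.5 nodecards/ionode */
	double io_ratio = 32.0 / (512.0 / 32.0);  /* 2.0 ionodes/nodecard */

With io_ratio == 2 the ladder above selects a 16-cnode smallest block on
BG/P, while the relaxed >= 1 test in the BG/L branch yields 32 cnodes.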
 		/* below we are creating all the possible bitmaps for
 		 * each size of small block
 		 */
-		if((int)bluegene_nodecard_ionode_cnt < 1) {
-			bluegene_nodecard_ionode_cnt = 0;
+		if((int)bg_conf->nodecard_ionode_cnt < 1) {
+			bg_conf->nodecard_ionode_cnt = 0;
 		} else {
-			bg_valid_small32 = list_create(_destroy_bitmap);
-			if((small_size = bluegene_nodecard_ionode_cnt))
+			bg_lists->valid_small32 = list_create(_destroy_bitmap);
+			if((small_size = bg_conf->nodecard_ionode_cnt))
 				small_size--;
 			i = 0;
-			while(i<bluegene_numpsets) {
-				tmp_bitmap = bit_alloc(bluegene_numpsets);
+			while(i<bg_conf->numpsets) {
+				tmp_bitmap = bit_alloc(bg_conf->numpsets);
 				bit_nset(tmp_bitmap, i, i+small_size);
 				i += small_size+1;
-				list_append(bg_valid_small32, tmp_bitmap);
+				list_append(bg_lists->valid_small32,
+					    tmp_bitmap);
 			}
 		}
 		/* If we only have 1 nodecard just jump to the end
 		   since this will never need to happen below.
 		   Pretty much a hack to avoid seg fault;). */
-		if(bluegene_bp_node_cnt == bluegene_nodecard_node_cnt) 
+		if(bg_conf->bp_node_cnt == bg_conf->nodecard_node_cnt) 
 			goto no_calc;
 
-		bg_valid_small128 = list_create(_destroy_bitmap);
-		if((small_size = bluegene_quarter_ionode_cnt))
+		bg_lists->valid_small128 = list_create(_destroy_bitmap);
+		if((small_size = bg_conf->quarter_ionode_cnt))
 			small_size--;
 		i = 0;
-		while(i<bluegene_numpsets) {
-			tmp_bitmap = bit_alloc(bluegene_numpsets);
+		while(i<bg_conf->numpsets) {
+			tmp_bitmap = bit_alloc(bg_conf->numpsets);
 			bit_nset(tmp_bitmap, i, i+small_size);
 			i += small_size+1;
-			list_append(bg_valid_small128, tmp_bitmap);
+			list_append(bg_lists->valid_small128, tmp_bitmap);
 		}
 
 #ifndef HAVE_BGL
-		bg_valid_small64 = list_create(_destroy_bitmap);
-		if((small_size = bluegene_nodecard_ionode_cnt * 2))
+		bg_lists->valid_small64 = list_create(_destroy_bitmap);
+		if((small_size = bg_conf->nodecard_ionode_cnt * 2))
 			small_size--;
 		i = 0;
-		while(i<bluegene_numpsets) {
-			tmp_bitmap = bit_alloc(bluegene_numpsets);
+		while(i<bg_conf->numpsets) {
+			tmp_bitmap = bit_alloc(bg_conf->numpsets);
 			bit_nset(tmp_bitmap, i, i+small_size);
 			i += small_size+1;
-			list_append(bg_valid_small64, tmp_bitmap);
+			list_append(bg_lists->valid_small64, tmp_bitmap);
 		}
 
-		bg_valid_small256 = list_create(_destroy_bitmap);
-		if((small_size = bluegene_quarter_ionode_cnt * 2))
+		bg_lists->valid_small256 = list_create(_destroy_bitmap);
+		if((small_size = bg_conf->quarter_ionode_cnt * 2))
 			small_size--;
 		i = 0;
-		while(i<bluegene_numpsets) {
-			tmp_bitmap = bit_alloc(bluegene_numpsets);
+		while(i<bg_conf->numpsets) {
+			tmp_bitmap = bit_alloc(bg_conf->numpsets);
 			bit_nset(tmp_bitmap, i, i+small_size);
 			i += small_size+1;
-			list_append(bg_valid_small256, tmp_bitmap);
+			list_append(bg_lists->valid_small256, tmp_bitmap);
 		}
 #endif			
 	} else {
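
All four window-building loops above share one idiom: carve the pset
space into consecutive, equal-width ionode windows, one bitmap per
window.  A condensed sketch (helper name hypothetical):

	/* Build the list of valid ionode windows of a given width. */
	static List _make_valid_list(int numpsets, int window)
	{
		List valid = list_create(_destroy_bitmap);
		int small_size = window ? window - 1 : 0;
		int i = 0;

		while (i < numpsets) {
			bitstr_t *b = bit_alloc(numpsets);
			bit_nset(b, i, i + small_size); /* contiguous window */
			i += small_size + 1;
			list_append(valid, b);
		}
		return valid;
	}

With numpsets = 32 and window = 2 this yields sixteen bitmaps covering
ionodes {0-1}, {2-3}, ..., {30-31}.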
@@ -1234,10 +1183,11 @@ extern int read_bg_conf(void)
 
 no_calc:
 
-	if (!s_p_get_uint16(&bridge_api_verb, "BridgeAPIVerbose", tbl))
+	if (!s_p_get_uint16(&bg_conf->bridge_api_verb, "BridgeAPIVerbose", tbl))
 		info("Warning: BridgeAPIVerbose not configured "
 		     "in bluegene.conf");
-	if (!s_p_get_string(&bridge_api_file, "BridgeAPILogFile", tbl)) 
+	if (!s_p_get_string(&bg_conf->bridge_api_file,
+			    "BridgeAPILogFile", tbl)) 
 		info("BridgeAPILogFile not configured in bluegene.conf");
 	else
 		_reopen_bridge_log();
@@ -1251,21 +1201,21 @@ no_calc:
 			ba_deny_pass |= PASS_DENY_Z;
 		if(!strcasecmp(layout, "ALL")) 
 			ba_deny_pass |= PASS_DENY_ALL;
-		
+		bg_conf->deny_pass = ba_deny_pass;
 		xfree(layout);
 	}
 
 	if (!s_p_get_string(&layout, "LayoutMode", tbl)) {
 		info("Warning: LayoutMode was not specified in bluegene.conf "
 		     "defaulting to STATIC partitioning");
-		bluegene_layout_mode = LAYOUT_STATIC;
+		bg_conf->layout_mode = LAYOUT_STATIC;
 	} else {
 		if(!strcasecmp(layout,"STATIC")) 
-			bluegene_layout_mode = LAYOUT_STATIC;
+			bg_conf->layout_mode = LAYOUT_STATIC;
 		else if(!strcasecmp(layout,"OVERLAP")) 
-			bluegene_layout_mode = LAYOUT_OVERLAP;
+			bg_conf->layout_mode = LAYOUT_OVERLAP;
 		else if(!strcasecmp(layout,"DYNAMIC")) 
-			bluegene_layout_mode = LAYOUT_DYNAMIC;
+			bg_conf->layout_mode = LAYOUT_DYNAMIC;
 		else {
 			fatal("I don't understand this LayoutMode = %s", 
 			      layout);
@@ -1274,7 +1224,7 @@ no_calc:
 	}
 
 	/* add blocks defined in file */
-	if(bluegene_layout_mode != LAYOUT_DYNAMIC) {
+	if(bg_conf->layout_mode != LAYOUT_DYNAMIC) {
 		if (!s_p_get_array((void ***)&blockreq_array, 
 				   &count, "BPs", tbl)) {
 			info("WARNING: no blocks defined in bluegene.conf, "
@@ -1283,7 +1233,8 @@ no_calc:
 		}
 		
 		for (i = 0; i < count; i++) {
-			add_bg_record(bg_list, NULL, blockreq_array[i], 0, 0);
+			add_bg_record(bg_lists->main, NULL,
+				      blockreq_array[i], 0, 0);
 		}
 	}
 	s_p_hashtbl_destroy(tbl);
@@ -1294,7 +1245,8 @@ no_calc:
 extern int validate_current_blocks(char *dir)
 {
 	/* found bg blocks already on system */
-	List bg_found_block_list = NULL;
+	List curr_block_list = NULL;
+	List found_block_list = NULL;
 	static time_t last_config_update = (time_t) 0;
 	ListIterator itr = NULL;
 	bg_record_t *bg_record = NULL;
@@ -1304,20 +1256,22 @@ extern int validate_current_blocks(char *dir)
 		return SLURM_SUCCESS;
 
 	last_config_update = time(NULL);
-	bg_found_block_list = list_create(NULL);
+	curr_block_list = list_create(destroy_bg_record);
+	found_block_list = list_create(NULL);
 //#if 0	
 	/* Check to see if the configs we have are correct */
-	if (_validate_config_nodes(&bg_found_block_list, dir) == SLURM_ERROR) { 
-		_delete_old_blocks(bg_found_block_list);
+	if (_validate_config_nodes(curr_block_list, found_block_list, dir)
+	    == SLURM_ERROR) { 
+		_delete_old_blocks(curr_block_list, found_block_list);
 	}
 //#endif
 	/* looking for blocks only I created */
-	if(bluegene_layout_mode == LAYOUT_DYNAMIC) {
+	if(bg_conf->layout_mode == LAYOUT_DYNAMIC) {
 		init_wires();
 		info("No blocks created until jobs are submitted");
 	} else {
-		if (create_defined_blocks(bluegene_layout_mode,
-					  bg_found_block_list) 
+		if (create_defined_blocks(bg_conf->layout_mode,
+					  found_block_list) 
 		    == SLURM_ERROR) {
 			/* error in creating the static blocks, so
 			 * blocks referenced by submitted jobs won't
@@ -1328,81 +1282,150 @@ extern int validate_current_blocks(char *dir)
 		}
 	} 
 	
-	/* ok now since bg_list has been made we now can put blocks in
+	/* ok, now that bg_lists->main has been made we can put blocks in
 	   an error state.  This needs to be done outside of a lock;
 	   it doesn't matter much in the first place though, since
 	   no threads are started before this function. */
-	itr = list_iterator_create(bg_list);
+	itr = list_iterator_create(bg_lists->main);
 	while((bg_record = list_next(itr))) {
 		if(bg_record->state == RM_PARTITION_ERROR) 
 			put_block_in_error_state(bg_record, BLOCK_ERROR_STATE);
 	}
 	list_iterator_destroy(itr);
 
-	slurm_mutex_lock(&block_state_mutex);
-	list_destroy(bg_curr_block_list);
-	bg_curr_block_list = NULL;
-	if(bg_found_block_list) {
-		list_destroy(bg_found_block_list);
-		bg_found_block_list = NULL;
-	}
+	list_destroy(curr_block_list);
+	curr_block_list = NULL;
+	list_destroy(found_block_list);
+	found_block_list = NULL;
 
+	slurm_mutex_lock(&block_state_mutex);
 	last_bg_update = time(NULL);
 	blocks_are_created = 1;
-	sort_bg_record_inc_size(bg_list);
+	sort_bg_record_inc_size(bg_lists->main);
 	slurm_mutex_unlock(&block_state_mutex);
 	debug("Blocks have finished being created.");
 	return SLURM_SUCCESS;
 }
 
+static void _destroy_bg_config(bg_config_t *bg_conf)
+{
+	if(bg_conf) {
+#ifdef HAVE_BGL
+		if(bg_conf->blrts_list) {
+			list_destroy(bg_conf->blrts_list);
+			bg_conf->blrts_list = NULL;
+		}
+		xfree(bg_conf->default_blrtsimage);
+#endif	
+		xfree(bg_conf->bridge_api_file);
+		xfree(bg_conf->default_linuximage);
+		xfree(bg_conf->default_mloaderimage);
+		xfree(bg_conf->default_ramdiskimage);
+		if(bg_conf->linux_list) {
+			list_destroy(bg_conf->linux_list);
+			bg_conf->linux_list = NULL;
+		}
+	
+		if(bg_conf->mloader_list) {
+			list_destroy(bg_conf->mloader_list);
+			bg_conf->mloader_list = NULL;
+		}
+
+		if(bg_conf->ramdisk_list) {
+			list_destroy(bg_conf->ramdisk_list);
+			bg_conf->ramdisk_list = NULL;
+		}
+		xfree(bg_conf->slurm_user_name);
+		xfree(bg_conf->slurm_node_prefix);
+		xfree(bg_conf);
+	}
+}
+
+static void _destroy_bg_lists(bg_lists_t *bg_lists)
+{
+	if(bg_lists) {
+		if (bg_lists->booted) {
+			list_destroy(bg_lists->booted);
+			bg_lists->booted = NULL;
+		}
+
+		if (bg_lists->freeing) {
+			list_destroy(bg_lists->freeing);
+			bg_lists->freeing = NULL;
+		}	
+
+		if (bg_lists->job_running) {
+			list_destroy(bg_lists->job_running);
+			bg_lists->job_running = NULL;
+			num_unused_cpus = 0;
+		}
+
+		if (bg_lists->main) {
+			list_destroy(bg_lists->main);
+			bg_lists->main = NULL;
+		}	
+
+		if(bg_lists->valid_small32) {
+			list_destroy(bg_lists->valid_small32);
+			bg_lists->valid_small32 = NULL;
+		}
+		if(bg_lists->valid_small64) {
+			list_destroy(bg_lists->valid_small64);
+			bg_lists->valid_small64 = NULL;
+		}
+		if(bg_lists->valid_small128) {
+			list_destroy(bg_lists->valid_small128);
+			bg_lists->valid_small128 = NULL;
+		}
+		if(bg_lists->valid_small256) {
+			list_destroy(bg_lists->valid_small256);
+			bg_lists->valid_small256 = NULL;
+		}
+
+		xfree(bg_lists);
+	}
+}
 
 static void _set_bg_lists()
 {
+	if(!bg_lists)
+		bg_lists = xmalloc(sizeof(bg_lists_t));
+
 	slurm_mutex_lock(&block_state_mutex);
-	if(bg_booted_block_list) 
-		list_destroy(bg_booted_block_list);
-	bg_booted_block_list = list_create(NULL);
-	if(bg_job_block_list) 
-		list_destroy(bg_job_block_list);
-	bg_job_block_list = list_create(NULL);	
-	num_unused_cpus = 
-		DIM_SIZE[X] * DIM_SIZE[Y] * DIM_SIZE[Z] * procs_per_node;
-	if(bg_curr_block_list)
-		list_destroy(bg_curr_block_list);	
-	bg_curr_block_list = list_create(destroy_bg_record);
-	
-	if(bg_list) 
-		list_destroy(bg_list);
-	bg_list = list_create(destroy_bg_record);
+
+	if(bg_lists->booted) 
+		list_destroy(bg_lists->booted);
+	bg_lists->booted = list_create(NULL);
+
+	if(bg_lists->job_running) 
+		list_destroy(bg_lists->job_running);
+	bg_lists->job_running = list_create(NULL);	
+
+	if(bg_lists->main) 
+		list_destroy(bg_lists->main);
+
+	bg_lists->main = list_create(destroy_bg_record);
 
 	slurm_mutex_unlock(&block_state_mutex);	
 	
-#ifdef HAVE_BGL
-	if(bg_blrtsimage_list)
-		list_destroy(bg_blrtsimage_list);
-	bg_blrtsimage_list = list_create(destroy_image);
-#endif
-	if(bg_linuximage_list)
-		list_destroy(bg_linuximage_list);
-	bg_linuximage_list = list_create(destroy_image);
-	if(bg_mloaderimage_list)
-		list_destroy(bg_mloaderimage_list);
-	bg_mloaderimage_list = list_create(destroy_image);
-	if(bg_ramdiskimage_list)
-		list_destroy(bg_ramdiskimage_list);
-	bg_ramdiskimage_list = list_create(destroy_image);	
 }
 
 /*
  * _validate_config_nodes - Match slurm configuration information with
  *                          current BG block configuration.
- * IN/OUT bg_found_block_list - if NULL is created and then any blocks
- *                              found on the system are then pushed on.
+ * IN/OUT curr_block_list -  List of blocks already existing on the system.
+ * IN/OUT found_block_list - List of blocks found on the system
+ *                              that are listed in the bluegene.conf.
+ * NOTE: Both of the lists above should be created with list_create(NULL),
+ *       since bg_lists->main holds the complete list of pointers and the
+ *       records are destroyed along with it.
+ *
  * RET - SLURM_SUCCESS if they match, else an error 
- * code. Writes bg_block_id into bg_list records.
+ * code. Writes bg_block_id into bg_lists->main records.
  */
 
-static int _validate_config_nodes(List *bg_found_block_list, char *dir)
+static int _validate_config_nodes(List curr_block_list, 
+				  List found_block_list, char *dir)
 {
 	int rc = SLURM_ERROR;
 	bg_record_t* bg_record = NULL;	
@@ -1412,18 +1435,21 @@ static int _validate_config_nodes(List *bg_found_block_list, char *dir)
 	ListIterator itr_curr;
 	char tmp_char[256];
 
+	xassert(curr_block_list);
+	xassert(found_block_list);
+
 #ifdef HAVE_BG_FILES
-	/* read current bg block info into bg_curr_block_list This
+	/* read current bg block info into curr_block_list.  This
 	 * happens in the state load before this in emulation mode */
-	if (read_bg_blocks() == SLURM_ERROR)
+	if (read_bg_blocks(curr_block_list) == SLURM_ERROR)
 		return SLURM_ERROR;
 	/* since we only care about error states here we don't care
-	   about the return code this must be done after the bg_list
+	   about the return code.  This must be done after bg_lists->main
 	   is created */
-	load_state_file(dir);
+	load_state_file(curr_block_list, dir);
 #else
 	/* read in state from last run. */
-	if ((rc = load_state_file(dir)) != SLURM_SUCCESS)
+	if ((rc = load_state_file(curr_block_list, dir)) != SLURM_SUCCESS)
 		return rc;
 	/* This needs to be reset to SLURM_ERROR or it will never be
 	   that way again ;). */
@@ -1432,15 +1458,9 @@ static int _validate_config_nodes(List *bg_found_block_list, char *dir)
 	if(!bg_recover) 
 		return SLURM_ERROR;
 
-	if(!bg_curr_block_list)
-		return SLURM_ERROR;
-	
-	if(!*bg_found_block_list)
-		(*bg_found_block_list) = list_create(NULL);
-
-	itr_curr = list_iterator_create(bg_curr_block_list);
-	itr_conf = list_iterator_create(bg_list);
-	while ((bg_record = (bg_record_t*) list_next(itr_conf))) {
+	itr_curr = list_iterator_create(curr_block_list);
+	itr_conf = list_iterator_create(bg_lists->main);
+	while ((bg_record = list_next(itr_conf))) {
 		list_iterator_reset(itr_curr);
 		while ((init_bg_record = list_next(itr_curr))) {
 			if (strcasecmp(bg_record->nodes, init_bg_record->nodes))
@@ -1493,7 +1513,7 @@ static int _validate_config_nodes(List *bg_found_block_list, char *dir)
 			if(bg_record->full_block)
 				full_created = 1;
 
-			list_push(*bg_found_block_list, bg_record);
+			list_push(found_block_list, bg_record);
 			format_node_name(bg_record, tmp_char,
 					 sizeof(tmp_char));
 			info("Existing: BlockID:%s Nodes:%s Conn:%s",
@@ -1502,12 +1522,12 @@ static int _validate_config_nodes(List *bg_found_block_list, char *dir)
 			     convert_conn_type(bg_record->conn_type));
 			if(((bg_record->state == RM_PARTITION_READY)
 			    || (bg_record->state == RM_PARTITION_CONFIGURING))
-			   && !block_ptr_exist_in_list(bg_booted_block_list, 
+			   && !block_ptr_exist_in_list(bg_lists->booted, 
 						       bg_record))
-				list_push(bg_booted_block_list, bg_record);
+				list_push(bg_lists->booted, bg_record);
 		}
 	}		
-	if(bluegene_layout_mode == LAYOUT_DYNAMIC)
+	if(bg_conf->layout_mode == LAYOUT_DYNAMIC)
 		goto finished;
 
 	if(!full_created) {
@@ -1516,8 +1536,8 @@ static int _validate_config_nodes(List *bg_found_block_list, char *dir)
 			if(init_bg_record->full_block) {
 				list_remove(itr_curr);
 				bg_record = init_bg_record;
-				list_append(bg_list, bg_record);
-				list_push(*bg_found_block_list, bg_record);
+				list_append(bg_lists->main, bg_record);
+				list_push(found_block_list, bg_record);
 				format_node_name(bg_record, tmp_char,
 						 sizeof(tmp_char));
 				info("Existing: BlockID:%s Nodes:%s Conn:%s",
@@ -1528,8 +1548,8 @@ static int _validate_config_nodes(List *bg_found_block_list, char *dir)
 				    || (bg_record->state 
 					== RM_PARTITION_CONFIGURING))
 				   && !block_ptr_exist_in_list(
-					   bg_booted_block_list, bg_record))
-					list_push(bg_booted_block_list,
+					   bg_lists->booted, bg_record))
+					list_push(bg_lists->booted,
 						  bg_record);
 				break;
 			}
@@ -1539,77 +1559,50 @@ static int _validate_config_nodes(List *bg_found_block_list, char *dir)
 finished:
 	list_iterator_destroy(itr_conf);
 	list_iterator_destroy(itr_curr);
-	if(!list_count(bg_curr_block_list))
+	if(!list_count(curr_block_list))
 		rc = SLURM_SUCCESS;
 	return rc;
 }
 
-static int _delete_old_blocks(List bg_found_block_list)
+static int _delete_old_blocks(List curr_block_list, List found_block_list)
 {
 	ListIterator itr_curr, itr_found;
 	bg_record_t *found_record = NULL, *init_record = NULL;
 	pthread_attr_t attr_agent;
 	pthread_t thread_agent;
 	int retries;
-	List bg_destroy_list = list_create(NULL);
+	List destroy_list = list_create(NULL);
+
+	xassert(curr_block_list);
+	xassert(found_block_list);
 
 	info("removing unspecified blocks");
 	if(!bg_recover) {
-		if(bg_curr_block_list) {
-			itr_curr = list_iterator_create(bg_curr_block_list);
-			while ((init_record = 
-				(bg_record_t*)list_next(itr_curr))) {
-				list_remove(itr_curr);
-				list_push(bg_destroy_list, init_record);
-			}
-			list_iterator_destroy(itr_curr);
-		} else {
-			error("_delete_old_blocks: "
-			      "no bg_curr_block_list 1");
-			list_destroy(bg_destroy_list);
-			return SLURM_ERROR;
+		itr_curr = list_iterator_create(curr_block_list);
+		while ((init_record = list_next(itr_curr))) {
+			list_remove(itr_curr);
+			list_push(destroy_list, init_record);
 		}
+		list_iterator_destroy(itr_curr);
 	} else {
-		if(bg_curr_block_list) {
-			itr_curr = list_iterator_create(bg_curr_block_list);
-			while ((init_record = list_next(itr_curr))) {
-				if(bg_found_block_list) {
-					itr_found = list_iterator_create(
-						bg_found_block_list);
-					while ((found_record 
-						= list_next(itr_found)) 
-					       != NULL) {
-						if (!strcmp(init_record->
-							    bg_block_id, 
-							    found_record->
-							    bg_block_id)) {
-							/* don't delete 
-							   this one 
-							*/
-							break;	
-						}
-					}
-					list_iterator_destroy(itr_found);
-				} else {
-					error("_delete_old_blocks: "
-					      "no bg_found_block_list");
-					list_iterator_destroy(itr_curr);
-					list_destroy(bg_destroy_list);
-					return SLURM_ERROR;
+		itr_curr = list_iterator_create(curr_block_list);
+		while ((init_record = list_next(itr_curr))) {
+			itr_found = list_iterator_create(found_block_list);
+			while ((found_record = list_next(itr_found))) {
+				if (!strcmp(init_record->bg_block_id, 
+					    found_record->bg_block_id)) {
+					/* don't delete this one */
+					break;	
 				}
-				if(found_record == NULL) {
-					list_remove(itr_curr);
-					list_push(bg_destroy_list, 
-						  init_record);
-				}
-			}		
-			list_iterator_destroy(itr_curr);
-		} else {
-			error("_delete_old_blocks: "
-			      "no bg_curr_block_list 2");
-			list_destroy(bg_destroy_list);
-			return SLURM_ERROR;
-		}
+			}
+			list_iterator_destroy(itr_found);
+			
+			if(found_record == NULL) {
+				list_remove(itr_curr);
+				list_push(destroy_list, init_record);
+			}
+		}		
+		list_iterator_destroy(itr_curr);
 	}
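
The two branches above reduce to a set difference keyed on bg_block_id:
destroy_list receives every current block with no match in
found_block_list (or, when bg_recover is off, every block outright).
With SLURM's list_find_first() the recover branch could be condensed to
(matcher hypothetical):

	static int _match_block_id(void *x, void *key)
	{
		return !strcmp(((bg_record_t *)x)->bg_block_id, (char *)key);
	}

	while ((init_record = list_next(itr_curr))) {
		if (!list_find_first(found_block_list, _match_block_id,
				     init_record->bg_block_id)) {
			list_remove(itr_curr);
			list_push(destroy_list, init_record);
		}
	}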
 
 	slurm_mutex_lock(&freed_cnt_mutex);
@@ -1617,7 +1610,7 @@ static int _delete_old_blocks(List bg_found_block_list)
 	    && ((bg_destroy_block_list = list_create(NULL)) == NULL))
 		fatal("malloc failure in block_list");
 
-	itr_curr = list_iterator_create(bg_destroy_list);
+	itr_curr = list_iterator_create(destroy_list);
 	while ((init_record = (bg_record_t*) list_next(itr_curr))) {
 		list_push(bg_destroy_block_list, init_record);
 		num_block_to_free++;
@@ -1648,7 +1641,7 @@ static int _delete_old_blocks(List bg_found_block_list)
 	}
 	list_iterator_destroy(itr_curr);
 	slurm_mutex_unlock(&freed_cnt_mutex);
-	list_destroy(bg_destroy_list);
+	list_destroy(destroy_list);
 		
 	retries=30;
 	while(num_block_to_free > num_block_freed) {
@@ -1665,7 +1658,9 @@ static int _delete_old_blocks(List bg_found_block_list)
 		retries++;
 		sleep(1);
 	}
-	
+
+	num_block_to_free = num_block_freed = 0;
+
 	info("I am done deleting");
 
 	return SLURM_SUCCESS;
@@ -1674,7 +1669,7 @@ static int _delete_old_blocks(List bg_found_block_list)
 static char *_get_bg_conf(void)
 {
 	char *val = getenv("SLURM_CONF");
-	char *rc;
+	char *rc = NULL;
 	int i;
 
 	if (!val)
@@ -1697,14 +1692,15 @@ static int _reopen_bridge_log(void)
 {
 	int rc = SLURM_SUCCESS;
 
-	if (bridge_api_file == NULL)
+	if (bg_conf->bridge_api_file == NULL)
 		return rc;
 	
 #ifdef HAVE_BG_FILES
-	rc = bridge_set_log_params(bridge_api_file, bridge_api_verb);
+	rc = bridge_set_log_params(bg_conf->bridge_api_file,
+				   bg_conf->bridge_api_verb);
 #endif
 	debug3("Bridge api file set to %s, verbose level %d\n", 
-	       bridge_api_file, bridge_api_verb);
+	       bg_conf->bridge_api_file, bg_conf->bridge_api_verb);
 	
 	return rc;
 }
diff --git a/src/plugins/select/bluegene/plugin/bluegene.h b/src/plugins/select/bluegene/plugin/bluegene.h
index 3f62010362d6f432bf782e9b100d2e6d958e2d1f..bea0b1a39efe64d026f0b54d3d1bcbacbfdfe3fc 100644
--- a/src/plugins/select/bluegene/plugin/bluegene.h
+++ b/src/plugins/select/bluegene/plugin/bluegene.h
@@ -1,14 +1,15 @@
 /*****************************************************************************\
  *  bluegene.h - header for blue gene configuration processing module. 
  *
- *  $Id: bluegene.h 17102 2009-03-31 23:23:01Z da $
+ *  $Id: bluegene.h 17534 2009-05-19 00:58:46Z da $
  *****************************************************************************
  *  Copyright (C) 2004 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Dan Phung <phung4@llnl.gov> and Danny Auble <da@llnl.gov>
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -49,55 +50,61 @@ typedef enum bg_layout_type {
 	LAYOUT_DYNAMIC	/* slurm will make all blocks */
 } bg_layout_t;
 
-/* Global variables */
+typedef struct {
 #ifdef HAVE_BGL
-extern char *default_blrtsimage;
+	List blrts_list;
 #endif
-extern char *default_linuximage;
-extern char *default_mloaderimage;
-extern char *default_ramdiskimage;
-extern char *bridge_api_file;
-extern char *bg_slurm_user_name;
-extern char *bg_slurm_node_prefix;
-extern bg_layout_t bluegene_layout_mode;
-extern double bluegene_io_ratio;
-extern double bluegene_nc_ratio;
-extern uint32_t bluegene_smallest_block;
-extern uint16_t bluegene_proc_ratio;
-extern uint16_t bluegene_numpsets;
-extern uint16_t bluegene_bp_node_cnt;
-extern uint16_t bluegene_bp_nodecard_cnt;
-extern uint16_t bluegene_nodecard_node_cnt;
-extern uint16_t bluegene_nodecard_ionode_cnt;
-extern uint16_t bluegene_quarter_node_cnt;
-extern uint16_t bluegene_quarter_ionode_cnt;
-
-extern ba_system_t *ba_system_ptr;
-extern time_t last_bg_update;
-
-extern List bg_curr_block_list; 	/* Initial bg block state */
-extern List bg_list;			/* List of configured BG blocks */
-extern List bg_job_block_list;  	/* jobs running in these blocks */
-extern List bg_booted_block_list;  	/* blocks that are booted */
-extern List bg_freeing_list;  	        /* blocks that being freed */
+	uint16_t bp_node_cnt;
+	uint16_t bp_nodecard_cnt;
+	char *bridge_api_file;
+	uint16_t bridge_api_verb;
 #ifdef HAVE_BGL
-extern List bg_blrtsimage_list;
+	char *default_blrtsimage;
 #endif
-extern List bg_linuximage_list;
-extern List bg_mloaderimage_list;
-extern List bg_ramdiskimage_list;
-extern List bg_valid_small32;
-extern List bg_valid_small64;
-extern List bg_valid_small128;
-extern List bg_valid_small256;
+	char *default_linuximage;
+	char *default_mloaderimage;
+	char *default_ramdiskimage;
+	uint16_t deny_pass;
+	double io_ratio;
+	bg_layout_t layout_mode;
+	List linux_list;
+	List mloader_list;
+	double nc_ratio;
+	uint16_t nodecard_node_cnt;
+	uint16_t nodecard_ionode_cnt;
+	uint16_t numpsets;
+	uint16_t proc_ratio;
+	uint32_t procs_per_bp;
+	uint16_t quarter_node_cnt;
+	uint16_t quarter_ionode_cnt;
+	List ramdisk_list;
+	char *slurm_user_name;
+	char *slurm_node_prefix;
+	uint32_t smallest_block;
+} bg_config_t;
+
+typedef struct {
+	List booted;         /* blocks that are booted */
+	List job_running;    /* jobs running in these blocks */
+	List freeing;        /* blocks that are being freed */
+	List main;           /* list of configured BG blocks */
+	List valid_small32;
+	List valid_small64;
+	List valid_small128;
+	List valid_small256;
+} bg_lists_t;
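
Taken together, these two structures replace a couple dozen file-scope
globals with a pair of heap objects owned by the plugin.  A minimal
sketch of the intended lifecycle inside bluegene.c, assuming SLURM's
zero-filling xmalloc():

	/* init path (cf. _set_bg_lists and plugin init) */
	bg_conf  = xmalloc(sizeof(bg_config_t));  /* fields start NULL/0 */
	bg_lists = xmalloc(sizeof(bg_lists_t));
	bg_lists->main = list_create(destroy_bg_record);

	/* teardown path (cf. fini_bg) */
	_destroy_bg_config(bg_conf);	/* frees strings, lists, struct */
	_destroy_bg_lists(bg_lists);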
 
+/* Global variables */
+extern bg_config_t *bg_conf;
+extern bg_lists_t *bg_lists;
+extern ba_system_t *ba_system_ptr;
+extern time_t last_bg_update;
 extern bool agent_fini;
 extern pthread_mutex_t block_state_mutex;
 extern pthread_mutex_t request_list_mutex;
 extern int num_block_to_free;
 extern int num_block_freed;
 extern int blocks_are_created;
-extern int procs_per_node;
 extern int num_unused_cpus;
 
 #define MAX_PTHREAD_RETRIES  1
@@ -109,7 +116,7 @@ extern int num_unused_cpus;
 #define BITSIZE 128
 /* Change BLOCK_STATE_VERSION value when changing the state save
  * format i.e. pack_block() */
-#define BLOCK_STATE_VERSION      "VER001"
+#define BLOCK_STATE_VERSION      "VER002"
 
 #include "bg_block_info.h"
 #include "bg_job_place.h"
@@ -161,14 +168,14 @@ extern bg_record_t *find_org_in_bg_list(List my_list, bg_record_t *bg_record);
 extern void *mult_free_block(void *args);
 extern void *mult_destroy_block(void *args);
 extern int free_block_list(List delete_list);
-extern int read_bg_conf(void);
+extern int read_bg_conf();
 extern int validate_current_blocks(char *dir);
 
 /* block_sys.c */
 /*****************************************************/
 extern int configure_block(bg_record_t * bg_conf_record);
-extern int read_bg_blocks();
+extern int read_bg_blocks(List curr_block_list);
-extern int load_state_file(char *dir_name);
+extern int load_state_file(List curr_block_list, char *dir_name);
 
 /* bg_switch_connections.c */
 /*****************************************************/
diff --git a/src/plugins/select/bluegene/plugin/defined_block.c b/src/plugins/select/bluegene/plugin/defined_block.c
index c2ac0a62bc83a5c66197c6ed250b161730b4a291..1872c8abb9285ec7e6104e3d0df57a3f681a53ea 100644
--- a/src/plugins/select/bluegene/plugin/defined_block.c
+++ b/src/plugins/select/bluegene/plugin/defined_block.c
@@ -8,7 +8,8 @@
  *  Written by Danny Auble <da@llnl.gov>
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -66,7 +67,6 @@ extern int create_defined_blocks(bg_layout_t overlapped,
 #ifdef HAVE_BG_FILES
 	init_wires();
 #endif
- 		
 	/* Locks are already in place to protect part_list here */
 	itr = list_iterator_create(part_list);
 	while ((part_ptr = list_next(itr))) {
@@ -84,15 +84,15 @@ extern int create_defined_blocks(bg_layout_t overlapped,
 	
 	slurm_mutex_lock(&block_state_mutex);
 	reset_ba_system(false);
-	if(bg_list) {
-		itr = list_iterator_create(bg_list);
+	if(bg_lists->main) {
+		itr = list_iterator_create(bg_lists->main);
 		while((bg_record = list_next(itr))) {
 			if(bg_found_block_list) {
 				itr_found = list_iterator_create(
 					bg_found_block_list);
 				while ((found_record = (bg_record_t*) 
 					list_next(itr_found)) != NULL) {
-/* 					info("%s[%s[ ?= %s[%s]\n", */
+/* 					info("%s[%s] ?= %s[%s]\n", */
 /* 					     bg_record->nodes, */
 /* 					     bg_record->ionodes, */
 /* 					     found_record->nodes, */
@@ -105,7 +105,7 @@ extern int create_defined_blocks(bg_layout_t overlapped,
 							  found_record->
 							  ionode_bitmap))
 						) {
-						/* don't reboot this one */
+						/* don't remake this one */
 						break;	
 					}
 				}
@@ -116,7 +116,7 @@ extern int create_defined_blocks(bg_layout_t overlapped,
 			}
 			if(bg_record->bp_count > 0 
 			   && !bg_record->full_block
-			   && bg_record->cpu_cnt >= procs_per_node) {
+			   && bg_record->cpu_cnt >= bg_conf->procs_per_bp) {
 				char *name = NULL;
 
 				if(overlapped == LAYOUT_OVERLAP) {
@@ -130,7 +130,7 @@ extern int create_defined_blocks(bg_layout_t overlapped,
 				if(set_all_bps_except(bg_record->nodes)
 				   != SLURM_SUCCESS)
 					fatal("something happened in "
-					      "the load of %s"
+					      "the load of %s.  "
 					      "Did you use smap to "
 					      "make the "
 					      "bluegene.conf file?",
@@ -185,7 +185,7 @@ extern int create_defined_blocks(bg_layout_t overlapped,
 					}
 					
 					snprintf(temp, sizeof(temp), "%s%s",
-						 bg_slurm_node_prefix,
+						 bg_conf->slurm_node_prefix,
 						 name);
 					
 					xfree(name);
@@ -237,7 +237,7 @@ extern int create_defined_blocks(bg_layout_t overlapped,
 		}
 		list_iterator_destroy(itr);
 	} else {
-		error("create_defined_blocks: no bg_list 2");
+		error("create_defined_blocks: no bg_lists->main 2");
 		slurm_mutex_unlock(&block_state_mutex);
 		xfree(non_usable_nodes);
 		return SLURM_ERROR;
@@ -248,12 +248,12 @@ extern int create_defined_blocks(bg_layout_t overlapped,
 	create_full_system_block(bg_found_block_list);
 
 	slurm_mutex_lock(&block_state_mutex);
-	sort_bg_record_inc_size(bg_list);
+	sort_bg_record_inc_size(bg_lists->main);
 	slurm_mutex_unlock(&block_state_mutex);
 	
 #ifdef _PRINT_BLOCKS_AND_EXIT
-	if(bg_list) {
-		itr = list_iterator_create(bg_list);
+	if(bg_lists->main) {
+		itr = list_iterator_create(bg_lists->main);
 		debug("\n\n");
 		while ((found_record = (bg_record_t *) list_next(itr)) 
 		       != NULL) {
@@ -261,7 +261,7 @@ extern int create_defined_blocks(bg_layout_t overlapped,
 		}
 		list_iterator_destroy(itr);
 	} else {
-		error("create_defined_blocks: no bg_list 5");
+		error("create_defined_blocks: no bg_lists->main 5");
 	}
  	exit(0);
 #endif	/* _PRINT_BLOCKS_AND_EXIT */
@@ -318,14 +318,14 @@ extern int create_full_system_block(List bg_found_block_list)
 /* 	geo[Z] = max_dim[Z]; */
 /* #endif */
 	
-	i = (10+strlen(bg_slurm_node_prefix));
+	i = (10+strlen(bg_conf->slurm_node_prefix));
 	name = xmalloc(i);
 	if((geo[X] == 0) && (geo[Y] == 0) && (geo[Z] == 0))
 		snprintf(name, i, "%s000",
-			 bg_slurm_node_prefix);
+			 bg_conf->slurm_node_prefix);
 	else
 		snprintf(name, i, "%s[000x%c%c%c]",
-			 bg_slurm_node_prefix,
+			 bg_conf->slurm_node_prefix,
 			 alpha_num[geo[X]], alpha_num[geo[Y]],
 			 alpha_num[geo[Z]]);
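
For illustration, assuming alpha_num maps 0-9 to '0'-'9' (and 10 and up
to 'A' onward, as elsewhere in the block allocator): with prefix "bg" on
a machine whose largest midplane coordinates are {3,3,3}, this yields
the full-system name "bg[000x333]"; a single-midplane system (geo all
zero) yields "bg000".

	char name[12];
	snprintf(name, sizeof(name), "%s[000x%c%c%c]",
		 "bg", alpha_num[3], alpha_num[3], alpha_num[3]);
	/* name == "bg[000x333]" */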
 	
@@ -345,8 +345,8 @@ extern int create_full_system_block(List bg_found_block_list)
 		error("create_full_system_block: no bg_found_block_list 2");
 	}
 	
-	if(bg_list) {
-		itr = list_iterator_create(bg_list);
+	if(bg_lists->main) {
+		itr = list_iterator_create(bg_lists->main);
 		while ((bg_record = (bg_record_t *) list_next(itr)) 
 		       != NULL) {
 			if (!strcmp(name, bg_record->nodes)) {
@@ -359,7 +359,7 @@ extern int create_full_system_block(List bg_found_block_list)
 		list_iterator_destroy(itr);
 	} else {
 		xfree(name);
-		error("create_overlapped_blocks: no bg_list 3");
+		error("create_overlapped_blocks: no bg_lists->main 3");
 		rc = SLURM_ERROR;
 		goto no_total;
 	}
@@ -417,7 +417,7 @@ extern int create_full_system_block(List bg_found_block_list)
 	}
 
 	print_bg_record(bg_record);
-	list_append(bg_list, bg_record);
+	list_append(bg_lists->main, bg_record);
 
 no_total:
 	if(records)
diff --git a/src/plugins/select/bluegene/plugin/defined_block.h b/src/plugins/select/bluegene/plugin/defined_block.h
index 4255135f3c2508117150a0a75fcb76c155fda112..048de8ef4dfedab8b9abf3c9122a24787a7678cf 100644
--- a/src/plugins/select/bluegene/plugin/defined_block.h
+++ b/src/plugins/select/bluegene/plugin/defined_block.h
@@ -8,7 +8,8 @@
  *  Written by Danny Auble <da@llnl.gov>
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/plugins/select/bluegene/plugin/dynamic_block.c b/src/plugins/select/bluegene/plugin/dynamic_block.c
index 79d265616dbe30875d0f9ce33d1b9fc4fe1fdf9d..fca8258831362da4dd1650c46c44fd596139612f 100644
--- a/src/plugins/select/bluegene/plugin/dynamic_block.c
+++ b/src/plugins/select/bluegene/plugin/dynamic_block.c
@@ -8,7 +8,8 @@
  *  Written by Danny Auble <da@llnl.gov>
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -63,13 +64,13 @@ extern List create_dynamic_block(List block_list,
 	int geo[BA_SYSTEM_DIMENSIONS];
 	int i;
 	blockreq_t blockreq;
-	int cnodes = request->procs / bluegene_proc_ratio;
+	int cnodes = request->procs / bg_conf->proc_ratio;
 
-	if(cnodes < bluegene_smallest_block) {
+	if(cnodes < bg_conf->smallest_block) {
 		error("Can't create this size %d "
 		      "on this system numpsets is %d",
 		      request->procs,
-		      bluegene_numpsets);
+		      bg_conf->numpsets);
 		goto finished;
 	}
 	memset(&blockreq, 0, sizeof(blockreq_t));
@@ -135,7 +136,7 @@ extern List create_dynamic_block(List block_list,
 		FREE_NULL_BITMAP(bitmap);
 	}
 
-	if(request->size==1 && cnodes < bluegene_bp_node_cnt) {
+	if(request->size==1 && cnodes < bg_conf->bp_node_cnt) {
 		switch(cnodes) {
 #ifdef HAVE_BGL
 		case 32:
@@ -226,11 +227,11 @@ extern List create_dynamic_block(List block_list,
 			/* Here we are only looking for the first
 			   block on the midplane.  So either the count
 			   is greater than or equal to
-			   bluegene_bp_node_cnt or the first bit is
+			   bg_conf->bp_node_cnt or the first bit is
 			   set in the ionode_bitmap.
 			*/
 			if(bg_record->job_running == NO_JOB_RUNNING 
-			   && ((bg_record->node_cnt >= bluegene_bp_node_cnt)
+			   && ((bg_record->node_cnt >= bg_conf->bp_node_cnt)
 			       || (bit_ffs(bg_record->ionode_bitmap) == 0))) {
 				
 				for(i=0; i<BA_SYSTEM_DIMENSIONS; i++) 
@@ -358,7 +359,7 @@ extern bg_record_t *create_small_record(bg_record_t *bg_record,
 		found_record->bp_count = 1;
 		found_record->nodes = xstrdup_printf(
 			"%s%c%c%c", 
-			bg_slurm_node_prefix, 
+			bg_conf->slurm_node_prefix, 
 			alpha_num[ba_node->coord[X]],
 			alpha_num[ba_node->coord[Y]],
 			alpha_num[ba_node->coord[Z]]);
@@ -375,8 +376,8 @@ extern bg_record_t *create_small_record(bg_record_t *bg_record,
 				
 	found_record->conn_type = SELECT_SMALL;
 				
-	xassert(bluegene_proc_ratio);
-	found_record->cpu_cnt = bluegene_proc_ratio * size;
+	xassert(bg_conf->proc_ratio);
+	found_record->cpu_cnt = bg_conf->proc_ratio * size;
 	found_record->node_cnt = size;
 
 	found_record->ionode_bitmap = bit_copy(ionodes);
@@ -583,10 +584,9 @@ static int _breakup_blocks(List block_list, List new_blocks,
 	ListIterator itr = NULL, bit_itr = NULL;
 	int search_cnt = 0;
 	int total_cnode_cnt=0;
-	uint16_t last_quarter = (uint16_t) NO_VAL;
 	char tmp_char[256];
-	bitstr_t *ionodes = bit_alloc(bluegene_numpsets);
-	int cnodes = request->procs / bluegene_proc_ratio;
+	bitstr_t *ionodes = bit_alloc(bg_conf->numpsets);
+	int cnodes = request->procs / bg_conf->proc_ratio;
 	
 	debug2("proc count = %d cnodes = %d size = %d",
 	       request->procs, cnodes, request->size);
@@ -596,16 +596,16 @@ static int _breakup_blocks(List block_list, List new_blocks,
 		/* a 16 can go anywhere */
 		break;
 	case 32:
-		bit_itr = list_iterator_create(bg_valid_small32);
+		bit_itr = list_iterator_create(bg_lists->valid_small32);
 		break;
 	case 64:
-		bit_itr = list_iterator_create(bg_valid_small64);
+		bit_itr = list_iterator_create(bg_lists->valid_small64);
 		break;
 	case 128:
-		bit_itr = list_iterator_create(bg_valid_small128);
+		bit_itr = list_iterator_create(bg_lists->valid_small128);
 		break;
 	case 256:
-		bit_itr = list_iterator_create(bg_valid_small256);
+		bit_itr = list_iterator_create(bg_lists->valid_small256);
 		break;
 	default:
 		error("We shouldn't be here with this size %d", cnodes);
@@ -630,7 +630,7 @@ again:
 				continue;
 		/* check small blocks first */
 		if((search_cnt == 0)
-		   && (bg_record->node_cnt > bluegene_bp_node_cnt))
+		   && (bg_record->node_cnt > bg_conf->bp_node_cnt))
 				continue;
 		
 		if (request->avail_node_bitmap &&
@@ -691,7 +691,7 @@ again:
 				list_iterator_reset(bit_itr);
 			}
 			if(!bitstr) {
-				bit_nclear(ionodes, 0, (bluegene_numpsets-1));
+				bit_nclear(ionodes, 0, (bg_conf->numpsets-1));
 				bit_or(ionodes, bg_record->ionode_bitmap);
 				total_cnode_cnt = bg_record->node_cnt;
 			} else
@@ -731,9 +731,8 @@ again:
 	if(!bg_record && (search_cnt < 2)) {
 		search_cnt++;
 		list_iterator_reset(itr);
-		bit_nclear(ionodes, 0, (bluegene_numpsets-1));
+		bit_nclear(ionodes, 0, (bg_conf->numpsets-1));
 		total_cnode_cnt = 0;		
-		last_quarter = (uint16_t) NO_VAL;
 		goto again;
 	}
 
@@ -747,7 +746,7 @@ again:
 		} else {
 			debug3("looking for original");
 			found_record = find_org_in_bg_list(
-				bg_list, bg_record);
+				bg_lists->main, bg_record);
 		}
 		if(!found_record) {
 			error("this record wasn't found in the list!");
@@ -772,10 +771,9 @@ again:
 		_split_block(block_list, new_blocks, found_record, cnodes);
 		remove_from_bg_list(block_list, bg_record);
 		destroy_bg_record(bg_record);
-		remove_from_bg_list(bg_list, found_record);
+		remove_from_bg_list(bg_lists->main, found_record);
 		temp_list = list_create(NULL);
 		list_push(temp_list, found_record);
-		num_block_to_free++;
 		free_block_list(temp_list);
 		list_destroy(temp_list);
 		rc = SLURM_SUCCESS;
diff --git a/src/plugins/select/bluegene/plugin/dynamic_block.h b/src/plugins/select/bluegene/plugin/dynamic_block.h
index 9e65a27de6cf81836c928311cec1e6b216db5d97..28027618ad5baa9fd82a39b76f9aafb1482c0bc3 100644
--- a/src/plugins/select/bluegene/plugin/dynamic_block.h
+++ b/src/plugins/select/bluegene/plugin/dynamic_block.h
@@ -8,7 +8,8 @@
  *  Written by Danny Auble <da@llnl.gov>
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -45,12 +46,7 @@ extern List create_dynamic_block(List block_list,
 				 ba_request_t *request, List my_block_list,
 				 bool track_down_nodes);
 
-#ifdef HAVE_BGQ 
-extern bg_record_t *create_small_record(bg_record_t *bg_record, 
-					uint16_t quarter, uint16_t nodecard);
-#else
 extern bg_record_t *create_small_record(bg_record_t *bg_record, 
 					bitstr_t *ionodes, int size);
-#endif
 
 #endif /* _BLUEGENE_DYNAMIC_BLOCK_H_ */
diff --git a/src/plugins/select/bluegene/plugin/libsched_if64.c b/src/plugins/select/bluegene/plugin/libsched_if64.c
index ea9b9281b00a364cf3348d44156c898688ffceae..3be8812e1ee701ffd71849eb945ebb051ff210f3 100644
--- a/src/plugins/select/bluegene/plugin/libsched_if64.c
+++ b/src/plugins/select/bluegene/plugin/libsched_if64.c
@@ -9,7 +9,8 @@
  *  Written by Danny Auble <auble1@llnl.gov> et. al.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/plugins/select/bluegene/plugin/opts.c b/src/plugins/select/bluegene/plugin/opts.c
index 4fa9392294bf00598190cf8d00de5f260e23a540..760cfc1101acce4a5fbff37929b395729667d31e 100644
--- a/src/plugins/select/bluegene/plugin/opts.c
+++ b/src/plugins/select/bluegene/plugin/opts.c
@@ -1,14 +1,15 @@
 /****************************************************************************\
  *  opts.c - sfree command line option processing functions
- *  $Id: opts.c 13672 2008-03-19 23:10:58Z jette $
+ *  $Id: opts.c 16616 2009-02-20 17:00:27Z jette $
  *****************************************************************************
  *  Copyright (C) 2002 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Danny Auble <da@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/plugins/select/bluegene/plugin/select_bluegene.c b/src/plugins/select/bluegene/plugin/select_bluegene.c
index 16364b37298982b09118d7d11706cf5fe2f85765..5604ee13f2218209bdfe6df787160cce77b8668f 100644
--- a/src/plugins/select/bluegene/plugin/select_bluegene.c
+++ b/src/plugins/select/bluegene/plugin/select_bluegene.c
@@ -1,15 +1,15 @@
 /*****************************************************************************\
  *  select_bluegene.c - node selection plugin for Blue Gene system.
- * 
- *  $Id: select_bluegene.c 17175 2009-04-07 17:24:20Z da $
  *****************************************************************************
- *  Copyright (C) 2004-2006 The Regents of the University of California.
+ *  Copyright (C) 2004-2007 The Regents of the University of California.
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Dan Phung <phung4@llnl.gov> Danny Auble <da@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -39,19 +39,12 @@
 
 #include "bluegene.h"
 
-#ifndef HAVE_BG
-#include "defined_block.h"
-#endif
-
 //#include "src/common/uid.h"
 #include "src/slurmctld/trigger_mgr.h"
 #include <fcntl.h>
 
 #define HUGE_BUF_SIZE (1024*16)
 
-/* global */
-int procs_per_node = 512;
-
 /*
  * These variables are required by the generic plugin interface.  If they
  * are not found in the plugin, the plugin loader will ignore it.
@@ -101,9 +94,7 @@ extern int select_p_alter_node_cnt(enum select_node_cnt type, void *data);
  */
 extern int init ( void )
 {
-#ifndef HAVE_BG
-	fatal("Plugin select/bluegene is illegal on non-BlueGene computers");
-#endif
+
 #if (SYSTEM_DIMENSIONS != 3)
 	fatal("SYSTEM_DIMENSIONS value (%d) invalid for Blue Gene",
 		SYSTEM_DIMENSIONS);
@@ -164,14 +155,12 @@ static char *_block_state_str(int state)
 {
 	static char tmp[16];
 
-#ifdef HAVE_BG
 	switch (state) {
 		case 0: 
 			return "ERROR";
 		case 1:
 			return "FREE";
 	}
-#endif
 
 	snprintf(tmp, sizeof(tmp), "%d", state);
 	return tmp;
@@ -209,21 +198,14 @@ extern int fini ( void )
  */
  extern int select_p_block_init(List part_list)
 {
-	xfree(bg_slurm_user_name);
-	xfree(bg_slurm_node_prefix);
-
-	slurm_conf_lock();
-	xassert(slurmctld_conf.slurm_user_name);
-	xassert(slurmctld_conf.node_prefix);
-	bg_slurm_user_name = xstrdup(slurmctld_conf.slurm_user_name);
-	bg_slurm_node_prefix = xstrdup(slurmctld_conf.node_prefix);
-	slurm_conf_unlock();	
-
-#ifdef HAVE_BG
+	/* select_p_node_init() must be called before this function so
+	   that the node information is set up correctly.
+	*/
 	if(read_bg_conf() == SLURM_ERROR) {
 		fatal("Error, could not read the file");
 		return SLURM_ERROR;
 	}
+	
 	if(part_list) {
 		struct part_record *part_ptr = NULL;
 		ListIterator itr = list_iterator_create(part_list);
@@ -237,18 +219,6 @@ extern int fini ( void )
 		}
 		list_iterator_destroy(itr);
 	}
-#else
-	/*looking for blocks only I created */
-	if (create_defined_blocks(bluegene_layout_mode, NULL) 
-			== SLURM_ERROR) {
-		/* error in creating the static blocks, so
-		 * blocks referenced by submitted jobs won't
-		 * correspond to actual slurm blocks.
-		 */
-		fatal("Error, could not create the static blocks");
-		return SLURM_ERROR;
-	}
-#endif
 
 	return SLURM_SUCCESS; 
 }
@@ -274,7 +244,7 @@ extern int select_p_state_save(char *dir_name)
 
 	/* write block records to buffer */
 	slurm_mutex_lock(&block_state_mutex);
-	itr = list_iterator_create(bg_list);
+	itr = list_iterator_create(bg_lists->main);
 	while((bg_record = list_next(itr))) {
 		/* on real bluegene systems we only want to keep track of
 		 * the blocks in an error state
@@ -360,12 +330,13 @@ extern int select_p_job_init(List job_list)
 	return sync_jobs(job_list);
 }
 
-/* All initialization is performed by select_p_block_init() */
+/* All initialization is performed by init() */
 extern int select_p_node_init(struct node_record *node_ptr, int node_cnt)
 {
 	if(node_cnt>0)
-		if(node_ptr->cpus >= bluegene_bp_node_cnt)
-			procs_per_node = node_ptr->cpus;
+		if(node_ptr->cpus >= bg_conf->bp_node_cnt) 
+			bg_conf->procs_per_bp = node_ptr->cpus;
+		
 	return SLURM_SUCCESS;
 }
 
@@ -442,20 +413,9 @@ extern int select_p_job_resume(struct job_record *job_ptr)
 	return ESLURM_NOT_SUPPORTED;
 }
 
-extern int select_p_get_job_cores(uint32_t job_id, int alloc_index, int s)
-{
-	return ESLURM_NOT_SUPPORTED;
-}
-
 extern int select_p_job_ready(struct job_record *job_ptr)
 {
-#ifdef HAVE_BG_FILES
 	return block_ready(job_ptr);
-#else
-	if (job_ptr->job_state == JOB_RUNNING)
-		return 1;
-	return 0;
-#endif
 }
 
 extern int select_p_pack_node_info(time_t last_query_time, Buf *buffer_ptr)
@@ -476,9 +436,9 @@ extern int select_p_pack_node_info(time_t last_query_time, Buf *buffer_ptr)
 		pack32(blocks_packed, buffer);
 		pack_time(last_bg_update, buffer);
 
-		if(bg_list) {
+		if(bg_lists->main) {
 			slurm_mutex_lock(&block_state_mutex);
-			itr = list_iterator_create(bg_list);
+			itr = list_iterator_create(bg_lists->main);
 			while ((bg_record = list_next(itr))) {
 				pack_block(bg_record, buffer);
 				blocks_packed++;
@@ -486,16 +446,16 @@ extern int select_p_pack_node_info(time_t last_query_time, Buf *buffer_ptr)
 			list_iterator_destroy(itr);
 			slurm_mutex_unlock(&block_state_mutex);
 		} else {
-			error("select_p_pack_node_info: no bg_list");
+			error("select_p_pack_node_info: no bg_lists->main");
 			return SLURM_ERROR;
 		}
 		/*
 		 * get all the blocks we are freeing since they have
 		 * been moved here
 		 */
-		if(bg_freeing_list) {
+		if(bg_lists->freeing) {
 			slurm_mutex_lock(&block_state_mutex);
-			itr = list_iterator_create(bg_freeing_list);
+			itr = list_iterator_create(bg_lists->freeing);
 			while ((bg_record = (bg_record_t *) list_next(itr)) 
 			       != NULL) {
 				xassert(bg_record->bg_block_id != NULL);
@@ -513,7 +473,7 @@ extern int select_p_pack_node_info(time_t last_query_time, Buf *buffer_ptr)
 		
 		*buffer_ptr = buffer;
 	} else {
-		error("select_p_pack_node_info: bg_list not ready yet");
+		error("select_p_pack_node_info: bg_lists->main not ready yet");
 		return SLURM_ERROR;
 	}
 
@@ -540,7 +500,7 @@ extern int select_p_update_block (update_part_msg_t *part_desc_ptr)
 	time_t now;
 	char reason[128], tmp[64], time_str[32];
 
-	bg_record = find_bg_record_in_list(bg_list, part_desc_ptr->name);
+	bg_record = find_bg_record_in_list(bg_lists->main, part_desc_ptr->name);
 	if(!bg_record)
 		return SLURM_ERROR;
 
@@ -566,14 +526,14 @@ extern int select_p_update_block (update_part_msg_t *part_desc_ptr)
 	
 	/* Free all overlapping blocks and kill any jobs only
 	 * if we are going into an error state */ 
-	if (bluegene_layout_mode != LAYOUT_DYNAMIC
+	if (bg_conf->layout_mode != LAYOUT_DYNAMIC
 	    && !part_desc_ptr->state_up) {
 		bg_record_t *found_record = NULL;
 		ListIterator itr;
 		List delete_list = list_create(NULL);
 		
 		slurm_mutex_lock(&block_state_mutex);
-		itr = list_iterator_create(bg_list);
+		itr = list_iterator_create(bg_lists->main);
 		while ((found_record = list_next(itr))) {
 			if (bg_record == found_record)
 				continue;
@@ -608,7 +568,6 @@ extern int select_p_update_block (update_part_msg_t *part_desc_ptr)
 				       bg_record->bg_block_id);	
 			}
 			list_push(delete_list, found_record);
-			num_block_to_free++;
 		}		
 		list_iterator_destroy(itr);
 		free_block_list(delete_list);
@@ -621,11 +580,13 @@ extern int select_p_update_block (update_part_msg_t *part_desc_ptr)
 	} else if(part_desc_ptr->state_up){
 		resume_block(bg_record);
 	} else {
+		error("state is ? %d", part_desc_ptr->state_up);
 		return rc;
 	}
 				
 	info("%s", reason);
 	last_bg_update = time(NULL);
+
 	return rc;
 }
 
@@ -639,7 +600,7 @@ extern int select_p_update_sub_node (update_part_msg_t *part_desc_ptr)
 	double nc_pos = 0, last_pos = -1;
 	bitstr_t *ionode_bitmap = NULL;
 	
-	if(bluegene_layout_mode != LAYOUT_DYNAMIC) {
+	if(bg_conf->layout_mode != LAYOUT_DYNAMIC) {
 		info("You can't use this call unless you are on a Dynamically "
 		     "allocated system.  Please use update BlockName instead");
 		rc = SLURM_ERROR;
@@ -719,7 +680,7 @@ extern int select_p_update_sub_node (update_part_msg_t *part_desc_ptr)
 		rc = SLURM_ERROR;
 		goto end_it;
 	}
-	ionode_bitmap = bit_alloc(bluegene_numpsets);
+	ionode_bitmap = bit_alloc(bg_conf->numpsets);
 	bit_unfmt(ionode_bitmap, ionodes);
 	if(bit_ffs(ionode_bitmap) == -1) {
 		error("update_sub_node: Invalid ionode '%s' given.", ionodes);
@@ -727,19 +688,22 @@ extern int select_p_update_sub_node (update_part_msg_t *part_desc_ptr)
 		FREE_NULL_BITMAP(ionode_bitmap);
 		goto end_it;		
 	}
-	node_name = xstrdup_printf("%s%s", bg_slurm_node_prefix, coord);
+	node_name = xstrdup_printf("%s%s", bg_conf->slurm_node_prefix, coord);
 	/* find out how many nodecards to get for each ionode */
 	if(!part_desc_ptr->state_up) {
 		info("Admin setting %s[%s] in an error state",
 		     node_name, ionodes);
-		for(i = 0; i<bluegene_numpsets; i++) {
+		for(i = 0; i<bg_conf->numpsets; i++) {
 			if(bit_test(ionode_bitmap, i)) {
 				if((int)nc_pos != (int)last_pos) {
-					down_nodecard(node_name, i);
+					/* first ionode of this nodecard */
+					int start_io = 
+						(int)nc_pos * bg_conf->io_ratio;
+					down_nodecard(node_name, start_io);
 					last_pos = nc_pos;
 				}
 			}
-			nc_pos += bluegene_nc_ratio;
+			nc_pos += bg_conf->nc_ratio;
 		}
 	} else if(part_desc_ptr->state_up){
 		info("Admin setting %s[%s] in an free state",
@@ -759,32 +723,24 @@ end_it:
 	return rc;
 }
 
-extern int select_p_get_extra_jobinfo (struct node_record *node_ptr, 
-				       struct job_record *job_ptr, 
-                                       enum select_data_info info,
-                                       void *data)
-{
-	if (info == SELECT_AVAIL_CPUS) {
-		/* Needed to track CPUs allocated to jobs on whole nodes
-		 * for sched/wiki2 (Moab scheduler). Small block allocations
-		 * handled through use of job_ptr->num_procs in slurmctld */
-		uint16_t *cpus_per_bp = (uint16_t *) data;
-		*cpus_per_bp = procs_per_node;
-	}
-	return SLURM_SUCCESS;
-}
-
 extern int select_p_get_info_from_plugin (enum select_data_info info, 
+					  struct job_record *job_ptr,
 					  void *data)
 {
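+	/* SELECT_STATIC_PART: report 1 when blocks are laid out
+	 * statically, 0 for overlap or dynamic layouts */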
+	if (info == SELECT_STATIC_PART) {
+		uint16_t *tmp16 = (uint16_t *) data;
+		if (bg_conf->layout_mode == LAYOUT_STATIC)
+			*tmp16 = 1;
+		else
+			*tmp16 = 0;
+	}
+
 	return SLURM_SUCCESS;
 }
 
 extern int select_p_update_node_state (int index, uint16_t state)
 {
-	int x;
-#ifdef HAVE_BG
-	int y, z;
+	int x, y, z;
 	
 	for (y = DIM_SIZE[Y] - 1; y >= 0; y--) {
 		for (z = 0; z < DIM_SIZE[Z]; z++) {
@@ -799,14 +755,7 @@ extern int select_p_update_node_state (int index, uint16_t state)
 			}
 		}
 	}
-#else
-	for (x = 0; x < DIM_SIZE[X]; x++) {
-		if (ba_system_ptr->grid[x].index == index) {
-			ba_update_node_state(&ba_system_ptr->grid[x], state);
-			return SLURM_SUCCESS;
-		}
-	}
-#endif
+
 	return SLURM_ERROR;
 }
 
@@ -817,22 +766,22 @@ extern int select_p_alter_node_cnt(enum select_node_cnt type, void *data)
 	int i;
 	uint16_t req_geometry[BA_SYSTEM_DIMENSIONS];
 
-	if(!bluegene_bp_node_cnt) {
+	if(!bg_conf->bp_node_cnt) {
 		fatal("select_g_alter_node_cnt: This can't be called "
-		      "before select_g_block_init");
+		      "before init");
 	}
 
 	switch (type) {
 	case SELECT_GET_NODE_SCALING:
 		if((*nodes) != INFINITE)
-			(*nodes) = bluegene_bp_node_cnt;
+			(*nodes) = bg_conf->bp_node_cnt;
 		break;
 	case SELECT_SET_BP_CNT:
 		if(((*nodes) == INFINITE) || ((*nodes) == NO_VAL))
 			tmp = (*nodes);
-		else if((*nodes) > bluegene_bp_node_cnt) {
+		else if((*nodes) > bg_conf->bp_node_cnt) {
 			tmp = (*nodes);
-			tmp /= bluegene_bp_node_cnt;
+			tmp /= bg_conf->bp_node_cnt;
 			if(tmp < 1) 
 				tmp = 1;
 		} else 
@@ -846,11 +795,11 @@ extern int select_p_alter_node_cnt(enum select_node_cnt type, void *data)
 			 * don't scale up this value. */
 			break;
 		}
-		(*nodes) *= bluegene_bp_node_cnt;
+		(*nodes) *= bg_conf->bp_node_cnt;
 		break;
 	case SELECT_APPLY_NODE_MAX_OFFSET:
 		if((*nodes) != INFINITE)
-			(*nodes) *= bluegene_bp_node_cnt;
+			(*nodes) *= bg_conf->bp_node_cnt;
 		break;
 	case SELECT_SET_NODE_CNT:
 		select_g_get_jobinfo(job_desc->select_jobinfo,
@@ -877,29 +826,30 @@ extern int select_p_alter_node_cnt(enum select_node_cnt type, void *data)
 			for (i=0; i<BA_SYSTEM_DIMENSIONS; i++)
 				job_desc->min_nodes *= 
 					(uint16_t)req_geometry[i];
-			job_desc->min_nodes *= bluegene_bp_node_cnt;
+			job_desc->min_nodes *= bg_conf->bp_node_cnt;
 			job_desc->max_nodes = job_desc->min_nodes;
 		}
 
 		if(job_desc->num_procs != NO_VAL) {
+			job_desc->num_procs /= bg_conf->proc_ratio;
 			if(job_desc->min_nodes < job_desc->num_procs)
 				job_desc->min_nodes = job_desc->num_procs;
 			if(job_desc->max_nodes < job_desc->num_procs)
 				job_desc->max_nodes = job_desc->num_procs;
 		}
 		/* See if min_nodes is greater than one base partition */
-		if(job_desc->min_nodes > bluegene_bp_node_cnt) {
+		if(job_desc->min_nodes > bg_conf->bp_node_cnt) {
 			/*
 			 * if it is make sure it is a factor of 
-			 * bluegene_bp_node_cnt, if it isn't make it 
+			 * bg_conf->bp_node_cnt, if it isn't make it 
 			 * that way 
 			 */
-			tmp = job_desc->min_nodes % bluegene_bp_node_cnt;
+			tmp = job_desc->min_nodes % bg_conf->bp_node_cnt;
 			if(tmp > 0)
 				job_desc->min_nodes += 
-					(bluegene_bp_node_cnt-tmp);
+					(bg_conf->bp_node_cnt-tmp);
 		}				
-		tmp = job_desc->min_nodes / bluegene_bp_node_cnt;
+		tmp = job_desc->min_nodes / bg_conf->bp_node_cnt;
 		
 		/* this means it is greater or equal to one bp */
 		if(tmp > 0) {
@@ -907,32 +857,32 @@ extern int select_p_alter_node_cnt(enum select_node_cnt type, void *data)
 					     SELECT_DATA_NODE_CNT,
 					     &job_desc->min_nodes);
 			job_desc->min_nodes = tmp;
-			job_desc->num_procs = procs_per_node * tmp;
+			job_desc->num_procs = bg_conf->procs_per_bp * tmp;
 		} else { 
 #ifdef HAVE_BGL
-			if(job_desc->min_nodes <= bluegene_nodecard_node_cnt
-			   && bluegene_nodecard_ionode_cnt)
+			if(job_desc->min_nodes <= bg_conf->nodecard_node_cnt
+			   && bg_conf->nodecard_ionode_cnt)
 				job_desc->min_nodes = 
-					bluegene_nodecard_node_cnt;
+					bg_conf->nodecard_node_cnt;
 			else if(job_desc->min_nodes 
-				<= bluegene_quarter_node_cnt)
+				<= bg_conf->quarter_node_cnt)
 				job_desc->min_nodes = 
-					bluegene_quarter_node_cnt;
+					bg_conf->quarter_node_cnt;
 			else 
 				job_desc->min_nodes = 
-					bluegene_bp_node_cnt;
+					bg_conf->bp_node_cnt;
 			
 			select_g_set_jobinfo(job_desc->select_jobinfo,
 					     SELECT_DATA_NODE_CNT,
 					     &job_desc->min_nodes);
 
-			tmp = bluegene_bp_node_cnt/job_desc->min_nodes;
+			tmp = bg_conf->bp_node_cnt/job_desc->min_nodes;
 			
-			job_desc->num_procs = procs_per_node/tmp;
+			job_desc->num_procs = bg_conf->procs_per_bp/tmp;
 			job_desc->min_nodes = 1;
 #else
-			i = bluegene_smallest_block;
-			while(i <= bluegene_bp_node_cnt) {
+			i = bg_conf->smallest_block;
+			while(i <= bg_conf->bp_node_cnt) {
 				if(job_desc->min_nodes <= i) {
 					job_desc->min_nodes = i;
 					break;
@@ -945,7 +895,7 @@ extern int select_p_alter_node_cnt(enum select_node_cnt type, void *data)
 					     &job_desc->min_nodes);
 
 			job_desc->num_procs = job_desc->min_nodes 
-				* bluegene_proc_ratio;
+				* bg_conf->proc_ratio;
 			job_desc->min_nodes = 1;
 #endif
 		}
@@ -953,40 +903,40 @@ extern int select_p_alter_node_cnt(enum select_node_cnt type, void *data)
 		if(job_desc->max_nodes == (uint32_t) NO_VAL) 
 			return SLURM_SUCCESS;
 		
-		if(job_desc->max_nodes > bluegene_bp_node_cnt) {
-			tmp = job_desc->max_nodes % bluegene_bp_node_cnt;
+		if(job_desc->max_nodes > bg_conf->bp_node_cnt) {
+			tmp = job_desc->max_nodes % bg_conf->bp_node_cnt;
 			if(tmp > 0)
 				job_desc->max_nodes += 
-					(bluegene_bp_node_cnt-tmp);
+					(bg_conf->bp_node_cnt-tmp);
 		}
-		tmp = job_desc->max_nodes / bluegene_bp_node_cnt;
+		tmp = job_desc->max_nodes / bg_conf->bp_node_cnt;
 		if(tmp > 0) {
 			job_desc->max_nodes = tmp;
 			tmp = NO_VAL;
 		} else {
 #ifdef HAVE_BGL
-			if(job_desc->max_nodes <= bluegene_nodecard_node_cnt
-			   && bluegene_nodecard_ionode_cnt)
+			if(job_desc->max_nodes <= bg_conf->nodecard_node_cnt
+			   && bg_conf->nodecard_ionode_cnt)
 				job_desc->max_nodes = 
-					bluegene_nodecard_node_cnt;
+					bg_conf->nodecard_node_cnt;
 			else if(job_desc->max_nodes 
-				<= bluegene_quarter_node_cnt)
+				<= bg_conf->quarter_node_cnt)
 				job_desc->max_nodes = 
-					bluegene_quarter_node_cnt;
+					bg_conf->quarter_node_cnt;
 			else 
 				job_desc->max_nodes = 
-					bluegene_bp_node_cnt;
+					bg_conf->bp_node_cnt;
 		
-			tmp = bluegene_bp_node_cnt/job_desc->max_nodes;
-			tmp = procs_per_node/tmp;
+			tmp = bg_conf->bp_node_cnt/job_desc->max_nodes;
+			tmp = bg_conf->procs_per_bp/tmp;
 			
 			select_g_set_jobinfo(job_desc->select_jobinfo,
 					     SELECT_DATA_MAX_PROCS, 
 					     &tmp);
 			job_desc->max_nodes = 1;
 #else
-			i = bluegene_smallest_block;
-			while(i <= bluegene_bp_node_cnt) {
+			i = bg_conf->smallest_block;
+			while(i <= bg_conf->bp_node_cnt) {
 				if(job_desc->max_nodes <= i) {
 					job_desc->max_nodes = i;
 					break;
@@ -994,7 +944,7 @@ extern int select_p_alter_node_cnt(enum select_node_cnt type, void *data)
 				i *= 2;
 			}
 			
-			tmp = job_desc->max_nodes * bluegene_proc_ratio;
+			tmp = job_desc->max_nodes * bg_conf->proc_ratio;
 			select_g_set_jobinfo(job_desc->select_jobinfo,
 					     SELECT_DATA_MAX_PROCS,
 					     &tmp);
@@ -1014,15 +964,130 @@ extern int select_p_alter_node_cnt(enum select_node_cnt type, void *data)
 
 extern int select_p_reconfigure(void)
 {
-	return SLURM_SUCCESS;
-}
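+	/* bg_conf caches the slurm user name and node prefix when the
+	 * plugin starts; they cannot be changed by a reconfigure, so
+	 * just warn when slurm.conf now disagrees. */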
+	slurm_conf_lock();
+	if(!slurmctld_conf.slurm_user_name 
+	   || strcmp(bg_conf->slurm_user_name, slurmctld_conf.slurm_user_name))
+		error("The slurm user has changed from '%s' to '%s'.  "
+		      "If this is really what you "
+		      "want you will need to restart slurm for this "
+		      "change to be enforced in the bluegene plugin.",
+		      bg_conf->slurm_user_name, slurmctld_conf.slurm_user_name);
+	if(!slurmctld_conf.node_prefix
+	   || strcmp(bg_conf->slurm_node_prefix, slurmctld_conf.node_prefix))
+		error("Node Prefix has changed from '%s' to '%s'.  "
+		      "If this is really what you "
+		      "want you will need to restart slurm for this "
+		      "change to be enforced in the bluegene plugin.",
+		      bg_conf->slurm_node_prefix, slurmctld_conf.node_prefix);
+	slurm_conf_unlock();	
 
-extern int select_p_step_begin(struct step_record *step_ptr)
-{
 	return SLURM_SUCCESS;
 }
 
-extern int select_p_step_fini(struct step_record *step_ptr)
+extern List select_p_get_config(void)
 {
-	return SLURM_SUCCESS;
+	config_key_pair_t *key_pair;
+	List my_list = list_create(destroy_config_key_pair);
+
+	if (!my_list)
+		fatal("malloc failure on list_create");
+
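+	/* Export each bluegene.conf setting as a (name, value) string
+	 * pair; the list is sorted by key (list_sort below) before it
+	 * is handed back to the caller. */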
+	key_pair = xmalloc(sizeof(config_key_pair_t));
+	key_pair->name = xstrdup("BasePartitionNodeCnt");
+	key_pair->value = xstrdup_printf("%u", bg_conf->bp_node_cnt);
+	list_append(my_list, key_pair);
+
+	key_pair = xmalloc(sizeof(config_key_pair_t));
+	key_pair->name = xstrdup("NodeCPUCnt");
+	key_pair->value = xstrdup_printf("%u", bg_conf->proc_ratio);
+	list_append(my_list, key_pair);
+
+
+#ifdef HAVE_BGL
+	key_pair = xmalloc(sizeof(config_key_pair_t));
+	key_pair->name = xstrdup("BlrtsImage");
+	key_pair->value = xstrdup(bg_conf->default_blrtsimage);
+	list_append(my_list, key_pair);
+
+	key_pair = xmalloc(sizeof(config_key_pair_t));
+	key_pair->name = xstrdup("LinuxImage");
+	key_pair->value = xstrdup(bg_conf->default_linuximage);
+	list_append(my_list, key_pair);
+
+	key_pair = xmalloc(sizeof(config_key_pair_t));
+	key_pair->name = xstrdup("RamDiskImage");
+	key_pair->value = xstrdup(bg_conf->default_ramdiskimage);
+	list_append(my_list, key_pair);
+#else
+	key_pair = xmalloc(sizeof(config_key_pair_t));
+	key_pair->name = xstrdup("CnloadImage");
+	key_pair->value = xstrdup(bg_conf->default_linuximage);
+	list_append(my_list, key_pair);
+
+	key_pair = xmalloc(sizeof(config_key_pair_t));
+	key_pair->name = xstrdup("IoloadImage");
+	key_pair->value = xstrdup(bg_conf->default_ramdiskimage);
+	list_append(my_list, key_pair);
+#endif
+
+	key_pair = xmalloc(sizeof(config_key_pair_t));
+	key_pair->name = xstrdup("BridgeAPILogFile");
+	key_pair->value = xstrdup(bg_conf->bridge_api_file);
+	list_append(my_list, key_pair);
+
+	key_pair = xmalloc(sizeof(config_key_pair_t));
+	key_pair->name = xstrdup("BridgeAPIVerbose");
+	key_pair->value = xstrdup_printf("%u", bg_conf->bridge_api_verb);
+	list_append(my_list, key_pair);
+
+	if(bg_conf->deny_pass) {
+		key_pair = xmalloc(sizeof(config_key_pair_t));
+		key_pair->name = xstrdup("DenyPassThrough");
+		if(bg_conf->deny_pass & PASS_DENY_X)
+			xstrcat(key_pair->value, "X,");
+		if(bg_conf->deny_pass & PASS_DENY_Y)
+			xstrcat(key_pair->value, "Y,");
+		if(bg_conf->deny_pass & PASS_DENY_Z)
+			xstrcat(key_pair->value, "Z,");
+		if(key_pair->value)
+			key_pair->value[strlen(key_pair->value)-1] = '\0';
+		list_append(my_list, key_pair);
+	}
+
+	key_pair = xmalloc(sizeof(config_key_pair_t));
+	key_pair->name = xstrdup("LayoutMode");
+	switch(bg_conf->layout_mode) {
+	case LAYOUT_STATIC:
+		key_pair->value = xstrdup("Static");
+		break;
+	case LAYOUT_OVERLAP:
+		key_pair->value = xstrdup("Overlap");
+		break;
+	case LAYOUT_DYNAMIC:
+		key_pair->value = xstrdup("Dynamic");
+		break;
+	default:
+		key_pair->value = xstrdup("Unknown");
+		break;
+	}
+	list_append(my_list, key_pair);
+
+	key_pair = xmalloc(sizeof(config_key_pair_t));
+	key_pair->name = xstrdup("MloaderImage");
+	key_pair->value = xstrdup(bg_conf->default_mloaderimage);
+	list_append(my_list, key_pair);
+
+	key_pair = xmalloc(sizeof(config_key_pair_t));
+	key_pair->name = xstrdup("NodeCardNodeCnt");
+	key_pair->value = xstrdup_printf("%u", bg_conf->nodecard_node_cnt);
+	list_append(my_list, key_pair);
+
+	key_pair = xmalloc(sizeof(config_key_pair_t));
+	key_pair->name = xstrdup("Numpsets");
+	key_pair->value = xstrdup_printf("%u", bg_conf->numpsets);
+	list_append(my_list, key_pair);
+
+	list_sort(my_list, (ListCmpF) sort_key_pairs);
+
+	return my_list;
 }
diff --git a/src/plugins/select/bluegene/plugin/sfree.c b/src/plugins/select/bluegene/plugin/sfree.c
index 36db832cf92b8f1b3b5bf836274250a5c80890a7..5c043f7525b617097fad0a60cfab1f40848f852b 100644
--- a/src/plugins/select/bluegene/plugin/sfree.c
+++ b/src/plugins/select/bluegene/plugin/sfree.c
@@ -1,15 +1,16 @@
 /*****************************************************************************\
  *  sfree.c - free specified block or all blocks.
- *  $Id: sfree.c 16357 2009-01-30 18:05:07Z da $
+ *  $Id: sfree.c 17366 2009-04-28 23:04:14Z da $
  *****************************************************************************
  *  Copyright (C) 2004 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Danny Auble <da@llnl.gov>
  *
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -39,7 +40,7 @@
 
 #include "sfree.h"
 
-#define MAX_POLL_RETRIES    110
+#define MAX_POLL_RETRIES    220
 #define POLL_INTERVAL        3
 #define MAX_PTHREAD_RETRIES  1
 
@@ -292,10 +293,17 @@ static int _free_block(delete_record_t *delete_record)
 				if(rc == PARTITION_NOT_FOUND) {
 					info("block %s is not found");
 					break;
+				} else if(rc == INCOMPATIBLE_STATE) {
+					debug2("bridge_destroy_partition"
+					       "(%s): %s State = %d",
+					       delete_record->bg_block_id, 
+					       _bg_err_str(rc), 
+					       delete_record->state);
+				} else {
+					error("bridge_destroy_block(%s): %s",
+					      delete_record->bg_block_id,
+					      _bg_err_str(rc));
 				}
-				error("bridge_destroy_block(%s): %s",
-				      delete_record->bg_block_id,
-				      _bg_err_str(rc));
 			}
 #else
 			bg_record->state = RM_PARTITION_FREE;	
@@ -519,14 +527,15 @@ static char *_bg_err_str(status_t inx)
 /* Kill a job and remove its record from MMCS */
 static int _remove_job(db_job_id_t job_id)
 {
-	int i, rc;
+	int rc, count = 0;
 	rm_job_t *job_rec = NULL;
 	rm_job_state_t job_state;
 
 	info("removing job %d from MMCS", job_id);
-	for (i=0; i<MAX_POLL_RETRIES; i++) {
-		if (i > 0)
+	while(1) {
+		if (count)
 			sleep(POLL_INTERVAL);
+		count++;
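+		/* poll indefinitely; MAX_POLL_RETRIES only bounds how
+		 * long we wait before warning that the job won't die */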
 
 		/* Find the job */
 		if ((rc = bridge_get_job(job_id, &job_rec)) != STATUS_OK) {
@@ -560,17 +569,30 @@ static int _remove_job(db_job_id_t job_id)
 		/* check the state and process accordingly */
 		if(job_state == RM_JOB_TERMINATED)
 			return STATUS_OK;
-		else if(job_state == RM_JOB_DYING)
+		else if(job_state == RM_JOB_DYING) {
+			if(count > MAX_POLL_RETRIES) 
+				error("Job %d isn't dying, trying for "
+				      "%d seconds", count*POLL_INTERVAL);
 			continue;
-		else if(job_state == RM_JOB_ERROR) {
+		} else if(job_state == RM_JOB_ERROR) {
 			error("job %d is in a error state.", job_id);
 			
 			//free_bg_block();
 			return STATUS_OK;
 		}
 
-		(void) bridge_signal_job(job_id, SIGKILL);
-		rc = bridge_cancel_job(job_id);
+		/* we have been told the next 2 lines do the same
+		 * thing, but I don't believe it to be true.  In most
+		 * cases when you do a signal of SIGTERM the mpirun
+		 * process gets killed with a SIGTERM.  In the case of
+		 * bridge_cancel_job it always gets killed with a
+		 * SIGKILL.  From IBM's point of view that is a bad
+		 * deal, so we are going to use the signal instead ;).
+		 */
+
+//		 rc = bridge_cancel_job(job_id);
+		 rc = bridge_signal_job(job_id, SIGTERM);
+
 		if (rc != STATUS_OK) {
 			if (rc == JOB_NOT_FOUND) {
 				debug("job %d removed from MMCS", job_id);
diff --git a/src/plugins/select/bluegene/plugin/sfree.h b/src/plugins/select/bluegene/plugin/sfree.h
index f8bad5a1c81e46a27c938bb4ade9c1f4ca42a89e..7bbfdcc73e62d4d4f824c0b56bb866373f620a9e 100644
--- a/src/plugins/select/bluegene/plugin/sfree.h
+++ b/src/plugins/select/bluegene/plugin/sfree.h
@@ -4,10 +4,11 @@
  *  Copyright (C) 2004 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Danny Auble <da@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/plugins/select/bluegene/plugin/slurm_epilog.c b/src/plugins/select/bluegene/plugin/slurm_epilog.c
index a281e5e73c6bf0ce44dbad09f261fe7b4aa58e2b..68d8466dfd92127d84b42a85feaa26dbc916f816 100644
--- a/src/plugins/select/bluegene/plugin/slurm_epilog.c
+++ b/src/plugins/select/bluegene/plugin/slurm_epilog.c
@@ -3,15 +3,16 @@
  *      owned by this user. This is executed via SLURM to synchronize the 
  *      user's job execution with slurmctld configuration of partitions.
  *
- * $Id: slurm_epilog.c 13672 2008-03-19 23:10:58Z jette $
+ * $Id: slurm_epilog.c 17313 2009-04-21 20:28:06Z lipari $
  *****************************************************************************
  *  Copyright (C) 2004 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -78,15 +79,15 @@ int main(int argc, char *argv[])
 	char *job_id_char = NULL;
 	uint32_t job_id;
 
-	job_id_char = getenv("SLURM_JOBID");		/* get SLURM job ID */
+	job_id_char = getenv("SLURM_JOB_ID");		/* get SLURM job ID */
 	if (!job_id_char) {
-		fprintf(stderr, "SLURM_JOBID not set\n");
+		fprintf(stderr, "SLURM_JOB_ID not set\n");
 		exit(0);
 	}
 
 	job_id = (uint32_t) atol(job_id_char);
 	if (job_id == 0) {
-		fprintf(stderr, "SLURM_JOBID invalid: %s\n", job_id_char);
+		fprintf(stderr, "SLURM_JOB_ID invalid: %s\n", job_id_char);
 		exit(0);
 	}
 
diff --git a/src/plugins/select/bluegene/plugin/slurm_prolog.c b/src/plugins/select/bluegene/plugin/slurm_prolog.c
index 07f27e96e0c1ff08fe5fdc7d853602c96a87a97d..ef33f4bc7d9082f74b2087b6454b5e24c2e1a76f 100644
--- a/src/plugins/select/bluegene/plugin/slurm_prolog.c
+++ b/src/plugins/select/bluegene/plugin/slurm_prolog.c
@@ -6,10 +6,11 @@
  *  Copyright (C) 2004 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -79,15 +80,15 @@ int main(int argc, char *argv[])
 	char *job_id_char = NULL;
 	uint32_t job_id;
 
-	job_id_char = getenv("SLURM_JOBID");		/* get SLURM job ID */
+	job_id_char = getenv("SLURM_JOB_ID");		/* get SLURM job ID */
 	if (!job_id_char) {
-		fprintf(stderr, "SLURM_JOBID not set\n");
+		fprintf(stderr, "SLURM_JOB_ID not set\n");
 		exit(1);				/* abort job */
 	}
 
 	job_id = (uint32_t) atol(job_id_char);
 	if (job_id == 0) {
-		fprintf(stderr, "SLURM_JOBID invalid: %s\n", job_id_char);
+		fprintf(stderr, "SLURM_JOB_ID invalid: %s\n", job_id_char);
 		exit(1);				/* abort job */
 	}
 
diff --git a/src/plugins/select/bluegene/plugin/state_test.c b/src/plugins/select/bluegene/plugin/state_test.c
index 0d32e0e2ca52533656e708fbe70a5b6890681942..2c636d91f2bd0d29fb6377ce42da24651fc9aa94 100644
--- a/src/plugins/select/bluegene/plugin/state_test.c
+++ b/src/plugins/select/bluegene/plugin/state_test.c
@@ -2,14 +2,15 @@
  *  state_test.c - Test state of Bluegene base partitions and switches. 
  *  DRAIN nodes in SLURM that are not usable. 
  *
- *  $Id: state_test.c 17202 2009-04-09 16:56:23Z da $
+ *  $Id: state_test.c 17317 2009-04-21 21:39:50Z da $
  *****************************************************************************
  *  Copyright (C) 2004-2006 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Dan Phung <phung4@llnl.gov> and Morris Jette <jette1@llnl.gov>
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -121,7 +122,7 @@ static void _configure_node_down(rm_bp_id_t bp_id, my_bluegene_t *my_bg)
 		}
 		
 		snprintf(bg_down_node, sizeof(bg_down_node), "%s%c%c%c", 
-			 bg_slurm_node_prefix,
+			 bg_conf->slurm_node_prefix,
 			 alpha_num[bp_loc.X], alpha_num[bp_loc.Y],
 			 alpha_num[bp_loc.Z]);
 		
@@ -157,7 +158,7 @@ static int _test_down_nodecards(rm_BP_t *bp_ptr)
 	//int io_cnt = 1;
 
 	/* Translate 1 nodecard count to ionode count */
-/* 	if((io_cnt *= bluegene_io_ratio)) */
+/* 	if((io_cnt *= bg_conf->io_ratio)) */
 /* 		io_cnt--; */
 
 	if ((rc = bridge_get_data(bp_ptr, RM_BPID, &bp_id))
@@ -182,9 +183,18 @@ static int _test_down_nodecards(rm_BP_t *bp_ptr)
 		rc = SLURM_ERROR;
 		goto clean_up;
 	}
-	
+
+	/* make sure we have this midplane in the system */
+	if(coord[X] >= DIM_SIZE[X]
+	   || coord[Y] >= DIM_SIZE[Y]
+	   || coord[Z] >= DIM_SIZE[Z]) {
+		debug4("node %s isn't configured", bp_id);
+		rc = SLURM_SUCCESS;
+		goto clean_up;
+	}
+
 	node_name = xstrdup_printf("%s%c%c%c",
-				   bg_slurm_node_prefix,
+				   bg_conf->slurm_node_prefix,
 				   alpha_num[coord[X]], 
 				   alpha_num[coord[Y]],
 				   alpha_num[coord[Z]]);
@@ -253,20 +263,37 @@ static int _test_down_nodecards(rm_BP_t *bp_ptr)
 			error("bridge_get_data(CardQuarter): %d",rc);
 			goto clean_up;
 		}
-		io_start *= bluegene_quarter_ionode_cnt;
-		io_start += bluegene_nodecard_ionode_cnt * (i%4);
+		io_start *= bg_conf->quarter_ionode_cnt;
+		io_start += bg_conf->nodecard_ionode_cnt * (i%4);
 #else
 		/* From the first nodecard id we can figure
 		   out where to start from with the alloc of ionodes.
 		*/
 		io_start = atoi((char*)nc_name+1);
-		io_start *= bluegene_io_ratio;
+		io_start *= bg_conf->io_ratio;
 #endif
-
+		/* On small systems with less than a midplane the
+		   database may report nodecards that are in a missing
+		   state.  To avoid a flood of warnings here, just
+		   skip over the missing ones.
+		*/
+		if(io_start >= bg_conf->numpsets) {
+			if(state == RM_NODECARD_MISSING) {
+				debug3("Nodecard %s is missing continue",
+				       nc_name);
+			} else {
+				error("We don't have the system configured "
+				      "for this nodecard %s, we only have "
+				      "%d ionodes and this starts at %d", 
+				      nc_name, io_start, bg_conf->numpsets);
+			}
+			free(nc_name);
+			continue;
+		}
 /* 		if(!ionode_bitmap)  */
-/* 			ionode_bitmap = bit_alloc(bluegene_numpsets); */
-/* 		info("setting %d-%d of %d", */
-/* 		     io_start, io_start+io_cnt, bluegene_numpsets); */
+/* 			ionode_bitmap = bit_alloc(bg_conf->numpsets); */
+/* 		info("setting %s start %d of %d", */
+/* 		     nc_name,  io_start, bg_conf->numpsets); */
 /* 		bit_nset(ionode_bitmap, io_start, io_start+io_cnt); */
 		/* we have to handle each nodecard separately to make
 		   sure we don't create holes in the system */
@@ -294,7 +321,7 @@ static int _test_down_nodecards(rm_BP_t *bp_ptr)
 /* 		info("no ionode_bitmap"); */
 /* 		ListIterator itr = NULL; */
 /* 		slurm_mutex_lock(&block_state_mutex); */
-/* 		itr = list_iterator_create(bg_list); */
+/* 		itr = list_iterator_create(bg_lists->main); */
 /* 		while ((bg_record = list_next(itr))) { */
 /* 			if(bg_record->job_running != BLOCK_ERROR_STATE) */
 /* 				continue; */
diff --git a/src/plugins/select/bluegene/plugin/state_test.h b/src/plugins/select/bluegene/plugin/state_test.h
index b6a96d99f2207c163d10b8278a9b2d019d672eea..4134be4e112262fdc52553bdddc5ec7dc5299209 100644
--- a/src/plugins/select/bluegene/plugin/state_test.h
+++ b/src/plugins/select/bluegene/plugin/state_test.h
@@ -1,13 +1,14 @@
 /*****************************************************************************\
  *  state_test.h - header for Blue Gene node and switch state test. 
- *  $Id: state_test.h 17102 2009-03-31 23:23:01Z da $
+ *  $Id: state_test.h 17121 2009-04-02 18:55:59Z da $
  *****************************************************************************
  *  Copyright (C) 2004 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Dan Phung <phung4@llnl.gov> et. al.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/plugins/select/cons_res/Makefile.am b/src/plugins/select/cons_res/Makefile.am
index fc88fa6fe049d43ca91df6b57d25739de492f960..631d2dc979474584b014217ac3c31ef089236322 100644
--- a/src/plugins/select/cons_res/Makefile.am
+++ b/src/plugins/select/cons_res/Makefile.am
@@ -10,5 +10,6 @@ pkglib_LTLIBRARIES = select_cons_res.la
 
 # Consumable resources node selection plugin.
 select_cons_res_la_SOURCES =  select_cons_res.c select_cons_res.h \
-                              dist_tasks.c dist_tasks.h
+                              dist_tasks.c dist_tasks.h \
+			      job_test.c job_test.h
 select_cons_res_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS)
diff --git a/src/plugins/select/cons_res/Makefile.in b/src/plugins/select/cons_res/Makefile.in
index 4370a64e6b18b8211e7cc29e8b070a4f5a76ec0b..da73591e014135491c87991bdef4c9b1f7078b76 100644
--- a/src/plugins/select/cons_res/Makefile.in
+++ b/src/plugins/select/cons_res/Makefile.in
@@ -43,14 +43,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -77,7 +81,8 @@ am__installdirs = "$(DESTDIR)$(pkglibdir)"
 pkglibLTLIBRARIES_INSTALL = $(INSTALL)
 LTLIBRARIES = $(pkglib_LTLIBRARIES)
 select_cons_res_la_LIBADD =
-am_select_cons_res_la_OBJECTS = select_cons_res.lo dist_tasks.lo
+am_select_cons_res_la_OBJECTS = select_cons_res.lo dist_tasks.lo \
+	job_test.lo
 select_cons_res_la_OBJECTS = $(am_select_cons_res_la_OBJECTS)
 select_cons_res_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \
 	$(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \
@@ -109,6 +114,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
@@ -275,7 +284,8 @@ pkglib_LTLIBRARIES = select_cons_res.la
 
 # Consumable resources node selection plugin.
 select_cons_res_la_SOURCES = select_cons_res.c select_cons_res.h \
-                              dist_tasks.c dist_tasks.h
+                              dist_tasks.c dist_tasks.h \
+			      job_test.c job_test.h
 
 select_cons_res_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS)
 all: all-am
@@ -348,6 +358,7 @@ distclean-compile:
 	-rm -f *.tab.c
 
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/dist_tasks.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/job_test.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/select_cons_res.Plo@am__quote@
 
 .c.o:
diff --git a/src/plugins/select/cons_res/dist_tasks.c b/src/plugins/select/cons_res/dist_tasks.c
index deafd2fd3e839b61428ce55628219a5546084b77..2036353a8ea1b7bec23dcceaa70e26e8de7fefcc 100644
--- a/src/plugins/select/cons_res/dist_tasks.c
+++ b/src/plugins/select/cons_res/dist_tasks.c
@@ -1,15 +1,14 @@
 /*****************************************************************************\
  *  dist_tasks - Assign task count to {socket,core,thread} or CPU
  *               resources
- *
- *  $Id: dist_tasks.c,v 1.3 2006/10/31 19:31:31 palermo Exp $
  ***************************************************************************** 
- *  Copyright (C) 2006 Hewlett-Packard Development Company, L.P.
+ *  Copyright (C) 2006-2008 Hewlett-Packard Development Company, L.P.
  *  Written by Susanne M. Balle, <susanne.balle@hp.com>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -40,10 +39,19 @@
 #include "select_cons_res.h"
 #include "dist_tasks.h"
 
-#if (0)
+#if(0)
 #define CR_DEBUG 1
 #endif
 
+#if(0)
+/* Using CR_SOCKET or CR_SOCKET_MEMORY will not allocate a socket to more
+ * than one job at a time, but it also will not grant a job access to more
+ * CPUs on the socket than requested. If ALLOCATE_FULL_SOCKET is defined,
+ * then a job will be given access to every core on each allocated socket.
+ */
+#define ALLOCATE_FULL_SOCKET 1
+#endif
+
 /* _compute_c_b_task_dist - compute the number of tasks on each
  * of the nodes for the cyclic and block distributions. We need to do
  * this in the case of consumable resources so that we have an exact
@@ -57,313 +65,278 @@
  * resources.
  *
  * IN/OUT job_ptr - pointer to job being scheduled. The per-node
- *                  job->alloc_cpus array is computed here.
+ *                  job_res->cpus array is recomputed here.
  *
  */
-int compute_c_b_task_dist(struct select_cr_job *job)
+static int _compute_c_b_task_dist(struct job_record *job_ptr)
 {
-	int i, j, rc = SLURM_SUCCESS;
-	bool over_commit = false;
 	bool over_subscribe = false;
-	uint32_t taskid = 0, last_taskid, maxtasks = job->nprocs;
+	uint32_t n, i, tid, maxtasks;
+	uint16_t *avail_cpus;
+	select_job_res_t job_res = job_ptr->select_job;
+	if (!job_res || !job_res->cpus) {
+		error("cons_res: _compute_c_b_task_dist given NULL job_ptr");
+		return SLURM_ERROR;
+	}
 
-	if (job->job_ptr->details && job->job_ptr->details->overcommit)
-		over_commit = true;
+	maxtasks = job_res->nprocs;
+	avail_cpus = job_res->cpus;
+	job_res->cpus = xmalloc(job_res->nhosts * sizeof(uint16_t));
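+	/* Round-robin fill: sweep i hands out at most one task per node
+	 * per pass.  A hypothetical example: with avail_cpus = {2,1,2}
+	 * and maxtasks = 4, the sweeps yield job_res->cpus = {2,1,1}. */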
 
-	for (j = 0; (taskid < maxtasks); j++) {	/* cycle counter */
+	for (tid = 0, i = 0; (tid < maxtasks); i++) { /* cycle counter */
 		bool space_remaining = false;
-		last_taskid = taskid;
-		for (i = 0; ((i < job->nhosts) && (taskid < maxtasks)); i++) {
-			if ((j < job->cpus[i]) || over_subscribe) {
-				taskid++;
-				if ((job->alloc_cpus[i] == 0) ||
-				    (!over_commit))
-					job->alloc_cpus[i]++;
-				if ((j + 1) < job->cpus[i])
+		if (over_subscribe) {
+			/* 'over_subscribe' is a relief valve that guards
+			 * against an infinite loop, and it *should* never
+			 * come into play because maxtasks should never be
+			 * greater than the total number of available cpus
+			 */
+			error("cons_res: _compute_c_b_task_dist oversubscribe");
+		}
+		for (n = 0; ((n < job_res->nhosts) && (tid < maxtasks)); n++) {
+			if ((i < avail_cpus[n]) || over_subscribe) {
+				tid++;
+				if (job_res->cpus[n] < avail_cpus[n])
+					job_res->cpus[n]++;
+				if ((i + 1) < avail_cpus[n])
 					space_remaining = true;
 			}
 		}
-		if (!space_remaining)
+		if (!space_remaining) {
 			over_subscribe = true;
-		if (last_taskid == taskid) {
-			/* avoid infinite loop */
-			error("compute_c_b_task_dist failure");
-			rc = SLURM_ERROR;
-			break;
 		}
 	}
-
-#if (CR_DEBUG)	
-	for (i = 0; i < job->nhosts; i++) {
-		info("cons_res _c_b_task_dist %u host_index %d nprocs %u "
-		     "maxtasks %u cpus %u alloc_cpus %u", 
-		     job->job_id, i, job->nprocs, 
-		     maxtasks, job->cpus[i], job->alloc_cpus[i]);
-	}
-#endif	
-
-	return rc;
+	xfree(avail_cpus);
+	return SLURM_SUCCESS;
 }
 
-/* scan all rows looking for the best fit, and return the offset */
-static int _find_offset(struct select_cr_job *job, const int job_index,
-			uint16_t cores, uint16_t sockets, uint32_t maxcores,
-			const select_type_plugin_info_t cr_type,
-			struct node_cr_record *this_cr_node)
-{
-	struct part_cr_record *p_ptr;
-	int i, j, index, offset, skip;
-	uint16_t acores, asockets, freecpus, last_freecpus = 0;
-	struct multi_core_data *mc_ptr;
 
-	p_ptr = get_cr_part_ptr(this_cr_node, job->job_ptr->part_ptr);
-	if (p_ptr == NULL)
-		abort();
-	mc_ptr = job->job_ptr->details->mc_ptr;
+/* distribute blocks (planes) of tasks cyclically */
+static int _compute_plane_dist(struct job_record *job_ptr)
+{
+	bool over_subscribe = false;
+	uint32_t n, i, p, tid, maxtasks;
+	uint16_t *avail_cpus, plane_size = 1;
+	select_job_res_t job_res = job_ptr->select_job;
+	if (!job_res || !job_res->cpus) {
+		error("cons_res: _compute_plane_dist given NULL job_res");
+		return SLURM_ERROR;
+	}
 
-	index = -1;
-	for (i = 0; i < p_ptr->num_rows; i++) {
-		acores = 0;
-		asockets = 0;
-		skip = 0;
-		offset = i * this_cr_node->sockets;
-		for (j = 0; j < this_cr_node->sockets; j++) {
-			if ((cores - p_ptr->alloc_cores[offset+j]) <
-							mc_ptr->min_cores) {
-				/* count the number of unusable sockets */
-				skip++;
-				acores += cores;
-			} else { 
-				acores += p_ptr->alloc_cores[offset+j];
-			}
-			if (p_ptr->alloc_cores[offset+j])
-				asockets++;
-		}
-		/* make sure we have the required number of usable sockets */
-		if (skip && ((sockets - skip) < mc_ptr->min_sockets))
-			continue;
-		/* CR_SOCKET needs UNALLOCATED sockets */
-		if ((cr_type == CR_SOCKET) || (cr_type == CR_SOCKET_MEMORY)) {
-			if (sockets - asockets < mc_ptr->min_sockets)
-				continue;
-		}
+	maxtasks = job_res->nprocs;
+	avail_cpus = job_res->cpus;
+	
+	if (job_ptr->details && job_ptr->details->mc_ptr)
+		plane_size = job_ptr->details->mc_ptr->plane_size;
 
-		freecpus = (cores * sockets) - acores;
-		if (freecpus < maxcores)
-			continue;
+	if (plane_size <= 0) {
+		error("cons_res: _compute_plane_dist received invalid plane_size");
+		return SLURM_ERROR;
+	}
+	job_res->cpus = xmalloc(job_res->nhosts * sizeof(uint16_t));
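+	/* Plane fill: each sweep hands a node up to plane_size tasks
+	 * before moving on.  A hypothetical example: with plane_size = 2,
+	 * avail_cpus = {3,3} and maxtasks = 5, the result is cpus = {3,2}. */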
 
-		if (index < 0) {
-			index = i;
-			last_freecpus = freecpus;
+	for (tid = 0, i = 0; (tid < maxtasks); i++) { /* cycle counter */
+		bool space_remaining = false;
+		if (over_subscribe) {
+			/* 'over_subscribe' is a relief valve that guards
+			 * against an infinite loop, and it *should* never
+			 * come into play because maxtasks should never be
+			 * greater than the total number of available cpus
+			 */
+			error("cons_res: _compute_plane_dist oversubscribe");
 		}
-		if (freecpus < last_freecpus) {
-			index = i;
-			last_freecpus = freecpus;
+		for (n = 0; ((n < job_res->nhosts) && (tid < maxtasks)); n++) {
+			for (p = 0; p < plane_size && (tid < maxtasks); p++) {
+				if ((job_res->cpus[n] < avail_cpus[n]) ||
+				    over_subscribe) {
+					tid++;
+					if (job_res->cpus[n] < avail_cpus[n])
+						job_res->cpus[n]++;
+				}
+			}
+			if (job_res->cpus[n] < avail_cpus[n])
+				space_remaining = true;
+		}
+		if (!space_remaining) {
+			over_subscribe = true;
 		}
 	}
-	if (index < 0) {
-		/* This may happen if a node has fewer nodes than
-		 * configured and FastSchedule=2 */
-		error("job_assign_task: failure in computing offset");
-		index = 0;
-	}
-
-	return index * this_cr_node->sockets;
+	xfree(avail_cpus);
+	return SLURM_SUCCESS;
 }
 
-/*  _job_assign_tasks: Assign tasks to hardware for block and cyclic
- *  distributions */
-static int _job_assign_tasks(struct select_cr_job *job, 
-			struct node_cr_record *this_cr_node,
-			const int job_index, 
-			const select_type_plugin_info_t cr_type,
-			const int cyclic) 
+/* sync up core bitmap with new CPU count
+ *
+ * The CPU array contains the distribution of CPUs, which can include
+ * virtual CPUs (hyperthreads)
+ */
+static void _block_sync_core_bitmap(struct job_record *job_ptr,
+				    const select_type_plugin_info_t cr_type)
 {
-	int i, j, rc = SLURM_SUCCESS;
-	uint16_t cores, cpus, sockets, threads;
-	uint16_t usable_cores, usable_sockets, usable_threads;
-	uint16_t *avail_cores = NULL;
-	uint32_t corecount, last_corecount;
-	uint16_t asockets, offset, total;
-	uint32_t maxcores, reqcores, maxtasks = job->alloc_cpus[job_index];
-	struct part_cr_record *p_ptr;
-	struct multi_core_data *mc_ptr;
-	
-	p_ptr = get_cr_part_ptr(this_cr_node, job->job_ptr->part_ptr);
-	if (p_ptr == NULL)
-		return SLURM_ERROR;
+	uint32_t c, i, n, size, csize;
+	uint16_t cpus, num_bits, vpus = 1;
+	select_job_res_t job_res = job_ptr->select_job;
+	bool alloc_sockets = false;
 
-	if ((job->job_ptr == NULL) || (job->job_ptr->details == NULL)) {
-		/* This should never happen */
-		error("cons_res: job %u has no details", job->job_id);
-		return SLURM_ERROR;
-	}
-	if (!job->job_ptr->details->mc_ptr)
-		job->job_ptr->details->mc_ptr = create_default_mc();
-	mc_ptr = job->job_ptr->details->mc_ptr;
+	if (!job_res)
+		return;
 
-	/* get hardware info for this node */	
-	get_resources_this_node(&cpus,  &sockets, &cores, &threads, 
-				this_cr_node, job->job_id);
+#ifdef ALLOCATE_FULL_SOCKET
+	if ((cr_type == CR_SOCKET) || (cr_type == CR_SOCKET_MEMORY))
+		alloc_sockets = true;
+#endif
 
-	/* compute any job limits */	
-	usable_sockets = MIN(mc_ptr->max_sockets, sockets);
-	usable_cores   = MIN(mc_ptr->max_cores,   cores);
-	usable_threads = MIN(mc_ptr->max_threads, threads);
+	size  = bit_size(job_res->node_bitmap);
+	csize = bit_size(job_res->core_bitmap);
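+	/* Walk the core bitmap in lock-step with the allocated nodes:
+	 * keep set core bits until the node's CPU share is covered
+	 * (each kept core accounts for up to vpus CPUs), then clear any
+	 * remaining bits so the bitmap matches job_res->cpus. */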
+	for (c = 0, i = 0, n = 0; n < size; n++) {
+		
+		if (bit_test(job_res->node_bitmap, n) == 0)
+			continue;
+		num_bits = select_node_record[n].sockets *
+				select_node_record[n].cores;
+		if ((c + num_bits) > csize)
+			fatal ("cons_res: _block_sync_core_bitmap index error");
+		
+		cpus  = job_res->cpus[i++];
+		if (job_ptr->details && job_ptr->details->mc_ptr) {
+			vpus  = MIN(job_ptr->details->mc_ptr->max_threads,
+				    select_node_record[n].vpus);
+		}
 
-	/* determine the number of required cores. When multiple threads
-	 * are available, the maxtasks value may not reflect the requested
-	 * core count, which is what we are seeking here. */
-	if (job->job_ptr->details->overcommit) {
-		maxcores = 1;
-		reqcores = 1;
-	} else {
-		maxcores = maxtasks / usable_threads;
-		while ((maxcores * usable_threads) < maxtasks)
-			maxcores++;
-		reqcores = mc_ptr->min_cores * mc_ptr->min_sockets;
-		if (maxcores < reqcores)
-			maxcores = reqcores;
+		while (cpus > 0 && num_bits > 0) {
+			if (bit_test(job_res->core_bitmap, c++)) {
+				if (cpus < vpus)
+					cpus = 0;
+				else
+					cpus -= vpus;
+			}
+			num_bits--;
+		}
+		if (cpus > 0)
+			/* cpu count should NEVER be greater than the number
+			 * of set bits in the core bitmap for a given node */
+			fatal("cons_res: cpus computation error");
+
+		if (alloc_sockets) {	/* Advance to end of socket */
+			while ((num_bits > 0) && 
+			       (c % select_node_record[n].cores)) {
+				c++;
+				num_bits--;
+			}
+		}
+		while (num_bits > 0) {
+			bit_clear(job_res->core_bitmap, c++);
+			num_bits--;
+		}
+		
 	}
+}
 
-	offset = _find_offset(job, job_index, cores, sockets, maxcores, cr_type,
-			      this_cr_node);
-	job->node_offset[job_index] = offset;
 
-	debug3("job_assign_task %u s_ min %u u %u c_ min %u u %u"
-	       " t_ min %u u %u task %u core %u offset %u", 
-	       job->job_id, mc_ptr->min_sockets, usable_sockets, 
-	       mc_ptr->min_cores, usable_cores, mc_ptr->min_threads, 
-	       usable_threads, maxtasks, maxcores, offset);
+/* Sync up the core_bitmap with the CPU array using cyclic distribution
+ *
+ * The CPU array contains the distribution of CPUs, which can include
+ * virtual CPUs (hyperthreads)
+ */
+static void _cyclic_sync_core_bitmap(struct job_record *job_ptr,
+				     const select_type_plugin_info_t cr_type)
+{
+	uint32_t c, i, s, n, *sock_start, *sock_end, size, csize;
+	uint16_t cps = 0, cpus, vpus, sockets, sock_size;
+	select_job_res_t job_res = job_ptr->select_job;
+	bitstr_t *core_map;
+	bool *sock_used, alloc_sockets = false;
+
+	if ((job_res == NULL) || (job_res->core_bitmap == NULL))
+		return;
+
+#ifdef ALLOCATE_FULL_SOCKET
+	if ((cr_type == CR_SOCKET) || (cr_type == CR_SOCKET_MEMORY))
+		alloc_sockets = true;
+#endif
 
-	avail_cores = xmalloc(sizeof(uint16_t) * sockets);
-	/* initialized to zero by xmalloc */
+	core_map = job_res->core_bitmap;
 
-	total = 0;
-	asockets = 0;
-	for (i = 0; i < sockets; i++) {
-		if ((total >= maxcores) && (asockets >= mc_ptr->min_sockets)) {
-			break;
-		}
-		if (this_cr_node->cores <= p_ptr->alloc_cores[offset+i]) {
-			continue;
-		}
-		/* for CR_SOCKET, we only want to allocate empty sockets */
-		if ((cr_type == CR_SOCKET || cr_type == CR_SOCKET_MEMORY) &&
-		    (p_ptr->alloc_cores[offset+i] > 0))
-			continue;
-		avail_cores[i] = this_cr_node->cores - 
-				 p_ptr->alloc_cores[offset+i];
-		if (usable_cores <= avail_cores[i]) {
-			avail_cores[i] = usable_cores;
-		} else if (mc_ptr->min_cores > avail_cores[i]) {
-			avail_cores[i] = 0;
-		}
-		if (avail_cores[i] > 0) {
-			total += avail_cores[i];
-			asockets++;
-		}
-	}
+	sock_size  = select_node_record[0].sockets;
+	sock_start = xmalloc(sock_size * sizeof(uint32_t));
+	sock_end   = xmalloc(sock_size * sizeof(uint32_t));
+	sock_used  = xmalloc(sock_size * sizeof(bool));
 	
-#if(CR_DEBUG)
-    	for (i = 0; i < sockets; i+=2) {
-		info("cons_res: assign_task: avail_cores[%d]=%u, [%d]=%u", i,
-		     avail_cores[i], i+1, avail_cores[i+1]);
-	}
+	size  = bit_size(job_res->node_bitmap);
+	csize = bit_size(core_map);
+	for (c = 0, i = 0, n = 0; n < size; n++) {
+		
+		if (bit_test(job_res->node_bitmap, n) == 0)
+			continue;
+		sockets = select_node_record[n].sockets;
+		cps     = select_node_record[n].cores;
+		vpus    = MIN(job_ptr->details->mc_ptr->max_threads,
+			      select_node_record[n].vpus);
+#ifdef CR_DEBUG
+		info("DEBUG: job %u node %s max_threads %u, vpus %u cpus %u",
+		     job_ptr->job_id, select_node_record[n].node_ptr->name,
+		     job_ptr->details->mc_ptr->max_threads, vpus,
+		     job_res->cpus[i]);
 #endif
-	if (asockets == 0) {
-		/* Should never get here but just in case */
-		error("cons_res: %u Zero sockets satisfy"
-		      " request -B %u:%u: Using alternative strategy",
-		      job->job_id, mc_ptr->min_sockets, mc_ptr->min_cores);
-		for (i = 0; i < sockets; i++) {
-			if (this_cr_node->cores <= p_ptr->alloc_cores[offset+i])
-				continue;
-			avail_cores[i] = this_cr_node->cores - 
-				p_ptr->alloc_cores[offset+i];
+		if ((c + (sockets * cps)) > csize)
+			fatal ("cons_res: _cyclic_sync_core_bitmap index error");
+
+		if (sockets > sock_size) {
+			sock_size = sockets;
+			xrealloc(sock_start, sock_size * sizeof(uint32_t));
+			xrealloc(sock_end,   sock_size * sizeof(uint32_t));
+			xrealloc(sock_used,  sock_size * sizeof(bool));
 		}
-	}
-	
-	if (asockets < mc_ptr->min_sockets) {
-		error("cons_res: %u maxcores %u Cannot satisfy"
-		      " request -B %u:%u: Using -B %u:%u",
-		      job->job_id, maxcores, mc_ptr->min_sockets, 
-		      mc_ptr->min_cores, asockets, mc_ptr->min_cores);
-	}
+		
+		for (s = 0; s < sockets; s++) {
+			sock_start[s] = c + (s * cps);
+			sock_end[s]   = sock_start[s] + cps;
+		}
+		cpus  = job_res->cpus[i++];
+		while (cpus > 0) {
+			uint16_t prev_cpus = cpus;
+			for (s = 0; s < sockets && cpus > 0; s++) {
+
+				while (sock_start[s] < sock_end[s]) {
+					if (bit_test(core_map,sock_start[s])) {
+						sock_used[s] = true;
+						break;
+					} else
+						sock_start[s]++;
+				}
 
-	corecount = 0;
-	if (cyclic) {
-		/* distribute tasks cyclically across the sockets */
-		for (i=1; corecount<maxcores; i++) {
-			last_corecount = corecount;
-			for (j=0; ((j<sockets) && (corecount<maxcores)); j++) {
-				if (avail_cores[j] == 0)
+				if (sock_start[s] == sock_end[s])
+					/* this socket is unusable */
 					continue;
-				if (i<=avail_cores[j]) {
-					job->alloc_cores[job_index][j]++;
-					corecount++;
-				}
+				if (cpus < vpus)
+					cpus = 0;
+				else
+					cpus -= vpus;
+				sock_start[s]++;
 			}
-			if (last_corecount == corecount) {
-				/* Avoid possible infinite loop on error */
-				error("_job_assign_tasks failure");
-				rc = SLURM_ERROR;
-				goto fini;
+			if (prev_cpus == cpus) {
+				/* we're stuck! */
+				fatal("cons_res: sync loop not progressing");
 			}
 		}
-	} else {
-		/* distribute tasks in blocks across the sockets */
-		for (j=0; ((j<sockets) && (corecount<maxcores)); j++) {
-			last_corecount = corecount;
-			if (avail_cores[j] == 0)
+		/* clear the rest of the cores in each socket
+		 * FIXME: do we need min_core/min_socket checks here? */
+		for (s = 0; s < sockets; s++) {
+			if (sock_start[s] == sock_end[s])
 				continue;
-			for (i = 0; (i < avail_cores[j]) && 
-				    (corecount<maxcores); i++) {
-				job->alloc_cores[job_index][j]++;
-				corecount++;
-			}
-			if (last_corecount == corecount) {
-				/* Avoid possible infinite loop on error */
-				error("_job_assign_tasks failure");
-				rc = SLURM_ERROR;
-				goto fini;
+			if (!alloc_sockets || !sock_used[s]) {
+				bit_nclear(core_map, sock_start[s], 
+					   sock_end[s]-1);
 			}
 		}
+		/* advance 'c' to the beginning of the next node */
+		c += sockets * cps;
 	}
- fini:	xfree(avail_cores);
-	return rc;
+	xfree(sock_start);
+	xfree(sock_end);
+	xfree(sock_used);
 }
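+
+/* Illustrative walk-through of the cyclic sync (hypothetical node with
+ * 2 sockets x 4 cores, vpus = 1, and cpus[i] = 5, all core bits set):
+ * each pass takes the next free core from every socket in turn, so the
+ * job receives s0c0, s1c0, s0c1, s1c1, s0c2 as cpus drops from 5 to 0.
+ * The remaining bits (s0c3, s1c2, s1c3) are then cleared: the cores are
+ * spread across both sockets (3 + 2) rather than filling socket 0 first
+ * as the block sync would.
+ */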
 
-static uint16_t _get_cpu_offset(struct select_cr_job *job, int index,
-				struct node_cr_record *this_node)
-{
-	int i, set = 0;
-	uint16_t cpus, sockets, cores, threads, besto = 0, offset = 0;
-	struct part_cr_record *p_ptr;
-
-	p_ptr = get_cr_part_ptr(this_node, job->job_ptr->part_ptr);
-	if ((p_ptr == NULL) || (p_ptr->num_rows < 2))
-		return offset;
-
-	get_resources_this_node(&cpus, &sockets, &cores, &threads,
-	        		this_node, job->job_id);
-	/* scan all rows looking for the best row for job->alloc_cpus[index] */
-	for (i = 0; i < p_ptr->num_rows; i++) {
-		if ((cpus - p_ptr->alloc_cores[offset]) >=
-						job->alloc_cpus[index]) {
-			if (!set) {
-				set = 1;
-				besto = offset;
-			}
-			if (p_ptr->alloc_cores[offset] >
-						p_ptr->alloc_cores[besto]) {
-				besto = offset;
-			}
-		}
-		offset += this_node->sockets;
-	}
-	return besto;
-}
 
 /* To effectively deal with heterogeneous nodes, we fake a cyclic
  * distribution to figure out how many cpus are needed on each node.
@@ -375,7 +348,7 @@ static uint16_t _get_cpu_offset(struct select_cr_job *job, int index,
  *
  * For the consumable resources support we need to determine what
  * "node/CPU/Core/thread"-tuplets will be allocated for a given job.
- * In the past we assumed that we only allocated on task per CPU (at
+ * In the past we assumed that we only allocated one task per CPU (at
  * that point the lowest level of logical processor) and didn't allow
  * the use of overcommit. We have change this philosophy and are now
  * allowing people to overcommit their resources and expect the system
@@ -385,232 +358,75 @@ static uint16_t _get_cpu_offset(struct select_cr_job *job, int index,
  *
  * In the consumable resources environment we need to determine the
  * layout schema within slurmctld.
-*/
-extern int cr_dist(struct select_cr_job *job, int cyclic,
+ *
+ * We have a core_bitmap of all available cores. All we're doing here
+ * is removing cores that are not needed based on the task count, and
+ * the choice of cores to remove is based on the distribution:
+ * - "cyclic" removes cores "evenly", starting from the last socket,
+ * - "block" removes cores from the "last" socket(s)
+ * - "plane" removes cores "in chunks"
+ */
+extern int cr_dist(struct job_record *job_ptr,
 		   const select_type_plugin_info_t cr_type)
 {
-	int i, cr_cpu = 0, rc = SLURM_SUCCESS; 
-	uint32_t taskcount = 0;
-	int host_index;
-	int job_index = -1;
-
-	int error_code = compute_c_b_task_dist(job);
-	if (error_code != SLURM_SUCCESS) {
-		error(" Error in compute_c_b_task_dist");
-		return error_code;
+	int error_code, cr_cpu = 1; 
+	
+	if (job_ptr->select_job->node_req == NODE_CR_RESERVED) {
+		/* the job has been allocated an EXCLUSIVE set of nodes,
+		 * so it gets all of the bits in the core_bitmap and
+		 * all of the available CPUs in the cpus array */
+		int size = bit_size(job_ptr->select_job->core_bitmap);
+		bit_nset(job_ptr->select_job->core_bitmap, 0, size-1);
+		return SLURM_SUCCESS;
 	}
-
-	if ((cr_type == CR_CPU) || (cr_type == CR_MEMORY) ||
-	    (cr_type == CR_CPU_MEMORY)) 
-		cr_cpu = 1;
-
-	for (host_index = 0; 
-	     ((host_index < node_record_count) && (taskcount < job->nprocs));
-	     host_index++) {
-		struct node_cr_record *this_cr_node;
-
-		if (bit_test(job->node_bitmap, host_index) == 0)
-			continue;
-		job_index++;
-		
-		if (select_node_ptr == NULL) {
-			error("cons_res: select_node_ptr is NULL");
-			return SLURM_ERROR;
-		}
-		this_cr_node = &select_node_ptr[host_index];
-		
-		if (job->cpus[job_index] == 0) {
-			error("cons_res: %d no available cpus on node %s ",
-			      job->job_id,
-			      node_record_table_ptr[host_index].name);
-			continue;
-		}
-
-		if (cr_cpu) {
-			/* compute the offset */
-			job->node_offset[job_index] =
-				_get_cpu_offset(job, job_index, this_cr_node);
-		} else {
-			for (i = 0; i < job->num_sockets[job_index]; i++)
-				job->alloc_cores[job_index][i] = 0;
-
-			if (_job_assign_tasks(job, this_cr_node, job_index, 
-					      cr_type, cyclic) != SLURM_SUCCESS)
-				return SLURM_ERROR;
+	
+	if (job_ptr->details->task_dist == SLURM_DIST_PLANE) {
+		/* perform a plane distribution on the 'cpus' array */
+		error_code = _compute_plane_dist(job_ptr);
+		if (error_code != SLURM_SUCCESS) {
+			error("cons_res: cr_dist: Error in _compute_plane_dist");
+			return error_code;
 		}
-#if(CR_DEBUG)
-		info("cons_res _cr_dist %u host %d %s alloc_cpus %u", 
-		     job->job_id, host_index, this_cr_node->node_ptr->name, 
-		     job->alloc_cpus[job_index]);
-		for(i=0; !cr_cpu && i<job->num_sockets[job_index];i+=2) {
-			info("cons_res: _cr_dist: job %u " 
-			     "alloc_cores[%d][%d]=%u, [%d][%d]=%u", 
-			     job->job_id, 
-			     job_index, i, job->alloc_cores[job_index][i], 
-			     job_index, i+1, job->alloc_cores[job_index][i+1]);
+	} else {
+		/* perform a cyclic distribution on the 'cpus' array */
+		error_code = _compute_c_b_task_dist(job_ptr);
+		if (error_code != SLURM_SUCCESS) {
+			error("cons_res: cr_dist: Error in _compute_c_b_task_dist");
+			return error_code;
 		}
-#endif
 	}
-	return rc;
-}
-
-/* User has specified the --exclusive flag on the srun command line
- * which means that the job should use only dedicated nodes.  In this
- * case we do not need to compute the number of tasks on each nodes
- * since it should be set to the number of cpus.
- */
-extern int cr_exclusive_dist(struct select_cr_job *job,
-		 	     const select_type_plugin_info_t cr_type)
-{
-	int i, j;
-	int host_index = 0, get_cores = 0;
 
+	/* now sync up the core_bitmap with the allocated 'cpus' array
+	 * based on the given distribution AND resource setting */	
 	if ((cr_type == CR_CORE)   || (cr_type == CR_CORE_MEMORY) ||
 	    (cr_type == CR_SOCKET) || (cr_type == CR_SOCKET_MEMORY))
-		get_cores = 1;
+		cr_cpu = 0;
 
-	if (select_fast_schedule) {
-		struct config_record *config_ptr;
-		for (i = 0; i < node_record_count; i++) {
-			if (bit_test(job->node_bitmap, i) == 0)
-				continue;
-			config_ptr = node_record_table_ptr[i].config_ptr;
-			job->alloc_cpus[host_index] = config_ptr->cpus;
-			if (get_cores) {
-				for (j=0; j<config_ptr->sockets; 
-				     j++) {
-					job->alloc_cores[host_index][j] = 
-						config_ptr->cores;
-				}
-			}
-			host_index++;
-		}
-	} else {
-		for (i = 0; i < node_record_count; i++) {
-			if (bit_test(job->node_bitmap, i) == 0)
-				continue;
-			job->alloc_cpus[host_index] = node_record_table_ptr[i].
-						      cpus;
-			if (get_cores) {
-				for (j=0; j<node_record_table_ptr[i].sockets; 
-				     j++) {
-					job->alloc_cores[host_index][j] = 
-						node_record_table_ptr[i].cores;
-				}
-			}
-			host_index++;
-		}
+	if (cr_cpu) {
+		_block_sync_core_bitmap(job_ptr, cr_type);
+		return SLURM_SUCCESS;
 	}
-	return SLURM_SUCCESS;
-}
 
-extern int cr_plane_dist(struct select_cr_job *job, 
-			 const uint16_t plane_size,
-			 const select_type_plugin_info_t cr_type)
-{
-	uint32_t maxtasks  = job->nprocs;
-	uint32_t num_hosts = job->nhosts;
-	int i, j, k, host_index, cr_cpu = 0;
-	uint32_t taskcount = 0, last_taskcount;
-	int job_index = -1;
-	bool count_done = false;
-	bool over_commit = false;
-
-	debug3("cons_res _cr_plane_dist plane_size %u ", plane_size);
-	debug3("cons_res _cr_plane_dist  maxtasks %u num_hosts %u",
-	       maxtasks, num_hosts);
-
-	if (plane_size <= 0) {
-		error("Error in _cr_plane_dist");
+	/* Determine the number of logical processors per node needed
+	 * for this job. Make sure the switch below matches the layouts
+	 * in lllp_distribution() in plugins/task/affinity/dist_task.c
+	 * (FIXME) */
+	switch(job_ptr->details->task_dist) {
+	case SLURM_DIST_BLOCK_BLOCK:
+	case SLURM_DIST_CYCLIC_BLOCK:
+	case SLURM_DIST_PLANE:
+		_block_sync_core_bitmap(job_ptr, cr_type);
+		break;
+	case SLURM_DIST_ARBITRARY:
+	case SLURM_DIST_BLOCK:
+	case SLURM_DIST_CYCLIC:				
+	case SLURM_DIST_BLOCK_CYCLIC:
+	case SLURM_DIST_CYCLIC_CYCLIC:
+	case SLURM_DIST_UNKNOWN:
+		_cyclic_sync_core_bitmap(job_ptr, cr_type); 
+		break;
+	default:
+		error("select/cons_res: invalid task_dist entry");
 		return SLURM_ERROR;
 	}
-
-	if (job->job_ptr->details && job->job_ptr->details->overcommit)
-		over_commit = true;
-
-	taskcount = 0;
-	for (j=0; ((taskcount<maxtasks) && (!count_done)); j++) {
-		last_taskcount = taskcount;
-		for (i=0; 
-		     (((i<num_hosts) && (taskcount<maxtasks)) && (!count_done));
-		     i++) {
-			for (k=0; ((k<plane_size) && (!count_done)); k++) {
-				if (taskcount >= maxtasks) {
-					count_done = true;
-					break;
-				}
-				taskcount++;
-				if ((job->alloc_cpus[i] == 0) ||
-				    (!over_commit))
-					job->alloc_cpus[i]++;
-			}
-		}
-		if (last_taskcount == taskcount) {
-			/* avoid possible infinite loop on error */
-			error("cr_plane_dist failure");
-			return SLURM_ERROR;
-		}
-	}
-
-#if(CR_DEBUG)	
-	for (i = 0; i < job->nhosts; i++) {
-		info("cons_res _cr_plane_dist %u host_index %d alloc_cpus %u ", 
-		     job->job_id, i, job->alloc_cpus[i]);
-	}
-#endif
-
-	if ((cr_type == CR_CPU) || (cr_type == CR_MEMORY) ||
-	    (cr_type == CR_CPU_MEMORY))
-		cr_cpu = 1;
-
-	taskcount = 0;
-	for (host_index = 0; 
-	     ((host_index < node_record_count) && (taskcount < job->nprocs));
-	     host_index++) {
-		struct node_cr_record *this_cr_node = NULL;
-
-		if (bit_test(job->node_bitmap, host_index) == 0)
-			continue;
-		job_index++;
-
-		if (select_node_ptr == NULL) {
-			error("cons_res: select_node_ptr is NULL");
-			return SLURM_ERROR;
-		}
-		this_cr_node = &select_node_ptr[host_index];
-		
-		if (job->cpus[job_index] == 0) {
-			error("cons_res: no available cpus on node %s", 
-			      node_record_table_ptr[host_index].name);
-			continue;
-		}
-
-		if (cr_cpu) {
-			/* compute the offset */
-			job->node_offset[job_index] =
-				_get_cpu_offset(job, job_index, this_cr_node);
-		} else {
-			for (j = 0; j < job->num_sockets[job_index]; j++)
-				job->alloc_cores[job_index][j] = 0;
-
-			if (_job_assign_tasks(job, this_cr_node, job_index, 
-					      cr_type, 0) != SLURM_SUCCESS)
-				return SLURM_ERROR;
-		}
-#if(CR_DEBUG)
-		info("cons_res _cr_plane_dist %u host %d %s alloc_cpus %u", 
-		     job->job_id, host_index, this_cr_node->node_ptr->name, 
-		     job->alloc_cpus[job_index]);
-
-		for (i = 0; !cr_cpu && i < this_cr_node->sockets; i++) {
-			info("cons_res _cr_plane_dist %u host %d %s alloc_cores %u",
-			     job->job_id, host_index,
-			     this_cr_node->node_ptr->name,
-			     job->alloc_cores[job_index][i]);
-		}
-#endif
-		
-	}
-
 	return SLURM_SUCCESS;
 }
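+
+/* For example (assuming the standard srun distribution options): a job
+ * submitted with "srun -m cyclic" arrives here as SLURM_DIST_CYCLIC and
+ * is synced by _cyclic_sync_core_bitmap(), while "-m plane" and the
+ * layouts with a second-level "block" component (e.g. block:block) use
+ * _block_sync_core_bitmap().  Under CR_CPU or CR_MEMORY (cr_cpu == 1)
+ * the distribution only shapes the cpus array, and the core_bitmap is
+ * always block-synced.
+ */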
diff --git a/src/plugins/select/cons_res/dist_tasks.h b/src/plugins/select/cons_res/dist_tasks.h
index aea7e3f19472a37c5f10442ffacfee1a72a9f615..2b032609d76e9d3bcd23c054ce050ab03103f4e0 100644
--- a/src/plugins/select/cons_res/dist_tasks.h
+++ b/src/plugins/select/cons_res/dist_tasks.h
@@ -5,10 +5,11 @@
  *****************************************************************************
  *  Copyright (C) 2006 Hewlett-Packard Development Company, L.P.
  *  Written by Susanne M. Balle, <susanne.balle@hp.com>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -49,16 +50,6 @@
 
 #include "select_cons_res.h"
 
-int cr_exclusive_dist(struct select_cr_job *job,
-		      const select_type_plugin_info_t cr_type);
-
-int cr_dist(struct select_cr_job *job, int cyclic,
-	    const select_type_plugin_info_t cr_type);
-
-int cr_plane_dist(struct select_cr_job *job, 
-		  const uint16_t plane_size,
-		  const select_type_plugin_info_t cr_type);
-
-int compute_c_b_task_dist(struct select_cr_job *job);
+int cr_dist(struct job_record *job_ptr,
+	    const select_type_plugin_info_t cr_type);
 
 #endif /* !_CONS_RES_DIST_TASKS_H */
diff --git a/src/plugins/select/cons_res/job_test.c b/src/plugins/select/cons_res/job_test.c
new file mode 100644
index 0000000000000000000000000000000000000000..b219e2266915f4cda4dee98cb498165102bbd249
--- /dev/null
+++ b/src/plugins/select/cons_res/job_test.c
@@ -0,0 +1,2123 @@
+/*****************************************************************************\
+ *  job_test.c - node selection plugin supporting consumable
+ *  resources policies.
+ *****************************************************************************
+ *
+ *  The example below illustrates how four jobs are allocated
+ *  across a cluster using a processor consumable resource approach.
+ * 
+ *  The example cluster is composed of 4 nodes (10 cpus in total):
+ *  linux01 (with 2 processors), 
+ *  linux02 (with 2 processors), 
+ *  linux03 (with 2 processors), and
+ *  linux04 (with 4 processors). 
+ *
+ *  The four jobs are the following: 
+ *  1. srun -n 4 -N 4  sleep 120 &
+ *  2. srun -n 3 -N 3 sleep 120 &
+ *  3. srun -n 1 sleep 120 &
+ *  4. srun -n 3 sleep 120 &
+ *  The user launches them in the same order as listed above.
+ * 
+ *  Using a processor consumable resource approach we get the following
+ *  job allocation and scheduling:
+ * 
+ *  The output of squeue shows that we have 3 out of the 4 jobs allocated
+ *  and running, two more running jobs than the default SLURM approach
+ *  would allow.
+ * 
+ *  Job 2, Job 3, and Job 4 are now running concurrently on the cluster.
+ * 
+ *  [<snip>]# squeue
+ *  JOBID PARTITION     NAME     USER  ST       TIME  NODES NODELIST(REASON)
+ *     5        lsf    sleep     root  PD       0:00      1 (Resources)
+ *     2        lsf    sleep     root   R       0:13      4 linux[01-04]
+ *     3        lsf    sleep     root   R       0:09      3 linux[01-03]
+ *     4        lsf    sleep     root   R       0:05      1 linux04
+ *  [<snip>]#
+ * 
+ *  Once Job 2 finishes, Job 5, which was pending, is allocated
+ *  available resources and is then running as illustrated below:
+ * 
+ *  [<snip>]# squeue4
+ *   JOBID PARTITION    NAME     USER  ST       TIME  NODES NODELIST(REASON)
+ *     3        lsf    sleep     root   R       1:58      3 linux[01-03]
+ *     4        lsf    sleep     root   R       1:54      1 linux04
+ *     5        lsf    sleep     root   R       0:02      3 linux[01-03]
+ *  [<snip>]#
+ * 
+ *  Job 3, Job 4, and Job 5 are now running concurrently on the cluster.
+ * 
+ *  [<snip>]#  squeue4
+ *  JOBID PARTITION     NAME     USER  ST       TIME  NODES NODELIST(REASON)
+ *     5        lsf    sleep     root   R       1:52      3 xc14n[13-15]
+ *  [<snip>]#
+ *
+ * The advantage of the consumable resource scheduling policy is that
+ * job throughput can increase dramatically.
+ *
+ *****************************************************************************
+ *  Copyright (C) 2005-2008 Hewlett-Packard Development Company, L.P.
+ *  Written by Susanne M. Balle <susanne.balle@hp.com>, who borrowed heavily
+ *  from select/linear 
+ *  
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
+ *  
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission 
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and 
+ *  distribute linked combinations including the two. You must obey the GNU 
+ *  General Public License in all respects for all of the code used other than 
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this 
+ *  exception to your version of the file(s), but you are not obligated to do 
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in 
+ *  the program, then also delete it here.
+ *  
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *  
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#ifdef HAVE_CONFIG_H
+#  include "config.h"
+#  if HAVE_STDINT_H
+#    include <stdint.h>
+#  endif
+#  if HAVE_INTTYPES_H
+#    include <inttypes.h>
+#  endif
+#endif
+
+#include "dist_tasks.h"
+#include "job_test.h"
+#include "select_cons_res.h"
+
+#define SELECT_DEBUG	0
+
+static int _eval_nodes(struct job_record *job_ptr, bitstr_t *node_map,
+			uint32_t min_nodes, uint32_t max_nodes,
+			uint32_t req_nodes, uint32_t cr_node_cnt,
+			uint16_t *cpu_cnt, uint32_t *freq, uint32_t size);
+static int _eval_nodes_topo(struct job_record *job_ptr, bitstr_t *node_map,
+			uint32_t min_nodes, uint32_t max_nodes,
+			uint32_t req_nodes, uint32_t cr_node_cnt,
+			uint16_t *cpu_cnt, uint32_t *freq, uint32_t size);
+
+/* _allocate_sockets - Given the job requirements, determine which sockets
+ *                     from the given node can be allocated (if any) to this
+ *                     job. Returns the number of cpus that can be used by
+ *                     this node AND a core-level bitmap of the selected
+ *                     sockets.
+ *
+ * IN job_ptr      - pointer to job requirements
+ * IN/OUT core_map - core_bitmap of available cores
+ * IN node_i       - index of node to be evaluated
+ */
+uint16_t _allocate_sockets(struct job_record *job_ptr, bitstr_t *core_map,
+			   const uint32_t node_i)
+{
+	uint16_t cpu_count = 0, cpu_cnt = 0;
+	uint16_t si, cps, avail_cpus = 0, num_tasks = 0;
+	uint32_t core_begin    = cr_get_coremap_offset(node_i);
+	uint32_t core_end      = cr_get_coremap_offset(node_i+1);
+	uint16_t cpus_per_task = job_ptr->details->cpus_per_task;
+	uint16_t *used_cores, *free_cores, free_core_count = 0;
+	uint16_t i, c, sockets    = select_node_record[node_i].sockets;
+	uint16_t cores_per_socket = select_node_record[node_i].cores;
+	uint16_t threads_per_core = select_node_record[node_i].vpus;
+
+	uint16_t min_cores = 0, min_sockets = 0, ntasks_per_socket = 0;
+	uint16_t max_cores = 0, max_sockets = 0, max_threads = 0;
+
+	if (job_ptr->details && job_ptr->details->mc_ptr) {
+		min_cores   = job_ptr->details->mc_ptr->min_cores;
+		min_sockets = job_ptr->details->mc_ptr->min_sockets;
+		max_cores   = job_ptr->details->mc_ptr->max_cores;
+		max_sockets = job_ptr->details->mc_ptr->max_sockets;
+		max_threads = job_ptr->details->mc_ptr->max_threads;
+		ntasks_per_socket = job_ptr->details->mc_ptr->ntasks_per_socket;
+	}
+	
+	/* These are the job parameters that we must respect:
+	 *
+	 *   job_ptr->details->mc_ptr->min_cores (cr_core|cr_socket)
+	 *	- min # of cores per socket to allocate to this job
+	 *   job_ptr->details->mc_ptr->max_cores (cr_core|cr_socket)
+	 *	- max # of cores per socket to allocate to this job
+	 *   job_ptr->details->mc_ptr->min_sockets (cr_core|cr_socket)
+	 *	- min # of sockets per node to allocate to this job
+	 *   job_ptr->details->mc_ptr->max_sockets (cr_core|cr_socket)
+	 *	- max # of sockets per node to allocate to this job
+	 *
+	 *   job_ptr->details->mc_ptr->max_threads (cr_core|cr_socket)
+	 *	- max_threads per core to allocate to this job
+	 *   job_ptr->details->mc_ptr->ntasks_per_core (cr_core|cr_socket)
+	 *	- number of tasks to launch per core
+	 *   job_ptr->details->mc_ptr->ntasks_per_socket (cr_core|cr_socket)
+	 *	- number of tasks to launch per socket
+	 *
+	 *   job_ptr->details->ntasks_per_node (all cr_types)
+	 *	- total number of tasks to launch on this node
+	 *   job_ptr->details->cpus_per_task (all cr_types)
+	 *	- number of cpus to allocate per task
+	 *
+	 * These are the hardware constraints:
+	 *   cpus = sockets * cores_per_socket * threads_per_core
+	 *
+	 * These are the cores/sockets that are available: core_map
+	 *
+	 * NOTE: currently we only allocate at the socket level, the core
+	 *       level, or the cpu level. When hyperthreading is enabled
+	 *       in the BIOS, then there can be more than one thread/cpu
+	 *       per physical core.
+	 *
+	 * PROCEDURE:
+	 *
+	 * Step 1: Determine the current usage data: used_cores[],
+	 *         used_core_count, free_cores[], free_core_count
+	 *
+	 * Step 2: For core-level and socket-level: apply min_sockets,
+	 *         max_sockets, min_cores, and max_cores to the "free"
+	 *         cores.
+	 *
+	 * Step 3: Compute task-related data: max_threads, ntasks_per_core,
+	 *         ntasks_per_socket, ntasks_per_node and cpus_per_task
+	 *         and determine the number of tasks to run on this node
+	 *
+	 * Step 4: Mark the allocated resources in the job_cores bitmap
+	 *         and return "num_tasks" from Step 3.
+	 *
+	 *
+	 * For socket and core counts, start by assuming that all available
+	 * resources will be given to the job. Check min_* to ensure that
+	 * there's enough resources. Reduce the resource count to match max_*
+	 * (if necessary). Also reduce resource count (if necessary) to
+	 * match ntasks_per_resource.
+	 *
+	 * NOTE: Memory is not used as a constraint here - should it?
+	 *       If not then it needs to be done somewhere else!
+	 */
+
+
+	/* Step 1: create and compute core-count-per-socket
+	 * arrays and total core counts */
+	free_cores = xmalloc(sockets * sizeof(uint16_t));
+	used_cores = xmalloc(sockets * sizeof(uint16_t));
+	
+	for (c = core_begin; c < core_end; c++) {
+		i = (c - core_begin) / cores_per_socket;
+		if (bit_test(core_map, c)) {
+			free_cores[i]++;
+			free_core_count++;
+		} else {
+			used_cores[i]++;
+		}
+	}
+	/* if a socket is already in use, it cannot be used
+	 * by this job */
+	for (i = 0; i < sockets; i++) {
+		if (used_cores[i]) {
+			free_core_count -= free_cores[i];
+			used_cores[i] += free_cores[i];
+			free_cores[i] = 0;
+		}
+	}
+	xfree(used_cores);
+	used_cores = NULL;
+
+	/* Step 2: check min_cores per socket and min_sockets per node */
+	c = 0;
+	for (i = 0; i < sockets; i++) {
+		if (free_cores[i] < min_cores) {
+			/* cannot use this socket */
+			free_core_count -= free_cores[i];
+			free_cores[i] = 0;
+			continue;
+		}
+		/* count this socket as usable */
+		c++;
+	}
+	if (c < min_sockets) {
+		/* cannot use this node */
+		num_tasks = 0;
+		goto fini;
+	}
+	
+	/* check max_cores and max_sockets */
+	c = 0;
+	for (i = 0; i < sockets; i++) {
+		if (max_cores && free_cores[i] > max_cores) {
+			/* remove extra cores from this socket */
+			uint16_t tmp = free_cores[i] - max_cores;
+			free_core_count -= tmp;
+			free_cores[i] -= tmp;
+		}
+		if (free_cores[i] > 0)
+			c++;
+		if (max_sockets && free_cores[i] && c > max_sockets) {
+			/* remove extra sockets from use */
+			free_core_count -= free_cores[i];
+			free_cores[i] = 0;
+		}
+	}
+	if (free_core_count < 1) {
+		/* no available resources on this node */
+		num_tasks = 0;
+		goto fini;
+	}
+
+
+	/* Step 3: Compute task-related data: use max_threads,
+	 *         ntasks_per_socket, ntasks_per_node and cpus_per_task
+	 *         to determine the number of tasks to run on this node
+	 *
+	 * Note: cpus_per_task and ntasks_per_core need to play nice
+	 *       2 tasks_per_core vs. 2 cpus_per_task
+	 */
+	avail_cpus = 0;
+	num_tasks = 0;
+	threads_per_core = MIN(threads_per_core,max_threads);
+	for (i = 0; i < sockets; i++) {
+		uint16_t tmp = free_cores[i] * threads_per_core;
+		avail_cpus += tmp;
+		if (ntasks_per_socket)
+			num_tasks += MIN(tmp,ntasks_per_socket);
+		else
+			num_tasks += tmp;
+	}
+	if (job_ptr->details->ntasks_per_node)
+		num_tasks = MIN(num_tasks, job_ptr->details->ntasks_per_node);
+	
+	if (cpus_per_task < 2) {
+		avail_cpus = num_tasks;
+		cps = num_tasks;
+	} else {
+		c = avail_cpus / cpus_per_task;
+		num_tasks = MIN(num_tasks, c);
+		avail_cpus = num_tasks * cpus_per_task;
+	}
+	
+	/* Step 4 - make sure that ntasks_per_socket is enforced when
+	 *          allocating cores
+	 */
+	cps = num_tasks;
+	if (ntasks_per_socket > 1) {
+		cps = ntasks_per_socket;
+		if (cpus_per_task > 1)
+			cps = ntasks_per_socket * cpus_per_task;
+	}
+	si = 9999;	/* impossible socket index: forces si != i below */
+	for (c = core_begin; c < core_end && avail_cpus > 0; c++) {
+		if (bit_test(core_map, c) == 0)
+			continue;
+		i = (c - core_begin) / cores_per_socket;
+		if (free_cores[i] > 0) {
+			/* this socket has free cores, but make sure
+			 * we don't use more than are needed for
+			 * ntasks_per_socket */
+			if (si != i) {
+				si = i;
+				cpu_cnt = threads_per_core;
+			} else {
+				if (cpu_cnt >= cps) {
+					/* do not allocate this core */
+					bit_clear(core_map, c);
+					continue;
+				}
+				cpu_cnt += threads_per_core;
+			}
+			free_cores[i]--;
+			cpu_count += threads_per_core;
+			if (avail_cpus >= threads_per_core)
+				avail_cpus -= threads_per_core;
+			else
+				avail_cpus = 0;
+			
+		} else
+			bit_clear(core_map, c);
+	}
+	/* clear leftovers */
+	if (c < core_end)
+		bit_nclear(core_map, c, core_end-1);
+
+fini:
+	/* if num_tasks == 0 then clear all bits on this node */
+	if (!num_tasks) {
+		bit_nclear(core_map, core_begin, core_end-1);
+		cpu_count = 0;
+	}
+	xfree(free_cores);
+	return cpu_count;
+}
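+
+/* Worked example (hypothetical 2-socket node with 4 cores per socket,
+ * 1 thread per core; job: min_cores = 2, max_cores = 3, min_sockets
+ * and max_sockets satisfied, cpus_per_task = 1; one core of socket 0
+ * is already in use):
+ *   Step 1: used_cores = {1,0}, so socket 0 is discarded entirely and
+ *           free_cores = {0,4};
+ *   Step 2: free_cores[1] >= min_cores, and max_cores trims it to 3;
+ *   Step 3: avail_cpus = num_tasks = 3;
+ *   Step 4: the first 3 free cores of socket 1 remain set in core_map
+ *           and cpu_count = 3 is returned.
+ */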
+
+
+/* _allocate_cores - Given the job requirements, determine which cores
+ *                   from the given node can be allocated (if any) to this
+ *                   job. Returns the number of cpus that can be used by
+ *                   this node AND a bitmap of the selected cores.
+ *
+ * IN job_ptr      - pointer to job requirements
+ * IN/OUT core_map - bitmap of cores available for use/selected for use
+ * IN node_i       - index of node to be evaluated
+ */
+uint16_t _allocate_cores(struct job_record *job_ptr, bitstr_t *core_map,
+			 const uint32_t node_i, int cpu_type)
+{
+	uint16_t cpu_count = 0, avail_cpus = 0, num_tasks = 0;
+	uint32_t core_begin    = cr_get_coremap_offset(node_i);
+	uint32_t core_end      = cr_get_coremap_offset(node_i+1);
+	uint16_t cpus_per_task = job_ptr->details->cpus_per_task;
+	uint16_t *free_cores, free_core_count = 0;
+	uint16_t i, c, sockets    = select_node_record[node_i].sockets;
+	uint16_t cores_per_socket = select_node_record[node_i].cores;
+	uint16_t threads_per_core = select_node_record[node_i].vpus;
+
+	uint16_t min_cores = 0, min_sockets = 0;
+	uint16_t max_cores = 0, max_sockets = 0, max_threads = 0;
+
+	if (!cpu_type && job_ptr->details && job_ptr->details->mc_ptr) {
+		min_cores   = job_ptr->details->mc_ptr->min_cores;
+		min_sockets = job_ptr->details->mc_ptr->min_sockets;
+		max_cores   = job_ptr->details->mc_ptr->max_cores;
+		max_sockets = job_ptr->details->mc_ptr->max_sockets;
+		max_threads = job_ptr->details->mc_ptr->max_threads;
+	}
+	
+	/* These are the job parameters that we must respect:
+	 *
+	 *   job_ptr->details->mc_ptr->min_cores (cr_core|cr_socket)
+	 *	- min # of cores per socket to allocate to this job
+	 *   job_ptr->details->mc_ptr->max_cores (cr_core|cr_socket)
+	 *	- max # of cores per socket to allocate to this job
+	 *   job_ptr->details->mc_ptr->min_sockets (cr_core|cr_socket)
+	 *	- min # of sockets per node to allocate to this job
+	 *   job_ptr->details->mc_ptr->max_sockets (cr_core|cr_socket)
+	 *	- max # of sockets per node to allocate to this job
+	 *
+	 *   job_ptr->details->mc_ptr->max_threads (cr_core|cr_socket)
+	 *	- max_threads per core to allocate to this job
+	 *   job_ptr->details->mc_ptr->ntasks_per_core (cr_core|cr_socket)
+	 *	- number of tasks to launch per core
+	 *   job_ptr->details->mc_ptr->ntasks_per_socket (cr_core|cr_socket)
+	 *	- number of tasks to launch per socket
+	 *
+	 *   job_ptr->details->ntasks_per_node (all cr_types)
+	 *	- total number of tasks to launch on this node
+	 *   job_ptr->details->cpus_per_task (all cr_types)
+	 *	- number of cpus to allocate per task
+	 *
+	 * These are the hardware constraints:
+	 *   cpus = sockets * cores_per_socket * threads_per_core
+	 *
+	 * These are the cores that are available for use: core_map
+	 *
+	 * NOTE: currently we only allocate at the socket level, the core
+	 *       level, or the cpu level. When hyperthreading is enabled
+	 *       in the BIOS, then there can be more than one thread/cpu
+	 *       per physical core.
+	 *
+	 * PROCEDURE:
+	 *
+	 * Step 1: Determine the current usage data: free_cores[] and
+	 *         free_core_count
+	 *
+	 * Step 2: Apply min_sockets, max_sockets, min_cores and
+	 *         max_cores to the "free" cores.
+	 *
+	 * Step 3: Compute task-related data: use max_threads,
+	 *         ntasks_per_core, ntasks_per_node and cpus_per_task
+	 *         to determine the number of tasks that can run on
+	 *         this node
+	 *
+	 * Step 4: Mark the allocated resources in the job_cores bitmap
+	 *         and return "num_tasks" from Step 3.
+	 *
+	 *
+	 * Start by assuming that all "free" cores will be given to the
+	 * job. Check min_* to ensure that there's enough resources.
+	 * Reduce the core count to match max_* (if necessary). Also,
+	 * reduce the core count (if necessary) to match ntasks_per_core.
+	 * Note that we're not processing ntasks_per_socket, because the
+	 * srun manpage says that ntasks_per_socket is only valid for
+	 * CR_SOCKET.
+	 */
+
+	/* Step 1: create and compute core-count-per-socket
+	 * arrays and total core counts */
+	free_cores = xmalloc(sockets * sizeof(uint16_t));
+	
+	for (c = core_begin; c < core_end; c++) {
+		i = (c - core_begin) / cores_per_socket;
+		if (bit_test(core_map, c)) {
+			free_cores[i]++;
+			free_core_count++;
+		}
+	}
+	
+	/* Step 2a: check min_cores per socket and min_sockets per node */
+	c = 0;
+	for (i = 0; i < sockets; i++) {
+		if (free_cores[i] < min_cores) {
+			/* cannot use this socket */
+			free_core_count -= free_cores[i];
+			free_cores[i] = 0;
+			continue;
+		}
+		/* count this socket as usable */
+		c++;
+	}
+	if (c < min_sockets) {
+		/* cannot use this node */
+		num_tasks = 0;
+		goto fini;
+	}
+	
+	/* Step 2b: check max_cores per socket and max_sockets per node */
+	c = 0;
+	for (i = 0; i < sockets; i++) {
+		if (max_cores && free_cores[i] > max_cores) {
+			/* remove extra cores from this socket */
+			uint16_t tmp = free_cores[i] - max_cores;
+			free_core_count -= tmp;
+			free_cores[i] -= tmp;
+		}
+		if (free_cores[i] > 0)
+			c++;
+		if (max_sockets && free_cores[i] && c > max_sockets) {
+			/* remove extra sockets from use */
+			free_core_count -= free_cores[i];
+			free_cores[i] = 0;
+		}
+	}
+	if (free_core_count < 1) {
+		/* no available resources on this node */
+		num_tasks = 0;
+		goto fini;
+	}
+
+
+	/* Step 3: Compute task-related data: use max_threads,
+	 *         ntasks_per_core, ntasks_per_node and cpus_per_task
+	 *         to determine the number of tasks to run on this node
+	 *
+	 * Note: cpus_per_task and ntasks_per_core need to play nice
+	 *       2 tasks_per_core vs. 2 cpus_per_task
+	 */
+
+	if (cpu_type)
+		max_threads = threads_per_core;
+	threads_per_core = MIN(threads_per_core,max_threads);
+	num_tasks = avail_cpus = threads_per_core;
+	i = job_ptr->details->mc_ptr->ntasks_per_core;
+	if (!cpu_type && i > 0)
+		num_tasks = MIN(num_tasks, i);
+	
+	/* convert from PER_CORE to TOTAL_FOR_NODE */
+	avail_cpus *= free_core_count;
+	num_tasks *= free_core_count;
+
+	if (job_ptr->details->ntasks_per_node)
+		num_tasks = MIN(num_tasks, job_ptr->details->ntasks_per_node);
+	
+	if (cpus_per_task < 2) {
+		avail_cpus = num_tasks;
+	} else {
+		c = avail_cpus / cpus_per_task;
+		num_tasks = MIN(num_tasks, c);
+		avail_cpus = num_tasks * cpus_per_task;
+	}
+	
+	/* Step 4 */
+	for (c = core_begin; c < core_end && avail_cpus > 0; c++) {
+		if (bit_test(core_map, c) == 0)
+			continue;
+		i = (c - core_begin) / cores_per_socket;
+		if (free_cores[i] == 0)
+			bit_clear(core_map, c);
+		else {
+			free_cores[i]--;
+			cpu_count += threads_per_core;
+			if (avail_cpus >= threads_per_core)
+				avail_cpus -= threads_per_core;
+			else
+				avail_cpus = 0;
+		}
+	}
+	/* clear leftovers */
+	if (c < core_end)
+		bit_nclear(core_map, c, core_end-1);
+
+fini:
+	if (!num_tasks) {
+		bit_nclear(core_map, core_begin, core_end-1);
+		cpu_count = 0;
+	}
+	xfree(free_cores);
+	return cpu_count;
+}
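+
+/* Worked example of Step 3 (hypothetical: 6 free cores with 2 threads
+ * per core, so avail_cpus = 12; ntasks_per_core = 1 caps num_tasks at
+ * 6; cpus_per_task = 4): c = 12 / 4 = 3, so num_tasks = MIN(6, 3) = 3
+ * and avail_cpus = 3 * 4 = 12, i.e. all 6 cores are kept but only 3
+ * tasks will be launched on them.
+ */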
+
+
+/*
+ * _can_job_run_on_node - Given the job requirements, determine which
+ *                        resources from the given node (if any) can be
+ *                        allocated to this job. Returns the number of
+ *                        cpus that can be used by this node and a bitmap
+ *                        of available resources for allocation.
+ *       NOTE: This process does NOT support overcommitting resources
+ *
+ * IN job_ptr       - pointer to job requirements
+ * IN/OUT core_map  - core_bitmap of available cores
+ * IN n             - index of node to be evaluated
+ * IN cr_type       - Consumable Resource setting
+ * IN test_only     - ignore allocated memory check
+ *
+ * NOTE: The returned cpu_count may be less than the number of set bits in 
+ *       core_map for the given node. The cr_dist functions will determine
+ *       which bits to deselect from the core_map to match the cpu_count.
+ */
+uint16_t _can_job_run_on_node(struct job_record *job_ptr, bitstr_t *core_map,
+			      const uint32_t node_i,
+			      struct node_use_record *node_usage,
+			      select_type_plugin_info_t cr_type,
+			      bool test_only)
+{
+	uint16_t cpus;
+	uint32_t avail_mem, req_mem;
+
+	switch (cr_type) {
+	case CR_CORE:
+	case CR_CORE_MEMORY:
+		cpus = _allocate_cores(job_ptr, core_map, node_i, 0);
+		break;
+	case CR_SOCKET:
+	case CR_SOCKET_MEMORY:
+		cpus = _allocate_sockets(job_ptr, core_map, node_i);
+		break;
+	case SELECT_TYPE_INFO_NONE:
+		/* Default for select/linear */
+	case CR_CPU:
+	case CR_CPU_MEMORY:
+	case CR_MEMORY:
+	default:
+		cpus = _allocate_cores(job_ptr, core_map, node_i, 1);
+	}
+	
+	if ((cr_type != CR_CPU_MEMORY)    && (cr_type != CR_CORE_MEMORY) &&
+	    (cr_type != CR_SOCKET_MEMORY) && (cr_type != CR_MEMORY))
+		return cpus;
+
+	/* Memory Check: check job_min_memory to see if:
+	 *          - this node has enough memory (MEM_PER_CPU flag unset)
+	 *          - there are enough free_cores (MEM_PER_CPU flag set)
+	 */
+	req_mem   = job_ptr->details->job_min_memory & ~MEM_PER_CPU;
+	avail_mem = select_node_record[node_i].real_memory;
+	if (!test_only)
+		avail_mem -= node_usage[node_i].alloc_memory;
+	if (job_ptr->details->job_min_memory & MEM_PER_CPU) {
+		/* memory is per-cpu */
+		while (cpus > 0 && (req_mem * cpus) > avail_mem)
+			cpus--;	
+		if (cpus < job_ptr->details->ntasks_per_node)
+			cpus = 0;
+		/* FIXME: do we need to recheck min_cores, etc. here? */	
+	} else {
+		/* memory is per node */
+		if (req_mem > avail_mem) {
+			bit_nclear(core_map, cr_get_coremap_offset(node_i), 
+					(cr_get_coremap_offset(node_i+1))-1);
+			cpus = 0;
+		}
+	}
+	
+	debug3("cons_res: _can_job_run_on_node: %u cpus on %s(%d), mem %u/%u",
+		cpus, select_node_record[node_i].node_ptr->name,
+		node_usage[node_i].node_state,
+		node_usage[node_i].alloc_memory,
+		select_node_record[node_i].real_memory);
+	
+	return cpus;
+}
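+
+/* Worked example of the per-cpu memory check (hypothetical: cpus = 8,
+ * req_mem = 1024 MB per cpu, avail_mem = 6144 MB): the loop lowers cpus
+ * until req_mem * cpus <= avail_mem, i.e. from 8 to 6, and the cr_dist
+ * functions later deselect the matching core_map bits.
+ */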
+
+
+/* Test to see if a node already has running jobs.
+ * if (sharing_only) then only check sharing partitions. This is because
+ * the job was submitted to a single-row partition which does not share
+ * allocated CPUs with multi-row partitions.
+ */
+static int _is_node_busy(struct part_res_record *p_ptr, uint32_t node_i,
+			 int sharing_only)
+{
+	uint32_t r, cpu_begin = cr_get_coremap_offset(node_i);
+	uint32_t i, cpu_end   = cr_get_coremap_offset(node_i+1);
+
+	for (; p_ptr; p_ptr = p_ptr->next) {
+		if (sharing_only && p_ptr->num_rows < 2)
+			continue;
+		if (!p_ptr->row)
+			continue;
+		for (r = 0; r < p_ptr->num_rows; r++) {
+			if (!p_ptr->row[r].row_bitmap)
+				continue;
+			for (i = cpu_begin; i < cpu_end; i++) {
+				if (bit_test(p_ptr->row[r].row_bitmap, i))
+					return 1;
+			}
+		}
+	}
+	return 0;
+}
+
+
+/*
+ * Determine which of these nodes are usable by this job
+ *
+ * Remove nodes from the bitmap that don't have enough memory to
+ * support the job. Return SLURM_ERROR if a required node doesn't
+ * have enough memory.
+ *
+ * if node_state = NODE_CR_RESERVED, clear bitmap (if node is required
+ *                                   then should we return NODE_BUSY!?!)
+ *
+ * if node_state = NODE_CR_ONE_ROW, then this node can only be used by
+ *                                  another NODE_CR_ONE_ROW job
+ *
+ * if node_state = NODE_CR_AVAILABLE AND:
+ *  - job_node_req = NODE_CR_RESERVED, then we need idle nodes
+ *  - job_node_req = NODE_CR_ONE_ROW, then we need idle or non-sharing nodes
+ */
+static int _verify_node_state(struct part_res_record *cr_part_ptr,
+			      struct job_record *job_ptr, bitstr_t * bitmap,
+			      select_type_plugin_info_t cr_type,
+			      struct node_use_record *node_usage,
+			      enum node_cr_state job_node_req)
+{
+	uint32_t i, free_mem, min_mem, size;
+
+	min_mem = job_ptr->details->job_min_memory & (~MEM_PER_CPU);
+	size = bit_size(bitmap);
+	for (i = 0; i < size; i++) {
+		if (!bit_test(bitmap, i))
+			continue;
+
+		/* node-level memory check */
+		if ((job_ptr->details->job_min_memory) &&
+		    ((cr_type == CR_CORE_MEMORY) || (cr_type == CR_CPU_MEMORY) ||
+		     (cr_type == CR_MEMORY) || (cr_type == CR_SOCKET_MEMORY))) {
+			free_mem  = select_node_record[i].real_memory;
+			free_mem -= node_usage[i].alloc_memory;
+			if (free_mem < min_mem) {
+				debug3("cons_res: _vns: node %s no mem %u < %u",
+					select_node_record[i].node_ptr->name,
+					free_mem, min_mem);
+				goto clear_bit;
+			}
+		}
+		
+		/* if priority_selection (sched/gang) has been configured,
+		 * then we cannot rule out nodes just because Shared=NO
+		 * (NODE_CR_ONE_ROW) or Shared=EXCLUSIVE (NODE_CR_RESERVED)
+		 */
+		if (cr_priority_selection_enabled())
+			continue;
+
+		/* exclusive node check */
+		if (node_usage[i].node_state >= NODE_CR_RESERVED) {
+			debug3("cons_res: _vns: node %s in exclusive use",
+				select_node_record[i].node_ptr->name);
+			goto clear_bit;
+		
+		/* non-resource-sharing node check */
+		} else if (node_usage[i].node_state >= NODE_CR_ONE_ROW) {
+			if ((job_node_req == NODE_CR_RESERVED) ||
+			    (job_node_req == NODE_CR_AVAILABLE)) {
+				debug3("cons_res: _vns: node %s non-sharing",
+					select_node_record[i].node_ptr->name);
+				goto clear_bit;
+			}
+			/* cannot use this node if it is running jobs
+			 * in sharing partitions */
+			if ( _is_node_busy(cr_part_ptr, i, 1) ) {
+				debug3("cons_res: _vns: node %s sharing?",
+					select_node_record[i].node_ptr->name);
+				goto clear_bit;
+			}
+		
+		/* node is NODE_CR_AVAILABLE - check job request */
+		} else {
+			if (job_node_req == NODE_CR_RESERVED) {
+				if ( _is_node_busy(cr_part_ptr, i, 0) ) {
+					debug3("cons_res: _vns: node %s busy",
+					  select_node_record[i].node_ptr->name);
+					goto clear_bit;
+				}
+			} else if (job_node_req == NODE_CR_ONE_ROW) {
+				/* cannot use this node if it is running jobs
+				 * in sharing partitions */
+				if ( _is_node_busy(cr_part_ptr, i, 1) ) {
+					debug3("cons_res: _vns: node %s vbusy",
+					  select_node_record[i].node_ptr->name);
+					goto clear_bit;
+				}
+			}
+		}
+		continue;	/* node is usable, test next node */
+
+clear_bit:	/* This node is not usable by this job */
+		bit_clear(bitmap, i);
+		if (job_ptr->details->req_node_bitmap &&
+		    bit_test(job_ptr->details->req_node_bitmap, i))
+			return SLURM_ERROR;
+
+	}
+
+	return SLURM_SUCCESS;
+}
+
+
+/* given an "avail" node_bitmap, return a corresponding "avail" core_bitmap */
+bitstr_t *_make_core_bitmap(bitstr_t *node_map)
+{
+	uint32_t n, c, nodes, size;
+
+	nodes = bit_size(node_map);
+	size = cr_get_coremap_offset(nodes+1);
+	bitstr_t *core_map = bit_alloc(size);
+	if (!core_map)
+		return NULL;
+
+	for (n = 0, c = 0; n < nodes; n++) {
+		if (bit_test(node_map, n)) {
+			while (c < cr_get_coremap_offset(n+1)) {
+				bit_set(core_map, c++);
+			}
+		}
+	}
+	return core_map;
+}
+
+
+/* return the number of cpus that the given
+ * job can run on the indexed node */
+static int _get_cpu_cnt(struct job_record *job_ptr, const int node_index,
+			 uint16_t *cpu_cnt, uint32_t *freq, uint32_t size)
+{
+	int i, pos, cpus;
+	uint16_t *layout_ptr = job_ptr->details->req_node_layout;
+
+	pos = 0;
+	for (i = 0; i < size; i++) {
+		if (pos+freq[i] > node_index)
+			break;
+		pos += freq[i];
+	}
+	cpus = cpu_cnt[i];
+	if (layout_ptr && bit_test(job_ptr->details->req_node_bitmap, i)) {
+		pos = bit_get_pos_num(job_ptr->details->req_node_bitmap, i);
+		cpus = MIN(cpus, layout_ptr[pos]);
+	} else if (layout_ptr) {
+		cpus = 0; /* should not happen? */
+	}
+	return cpus;
+}
+
+
+#define CR_FREQ_ARRAY_INCREMENT 16
+
+/* Compute resource usage for the given job on all available resources
+ *
+ * IN: job_ptr     - pointer to the job requesting resources
+ * IN: node_map    - bitmap of available nodes
+ * IN: core_map    - bitmap of available cores
+ * IN: cr_node_cnt - total number of nodes in the cluster
+ * IN: cr_type     - resource type
+ * OUT: cpu_cnt    - array of cpu counts usable by this job
+ * OUT: freq       - number of nodes to which the corresponding cpu_cnt applies
+ * IN: test_only   - ignore allocated memory check
+ * OUT:            returns the length of the 2 arrays
+ */
+uint32_t _get_res_usage(struct job_record *job_ptr, bitstr_t *node_map,
+			bitstr_t *core_map, uint32_t cr_node_cnt,
+			struct node_use_record *node_usage,
+			select_type_plugin_info_t cr_type,
+			uint16_t **cpu_cnt_ptr, uint32_t **freq_ptr,
+			bool test_only)
+{
+	uint16_t *cpu_cnt, cpu_count;
+	uint32_t *freq;
+	uint32_t n, size = 0, array_size = CR_FREQ_ARRAY_INCREMENT;
+
+	cpu_cnt = xmalloc(array_size * sizeof(uint16_t));
+	freq    = xmalloc(array_size * sizeof(uint32_t));
+	
+	for (n = 0; n < cr_node_cnt; n++) {
+		if (bit_test(node_map, n)) {
+			cpu_count = _can_job_run_on_node(job_ptr, core_map,
+							n, node_usage, cr_type,
+							test_only);
+			if (cpu_count == cpu_cnt[size]) {
+				freq[size]++;
+				continue;
+			}
+			if (freq[size] == 0) {
+				cpu_cnt[size] = cpu_count;
+				freq[size]++;
+				continue;
+			}
+			size++;
+			if (size >= array_size) {
+				array_size += CR_FREQ_ARRAY_INCREMENT;
+				xrealloc(cpu_cnt, array_size *sizeof(uint16_t));
+				xrealloc(freq, array_size * sizeof(uint32_t));
+			}
+			cpu_cnt[size] = cpu_count;
+			freq[size]++;
+		} else {
+			if (cpu_cnt[size] == 0) {
+				freq[size]++;
+				continue;
+			}
+			size++;
+			if (size >= array_size) {
+				array_size += CR_FREQ_ARRAY_INCREMENT;
+				xrealloc(cpu_cnt, array_size *sizeof(uint16_t));
+				xrealloc(freq, array_size * sizeof(uint32_t));
+			}
+			freq[size]++;
+		}
+	}
+	*cpu_cnt_ptr = cpu_cnt;
+	*freq_ptr = freq;
+	return size+1;
+}			
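+
+/* The (cpu_cnt, freq) pair above is a run-length encoding of per-node
+ * cpu counts.  For example (hypothetical 6-node cluster with counts
+ * 4,4,0,0,0,2): cpu_cnt = {4,0,2}, freq = {2,3,1}, and size = 3 is
+ * returned.  _get_cpu_cnt() walks freq[] to recover the count for one
+ * node index.
+ */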
+
+
+static bool _enough_nodes(int avail_nodes, int rem_nodes, 
+			  uint32_t min_nodes, uint32_t req_nodes)
+{
+	int needed_nodes;
+
+	if (req_nodes > min_nodes)
+		needed_nodes = rem_nodes + min_nodes - req_nodes;
+	else
+		needed_nodes = rem_nodes;
+
+	return (avail_nodes >= needed_nodes);
+}
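+
+/* e.g. min_nodes = 2, req_nodes = 4, rem_nodes = 4: only
+ * rem_nodes + min_nodes - req_nodes = 2 more nodes are strictly needed,
+ * since the count requested above the minimum is best-effort.
+ */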
+
+
+/* this is the heart of the selection process */
+static int _eval_nodes(struct job_record *job_ptr, bitstr_t *node_map,
+			uint32_t min_nodes, uint32_t max_nodes,
+			uint32_t req_nodes, uint32_t cr_node_cnt,
+			uint16_t *cpu_cnt, uint32_t *freq, uint32_t size)
+{
+	int i, f, index, error_code = SLURM_ERROR;
+	int *consec_nodes;	/* how many nodes we can add from this 
+				 * consecutive set of nodes */
+	int *consec_cpus;	/* how many cpus we can add from this
+				 * consecutive set of nodes */
+	int *consec_start;	/* where this consecutive set starts (index) */
+	int *consec_end;	/* where this consecutive set ends (index) */
+	int *consec_req;	/* are nodes from this set required 
+				 * (in req_bitmap) */
+	int consec_index, consec_size, sufficient;
+	int rem_cpus, rem_nodes;	/* remaining resources desired */
+	int best_fit_nodes, best_fit_cpus, best_fit_req;
+	int best_fit_sufficient, best_fit_index = 0;
+	int avail_cpus, ll;	/* ll = layout array index */
+	bool required_node;
+	bitstr_t *req_map      = job_ptr->details->req_node_bitmap;
+	uint16_t *layout_ptr = job_ptr->details->req_node_layout;
+
+	xassert(node_map);
+
+	if (cr_node_cnt != node_record_count) {
+		error("cons_res: node count inconsistent with slurmctld");
+		return error_code;
+	}
+	if (bit_set_count(node_map) < min_nodes)
+		return error_code;
+
+	if ((job_ptr->details->req_node_bitmap) &&
+	    (!bit_super_set(job_ptr->details->req_node_bitmap, node_map)))
+		return error_code;
+
+	if (switch_record_cnt && switch_record_table) {
+		/* Perform optimized resource selection based upon topology */
+		return _eval_nodes_topo(job_ptr, node_map, 
+					min_nodes, max_nodes, req_nodes,
+					cr_node_cnt, cpu_cnt, freq, size);
+	}
+
+	consec_size = 50;	/* start allocation for 50 sets of 
+				 * consecutive nodes */
+	consec_cpus  = xmalloc(sizeof(int) * consec_size);
+	consec_nodes = xmalloc(sizeof(int) * consec_size);
+	consec_start = xmalloc(sizeof(int) * consec_size);
+	consec_end   = xmalloc(sizeof(int) * consec_size);
+	consec_req   = xmalloc(sizeof(int) * consec_size);
+
+	/* Build table with information about sets of consecutive nodes */
+	consec_index = 0;
+	consec_cpus[consec_index] = consec_nodes[consec_index] = 0;
+	consec_req[consec_index] = -1;	/* no required nodes here by default */
+
+	rem_cpus = job_ptr->num_procs;
+	rem_nodes = MAX(min_nodes, req_nodes);
+
+	i = 0;
+	f = 0;
+	for (index = 0, ll = -1; index < cr_node_cnt; index++, f++) {
+		if (f >= freq[i]) {
+			f = 0;
+			i++;
+		}
+		if (req_map) {
+			required_node = bit_test(req_map, index);
+		} else
+			required_node = false;
+		if (layout_ptr && required_node)
+			ll++;
+		if (bit_test(node_map, index)) {
+			if (consec_nodes[consec_index] == 0)
+				consec_start[consec_index] = index;
+			avail_cpus = cpu_cnt[i];
+			if (layout_ptr && required_node){
+				avail_cpus = MIN(avail_cpus, layout_ptr[ll]);
+			} else if (layout_ptr) {
+				avail_cpus = 0; /* should not happen? */
+			}
+			if ((max_nodes > 0) && required_node) {
+				if (consec_req[consec_index] == -1) {
+					/* first required node in set */
+					consec_req[consec_index] = index;
+				}
+				rem_cpus -= avail_cpus;
+				rem_nodes--;
+				/* leaving bitmap set, decrement max limit */
+				max_nodes--;
+			} else {	/* node not selected (yet) */
+				bit_clear(node_map, index);
+				consec_cpus[consec_index] += avail_cpus;
+				consec_nodes[consec_index]++;
+			}
+		} else if (consec_nodes[consec_index] == 0) {
+			consec_req[consec_index] = -1;
+			/* already picked up any required nodes */
+			/* re-use this record */
+		} else {
+			consec_end[consec_index] = index - 1;
+			if (++consec_index >= consec_size) {
+				consec_size *= 2;
+				xrealloc(consec_cpus, sizeof(int)*consec_size);
+				xrealloc(consec_nodes,sizeof(int)*consec_size);
+				xrealloc(consec_start,sizeof(int)*consec_size);
+				xrealloc(consec_end,  sizeof(int)*consec_size);
+				xrealloc(consec_req,  sizeof(int)*consec_size);
+			}
+			consec_cpus[consec_index]  = 0;
+			consec_nodes[consec_index] = 0;
+			consec_req[consec_index]   = -1;
+		}
+	}
+	if (consec_nodes[consec_index] != 0)
+		consec_end[consec_index++] = index - 1;
+	
+	for (i = 0; i < consec_index; i++) {
+		debug3("cons_res: eval_nodes:%d consec c=%d n=%d b=%d e=%d r=%d",
+		       i, consec_cpus[i], consec_nodes[i], consec_start[i],
+		       consec_end[i], consec_req[i]);
+	}
+	
+	/* accumulate nodes from these sets of consecutive nodes until */
+	/*   sufficient resources have been accumulated */
+	while (consec_index && (max_nodes > 0)) {
+		best_fit_cpus = best_fit_nodes = best_fit_sufficient = 0;
+		best_fit_req = -1;	/* first required node, -1 if none */
+		for (i = 0; i < consec_index; i++) {
+			if (consec_nodes[i] == 0)
+				continue;	/* no usable nodes here */
+
+			if (job_ptr->details->contiguous &&
+			    job_ptr->details->req_node_bitmap &&
+			    (consec_req[i] == -1))
+				break;  /* not required nodes */
+
+			sufficient = (consec_cpus[i] >= rem_cpus) &&
+				     _enough_nodes(consec_nodes[i], rem_nodes,
+						   min_nodes, req_nodes);
+			
+			/* if first possibility OR */
+			/* contains required nodes OR */
+			/* first set large enough for request OR */
+			/* tightest fit (less resource waste) OR */
+			/* nothing yet large enough, but this is biggest */
+			if ((best_fit_nodes == 0) ||
+			    ((best_fit_req == -1) && (consec_req[i] != -1)) ||
+			    (sufficient && (best_fit_sufficient == 0)) ||
+			    (sufficient && (consec_cpus[i] < best_fit_cpus)) ||
+			    (!sufficient && (consec_cpus[i] > best_fit_cpus))) {
+				best_fit_cpus = consec_cpus[i];
+				best_fit_nodes = consec_nodes[i];
+				best_fit_index = i;
+				best_fit_req = consec_req[i];
+				best_fit_sufficient = sufficient;
+			}
+
+			if (job_ptr->details->contiguous &&
+			    job_ptr->details->req_node_bitmap) {
+				/* Must wait for all required nodes to be 
+				 * in a single consecutive block */
+				int j, other_blocks = 0;
+				for (j = (i+1); j < consec_index; j++) {
+					if (consec_req[j] != -1) {
+						other_blocks = 1;
+						break;
+					}
+				}
+				if (other_blocks) {
+					best_fit_nodes = 0;
+					break;
+				}
+			}
+		}
+		if (best_fit_nodes == 0)
+			break;
+		if (job_ptr->details->contiguous &&
+		    ((best_fit_cpus < rem_cpus) ||
+		     (!_enough_nodes(best_fit_nodes, rem_nodes,
+				     min_nodes, req_nodes))))
+			break;	/* no hole large enough */
+		if (best_fit_req != -1) {
+			/* This collection of nodes includes required ones.
+			 * Select nodes from this set, first working up
+			 * then down from the required nodes. */
+			for (i = best_fit_req;
+			     i <= consec_end[best_fit_index]; i++) {
+				if ((max_nodes <= 0) ||
+				    ((rem_nodes <= 0) && (rem_cpus <= 0)))
+					break;
+				if (bit_test(node_map, i))
+					continue;
+				bit_set(node_map, i);
+				rem_nodes--;
+				max_nodes--;
+				avail_cpus = _get_cpu_cnt(job_ptr, i, cpu_cnt,
+							  freq, size);
+				rem_cpus -= avail_cpus;
+			}
+			for (i = (best_fit_req - 1);
+			     i >= consec_start[best_fit_index]; i--) {
+				if ((max_nodes <= 0) ||
+				    ((rem_nodes <= 0) && (rem_cpus <= 0)))
+					break;
+				if (bit_test(node_map, i)) 
+					continue;
+				avail_cpus = _get_cpu_cnt(job_ptr, i, cpu_cnt,
+							  freq, size);
+				if (avail_cpus <= 0)
+					continue;
+				rem_cpus -= avail_cpus;
+				bit_set(node_map, i);
+				rem_nodes--;
+				max_nodes--;
+			}
+		} else {
+			for (i = consec_start[best_fit_index];
+			     i <= consec_end[best_fit_index]; i++) {
+				if ((max_nodes <= 0) ||
+				    ((rem_nodes <= 0) && (rem_cpus <= 0)))
+					break;
+				if (bit_test(node_map, i))
+					continue;
+				avail_cpus = _get_cpu_cnt(job_ptr, i, cpu_cnt,
+							  freq, size);
+				if (avail_cpus <= 0)
+					continue;
+				if ((max_nodes == 1) && 
+				    (avail_cpus < rem_cpus)) {
+					/* Job can only take one more node and
+					 * this one has insufficient CPU */
+					continue;
+				}
+				rem_cpus -= avail_cpus;
+				bit_set(node_map, i);
+				rem_nodes--;
+				max_nodes--;
+			}
+		}
+
+		if (job_ptr->details->contiguous ||
+		    ((rem_nodes <= 0) && (rem_cpus <= 0))) {
+			error_code = SLURM_SUCCESS;
+			break;
+		}
+		consec_cpus[best_fit_index] = 0;
+		consec_nodes[best_fit_index] = 0;
+	}
+	
+	if (error_code && (rem_cpus <= 0) &&
+	    _enough_nodes(0, rem_nodes, min_nodes, req_nodes))
+		error_code = SLURM_SUCCESS;
+
+	xfree(consec_cpus);
+	xfree(consec_nodes);
+	xfree(consec_start);
+	xfree(consec_end);
+	xfree(consec_req);
+	return error_code;
+}
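+
+/* For reference, a minimal sketch of the _enough_nodes() helper used
+ * throughout this file (defined earlier; see also its counterpart in
+ * select/linear). Shown here only as a reading aid, not authoritative:
+ *
+ *	static bool _enough_nodes(int avail_nodes, int rem_nodes,
+ *				  uint32_t min_nodes, uint32_t req_nodes)
+ *	{
+ *		int needed_nodes;
+ *		if (req_nodes > min_nodes)
+ *			needed_nodes = rem_nodes + min_nodes - req_nodes;
+ *		else
+ *			needed_nodes = rem_nodes;
+ *		return (avail_nodes >= needed_nodes);
+ *	}
+ */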
+
+/*
+ * A network topology aware version of _eval_nodes().
+ * NOTE: The logic here is almost identical to that of _job_test_topo() 
+ *       in select_linear.c. Any bug found here is probably also there.
+ */
+static int _eval_nodes_topo(struct job_record *job_ptr, bitstr_t *bitmap,
+			uint32_t min_nodes, uint32_t max_nodes,
+			uint32_t req_nodes, uint32_t cr_node_cnt,
+			uint16_t *cpu_cnt, uint32_t *freq, uint32_t size)
+{
+	bitstr_t **switches_bitmap;		/* nodes on this switch */
+	int       *switches_cpu_cnt;		/* total CPUs on switch */
+	int       *switches_node_cnt;		/* total nodes on switch */
+	int       *switches_required;		/* set if has required node */
+
+	bitstr_t  *avail_nodes_bitmap = NULL;	/* nodes on any switch */
+	bitstr_t  *req_nodes_bitmap   = NULL;
+	int rem_cpus, rem_nodes;	/* remaining resources desired */
+	int avail_cpus, alloc_cpus = 0;
+	int i, j, rc = SLURM_SUCCESS;
+	int best_fit_inx, first, last;
+	int best_fit_nodes, best_fit_cpus;
+	int best_fit_location = 0, best_fit_sufficient;
+	bool sufficient;
+
+	rem_cpus = job_ptr->num_procs;
+	if (req_nodes > min_nodes)
+		rem_nodes = req_nodes;
+	else
+		rem_nodes = min_nodes;
+
+	if (job_ptr->details->req_node_bitmap) {
+		req_nodes_bitmap = bit_copy(job_ptr->details->req_node_bitmap);
+		i = bit_set_count(req_nodes_bitmap);
+		if (i > max_nodes) {
+			info("job %u requires more nodes than currently "
+			     "available (%u>%u)",
+			     job_ptr->job_id, i, max_nodes);
+			rc = SLURM_ERROR;
+			goto fini;
+		}
+	}
+
+	/* Construct a set of switch array entries, 
+	 * use the same indexes as switch_record_table in slurmctld */
+	switches_bitmap   = xmalloc(sizeof(bitstr_t *) * switch_record_cnt);
+	switches_cpu_cnt  = xmalloc(sizeof(int)        * switch_record_cnt);
+	switches_node_cnt = xmalloc(sizeof(int)        * switch_record_cnt);
+	switches_required = xmalloc(sizeof(int)        * switch_record_cnt);
+	avail_nodes_bitmap = bit_alloc(node_record_count);
+	for (i=0; i<switch_record_cnt; i++) {
+		switches_bitmap[i] = bit_copy(switch_record_table[i].
+					      node_bitmap);
+		bit_and(switches_bitmap[i], bitmap);
+		bit_or(avail_nodes_bitmap, switches_bitmap[i]);
+		switches_node_cnt[i] = bit_set_count(switches_bitmap[i]);
+		if (req_nodes_bitmap &&
+		    bit_overlap(req_nodes_bitmap, switches_bitmap[i])) {
+			switches_required[i] = 1;
+		}
+	}
+	bit_nclear(bitmap, 0, node_record_count - 1);
+
+#if SELECT_DEBUG
+	/* Don't compile this, it slows things down too much */
+	for (i=0; i<switch_record_cnt; i++) {
+		char *node_names = NULL;
+		if (switches_node_cnt[i])
+			node_names = bitmap2node_name(switches_bitmap[i]);
+		debug("switch=%s nodes=%u:%s required:%u speed:%u",
+		      switch_record_table[i].name,
+		      switches_node_cnt[i], node_names,
+		      switches_required[i],
+		      switch_record_table[i].link_speed);
+		xfree(node_names);
+	}
+#endif
+
+	if (req_nodes_bitmap &&
+	    (!bit_super_set(req_nodes_bitmap, avail_nodes_bitmap))) {
+		info("job %u requires nodes not available on any switch",
+		     job_ptr->job_id);
+		rc = SLURM_ERROR;
+		goto fini;
+	}
+
+	if (req_nodes_bitmap) {
+		/* Accumulate specific required resources, if any */
+		first = bit_ffs(req_nodes_bitmap);
+		last  = bit_fls(req_nodes_bitmap);
+		for (i=first; ((i<=last) && (first>=0)); i++) {
+			if (!bit_test(req_nodes_bitmap, i))
+				continue;
+			if (max_nodes <= 0) {
+				info("job %u requires more nodes than allowed",
+				     job_ptr->job_id);
+				rc = SLURM_ERROR;
+				goto fini;
+			}
+			bit_set(bitmap, i);
+			bit_clear(avail_nodes_bitmap, i);
+			rem_nodes--;
+			max_nodes--;
+			avail_cpus = _get_cpu_cnt(job_ptr, i, cpu_cnt,
+						  freq, size);
+			rem_cpus   -= avail_cpus;
+			alloc_cpus += avail_cpus;
+			for (j=0; j<switch_record_cnt; j++) {
+				if (!bit_test(switches_bitmap[j], i))
+					continue;
+				bit_clear(switches_bitmap[j], i);
+				switches_node_cnt[j]--;
+			}
+		}
+		if ((rem_nodes <= 0) && (rem_cpus <= 0))
+			goto fini;
+
+		/* Accumulate additional resources from leafs that
+		 * contain required nodes */
+		for (j=0; j<switch_record_cnt; j++) {
+			if ((switch_record_table[j].level != 0) ||
+			    (switches_node_cnt[j] == 0) ||
+			    (switches_required[j] == 0)) {
+				continue;
+			}
+			while ((max_nodes > 0) &&
+			       ((rem_nodes > 0) || (rem_cpus > 0))) {
+				i = bit_ffs(switches_bitmap[j]);
+				if (i == -1)
+					break;
+				bit_clear(switches_bitmap[j], i);
+				switches_node_cnt[j]--;
+				if (bit_test(bitmap, i)) {
+					/* node on multiple leaf switches
+					 * and already selected */
+					continue;
+				}
+				bit_set(bitmap, i);
+				bit_clear(avail_nodes_bitmap, i);
+				rem_nodes--;
+				max_nodes--;
+				avail_cpus = _get_cpu_cnt(job_ptr, i, cpu_cnt,
+							  freq, size);
+				rem_cpus   -= avail_cpus;
+				alloc_cpus += avail_cpus;
+			}
+		}
+		if ((rem_nodes <= 0) && (rem_cpus <= 0))
+			goto fini;
+
+		/* Update bitmaps and node counts for higher-level switches */
+		for (j=0; j<switch_record_cnt; j++) {
+			if (switches_node_cnt[j] == 0)
+				continue;
+			first = bit_ffs(switches_bitmap[j]);
+			if (first < 0)
+				continue;
+			last  = bit_fls(switches_bitmap[j]);
+			for (i=first; i<=last; i++) {
+				if (!bit_test(switches_bitmap[j], i))
+					continue;
+				if (!bit_test(avail_nodes_bitmap, i)) {
+					/* cleared from lower level */
+					bit_clear(switches_bitmap[j], i);
+					switches_node_cnt[j]--;
+				} else {
+					switches_cpu_cnt[j] += 
+						_get_cpu_cnt(job_ptr, i, 
+							     cpu_cnt, freq, 
+							     size);
+				}
+			}
+		}
+	} else {
+		/* No specific required nodes, calculate CPU counts */
+		for (j=0; j<switch_record_cnt; j++) {
+			first = bit_ffs(switches_bitmap[j]);
+			if (first < 0)
+				continue;
+			last  = bit_fls(switches_bitmap[j]);
+			for (i=first; i<=last; i++) {
+				if (!bit_test(switches_bitmap[j], i))
+					continue;
+				switches_cpu_cnt[j] += 
+					_get_cpu_cnt(job_ptr, i, cpu_cnt,
+						     freq, size);
+			}
+		}
+	}
+
+	/* Determine lowest level switch satisfying request with best fit */
+	best_fit_inx = -1;
+	for (j=0; j<switch_record_cnt; j++) {
+		if ((switches_cpu_cnt[j]  < rem_cpus) ||
+		    (!_enough_nodes(switches_node_cnt[j], rem_nodes,
+				    min_nodes, req_nodes)))
+			continue;
+		if ((best_fit_inx == -1) ||
+		    (switch_record_table[j].level <
+		     switch_record_table[best_fit_inx].level) ||
+		    ((switch_record_table[j].level ==
+		      switch_record_table[best_fit_inx].level) &&
+		     (switches_node_cnt[j] < switches_node_cnt[best_fit_inx])))
+			best_fit_inx = j;
+	}
+	if (best_fit_inx == -1) {
+		error("job %u: best_fit topology failure", job_ptr->job_id);
+		rc = SLURM_ERROR;
+		goto fini;
+	}
+	bit_and(avail_nodes_bitmap, switches_bitmap[best_fit_inx]);
+
+	/* Identify usable leafs (within higher switch having best fit) */
+	for (j=0; j<switch_record_cnt; j++) {
+		if ((switch_record_table[j].level != 0) ||
+		    (!bit_super_set(switches_bitmap[j], 
+				    switches_bitmap[best_fit_inx]))) {
+			switches_node_cnt[j] = 0;
+		}
+	}
+
+	/* Select resources from these leafs on a best-fit basis */
+	while ((max_nodes > 0) && ((rem_nodes > 0) || (rem_cpus > 0))) {
+		best_fit_cpus = best_fit_nodes = best_fit_sufficient = 0;
+		for (j=0; j<switch_record_cnt; j++) {
+			if (switches_node_cnt[j] == 0)
+				continue;
+			sufficient = (switches_cpu_cnt[j] >= rem_cpus) &&
+				     _enough_nodes(switches_node_cnt[j], 
+						   rem_nodes, min_nodes, 
+						   req_nodes);
+			/* If first possibility OR */
+			/* first set large enough for request OR */
+			/* tightest fit (less resource waste) OR */
+			/* nothing yet large enough, but this is biggest */
+			if ((best_fit_nodes == 0) ||	
+			    (sufficient && (best_fit_sufficient == 0)) ||
+			    (sufficient && 
+			     (switches_cpu_cnt[j] < best_fit_cpus)) ||
+			    ((sufficient == 0) && 
+			     (switches_cpu_cnt[j] > best_fit_cpus))) {
+				best_fit_cpus =  switches_cpu_cnt[j];
+				best_fit_nodes = switches_node_cnt[j];
+				best_fit_location = j;
+				best_fit_sufficient = sufficient;
+			}
+		}
+		if (best_fit_nodes == 0)
+			break;
+
+		/* Select nodes from this leaf */
+		first = bit_ffs(switches_bitmap[best_fit_location]);
+		last  = bit_fls(switches_bitmap[best_fit_location]);
+		for (i=first; ((i<=last) && (first>=0)); i++) {
+			if (!bit_test(switches_bitmap[best_fit_location], i))
+				continue;
+
+			bit_clear(switches_bitmap[best_fit_location], i);
+			switches_node_cnt[best_fit_location]--;
+			avail_cpus = _get_cpu_cnt(job_ptr, i, cpu_cnt,
+						  freq, size);
+			switches_cpu_cnt[best_fit_location] -= avail_cpus;
+
+			if (bit_test(bitmap, i)) {
+				/* node on multiple leaf switches
+				 * and already selected */
+				continue;
+			}
+
+			bit_set(bitmap, i);
+			rem_nodes--;
+			max_nodes--;
+			rem_cpus   -= avail_cpus;
+			alloc_cpus += avail_cpus;
+			if ((max_nodes <= 0) || 
+			    ((rem_nodes <= 0) && (rem_cpus <= 0)))
+				break;
+		}
+		switches_node_cnt[best_fit_location] = 0;
+	}
+	if ((rem_cpus <= 0) && 
+	    _enough_nodes(0, rem_nodes, min_nodes, req_nodes)) {
+		rc = SLURM_SUCCESS;
+	} else
+		rc = SLURM_ERROR;
+
+ fini:	if (rc == SLURM_SUCCESS) {
+		/* Job's total_procs is needed for SELECT_MODE_WILL_RUN */
+		job_ptr->total_procs = alloc_cpus;
+	}
+	FREE_NULL_BITMAP(avail_nodes_bitmap);
+	FREE_NULL_BITMAP(req_nodes_bitmap);
+	for (i=0; i<switch_record_cnt; i++)
+		bit_free(switches_bitmap[i]);
+	xfree(switches_bitmap);
+	xfree(switches_cpu_cnt);
+	xfree(switches_node_cnt);
+	xfree(switches_required);
+
+	return rc;
+}
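+
+/* Illustration of the pass above, assuming a small two-level tree: leaf
+ * switches s0={n0,n1} and s1={n2,n3} below a top-level switch s2={n0..n3}.
+ * A 3-node request is too big for either leaf, so s2 is the lowest-level
+ * switch satisfying the request and becomes best_fit_inx; only the leafs
+ * below s2 remain eligible, and nodes are then taken from those leafs on
+ * a best-fit basis until the request is satisfied. */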
+
+/* this is an intermediary step between _select_nodes and _eval_nodes
+ * to tackle the knapsack problem. This code incrementally removes nodes
+ * with low cpu counts for the job and re-evaluates each result */
+static int _choose_nodes(struct job_record *job_ptr, bitstr_t *node_map,
+			 uint32_t min_nodes, uint32_t max_nodes, 
+			 uint32_t req_nodes, uint32_t cr_node_cnt,
+			 uint16_t *cpu_cnt, uint32_t *freq, uint32_t size)
+{
+	int i, b, node_boundary, count, ec, most_cpus = 0;
+	bitstr_t *origmap, *reqmap = NULL;
+
+	if (job_ptr->details->req_node_bitmap)
+		reqmap = job_ptr->details->req_node_bitmap;
+
+	/* clear nodes from the bitmap that don't have available resources */
+	for (i = 0, b = 0; i < size; i++) {
+		for (count = 0; count < freq[i]; count++, b++) {
+			if (bit_test(node_map, b) && cpu_cnt[i] < 1) {
+				if (reqmap && bit_test(reqmap, b)) {
+					/* can't clear a required node! */
+					return SLURM_ERROR;
+				}
+				bit_clear(node_map, b); 
+			}
+		}
+	}
+
+	/* NOTE: num_procs is 1 by default, so only reset max_nodes
+	 * if the user explicitly sets a proc count */
+	if ((job_ptr->num_procs > 1) && (max_nodes > job_ptr->num_procs))
+		max_nodes = job_ptr->num_procs;
+
+	origmap = bit_copy(node_map);
+	if (origmap == NULL)
+		fatal("bit_copy malloc failure");
+
+	ec = _eval_nodes(job_ptr, node_map, min_nodes, max_nodes,
+			 req_nodes, cr_node_cnt, cpu_cnt, freq, size);
+
+	if (ec == SLURM_SUCCESS) {
+		FREE_NULL_BITMAP(origmap);
+		return ec;
+	}
+
+	/* This nodeset didn't work. To avoid a possible knapsack problem, 
+	 * incrementally remove nodes with low cpu counts and retry */
+
+	/* find the highest number of cpus per node */
+	for (i = 0; i < size; i++) {
+		if (cpu_cnt[i] > most_cpus)
+			most_cpus = cpu_cnt[i];
+	}
+
+	for (count = 1; count < most_cpus; count++) {
+		int nochange = 1;
+		bit_or(node_map, origmap);
+		for (i = 0, node_boundary = 0; i < size; i++) {
+			if (cpu_cnt[i] > 0 && cpu_cnt[i] <= count) {
+				int j, n = node_boundary;
+				for (j = 0; j < freq[i]; j++, n++) {
+					if (!bit_test(node_map, n))
+						continue;
+					if (reqmap && bit_test(reqmap, n)) {
+						continue;
+					}
+					nochange = 0;
+					bit_clear(node_map, n);
+					bit_clear(origmap, n);
+				}
+			}
+			node_boundary += freq[i];
+		}
+		if (nochange)
+			continue;
+		ec = _eval_nodes(job_ptr, node_map, min_nodes, max_nodes,
+				 req_nodes, cr_node_cnt, cpu_cnt, freq, size);
+		if (ec == SLURM_SUCCESS) {
+			FREE_NULL_BITMAP(origmap);
+			return ec;
+		}
+	}
+	FREE_NULL_BITMAP(origmap);
+	return ec;
+}
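+
+/* Illustration: if the per-node CPU counts are {1,4,2,4} and _eval_nodes()
+ * fails on the full set, the loop above retries with the 1-CPU node removed
+ * (count=1), then with the 1- and 2-CPU nodes removed (count=2; count=3
+ * changes nothing and is skipped), stopping at the first combination that
+ * satisfies the request. Required nodes are never removed. */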
+
+
+/* Select the best set of resources for the given job
+ * IN: job_ptr      - pointer to the job requesting resources
+ * IN: min_nodes    - minimum number of nodes required
+ * IN: max_nodes    - maximum number of nodes requested
+ * IN: req_nodes    - number of requested nodes
+ * IN/OUT: node_map - bitmap of available nodes / bitmap of selected nodes
+ * IN: cr_node_cnt  - total number of nodes in the cluster
+ * IN/OUT: core_map - bitmap of available cores / bitmap of selected cores
+ * IN: node_usage   - per-node resource usage data (state, alloc_memory)
+ * IN: cr_type      - resource type
+ * IN: test_only    - ignore allocated memory check
+ * RET: array with the number of CPUs available to this job on each
+ *	selected node, or NULL if the job cannot be allocated
+ */
+static uint16_t *_select_nodes(struct job_record *job_ptr, uint32_t min_nodes,
+				uint32_t max_nodes, uint32_t req_nodes,
+				bitstr_t *node_map, uint32_t cr_node_cnt,
+				bitstr_t *core_map,
+				struct node_use_record *node_usage,
+				select_type_plugin_info_t cr_type,
+				bool test_only)
+{
+	int rc;
+	uint16_t *cpu_cnt, *cpus = NULL;
+	uint32_t start, n, a, i, f, size, *freq;
+	bitstr_t *req_map = job_ptr->details->req_node_bitmap;
+	
+	if (bit_set_count(node_map) < min_nodes)
+		return NULL;
+
+	/* get resource usage for this job from each available node */
+	size = _get_res_usage(job_ptr, node_map, core_map, cr_node_cnt,
+			      node_usage, cr_type, &cpu_cnt, &freq,
+			      test_only);
+
+	/* clear all nodes that do not have any
+	 * usable resources for this job */
+	i = f = 0;
+	for (n = 0; n < cr_node_cnt; n++) {
+		if (bit_test(node_map, n) && cpu_cnt[i] == 0) {
+			/* no resources are available for this node */
+			if (req_map && bit_test(req_map, n)) {
+				/* cannot clear a required node! */
+				xfree(cpu_cnt);
+				xfree(freq);
+				return NULL;
+			}
+			bit_clear(node_map, n);
+		}
+		f++;
+		if (f >= freq[i]) {
+			f = 0;
+			i++;
+		}
+	}
+	if (bit_set_count(node_map) < min_nodes) {
+		xfree(cpu_cnt);
+		xfree(freq);
+		return NULL;
+	}
+
+	/* choose the best nodes for the job */
+	rc = _choose_nodes(job_ptr, node_map, min_nodes, max_nodes, req_nodes,
+			   cr_node_cnt, cpu_cnt, freq, size);
+	
+	/* if successful, sync up the core_map with the node_map, and
+	 * create a cpus array */
+	if (rc == SLURM_SUCCESS) {
+		cpus = xmalloc(bit_set_count(node_map) * sizeof(uint16_t));
+		start = 0;
+		a = i = f = 0;
+		for (n = 0; n < cr_node_cnt; n++) {
+			if (bit_test(node_map, n)) {
+				cpus[a++] = cpu_cnt[i];
+				if (cr_get_coremap_offset(n) != start) {
+					bit_nclear(core_map, start, 
+						(cr_get_coremap_offset(n))-1);
+				}
+				start = cr_get_coremap_offset(n+1);
+			}
+			f++;
+			if (f >= freq[i]) {
+				f = 0;
+				i++;
+			}
+		}
+		if (cr_get_coremap_offset(n) != start) {
+			bit_nclear(core_map, start,
+						(cr_get_coremap_offset(n))-1);
+		}
+	}
+
+	xfree(cpu_cnt);
+	xfree(freq);
+	return cpus;
+}
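+
+/* A note on the (cpu_cnt, freq) arrays built by _get_res_usage() and used
+ * above: they are a run-length encoded view of per-node CPU counts, e.g.
+ * cpu_cnt={4,2} freq={3,1} means nodes 0-2 each offer this job 4 CPUs and
+ * node 3 offers 2. A hypothetical decoder (the real lookup is
+ * _get_cpu_cnt()) would read:
+ *
+ *	static uint16_t _cpus_for_node(uint32_t n, uint16_t *cpu_cnt,
+ *				       uint32_t *freq, uint32_t size)
+ *	{
+ *		uint32_t i, base = 0;
+ *		for (i = 0; i < size; i++) {
+ *			if (n < base + freq[i])
+ *				return cpu_cnt[i];	/* node n in run i */
+ *			base += freq[i];
+ *		}
+ *		return 0;
+ *	}
+ */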
+
+
+/* cr_job_test - does most of the real work for select_p_job_test(), which 
+ *	includes contiguous selection, load-leveling and max_share logic
+ *
+ * PROCEDURE:
+ *
+ * Step 1: compare nodes in "avail" bitmap with current node state data
+ *         to find available nodes that match the job request
+ *
+ * Step 2: check resources in "avail" bitmap with allocated resources from
+ *         higher priority partitions (busy resources are UNavailable)
+ *
+ * Step 3: select resource usage on remaining resources in "avail" bitmap
+ *         for this job, with the placement influenced by existing
+ *         allocations
+ */
+extern int cr_job_test(struct job_record *job_ptr, bitstr_t *bitmap,
+			uint32_t min_nodes, uint32_t max_nodes, 
+			uint32_t req_nodes, int mode,
+			select_type_plugin_info_t cr_type,
+			enum node_cr_state job_node_req, uint32_t cr_node_cnt,
+			struct part_res_record *cr_part_ptr,
+			struct node_use_record *node_usage)
+{
+	int error_code = SLURM_SUCCESS, ll; /* ll = layout array index */
+	uint16_t *layout_ptr = NULL;
+	bitstr_t *orig_map, *avail_cores, *free_cores;
+	bitstr_t *tmpcore = NULL, *reqmap = NULL;
+	bool test_only;
+	uint32_t c, i, n, csize, total_cpus, save_mem = 0;
+	int32_t build_cnt;
+	select_job_res_t job_res;
+	struct part_res_record *p_ptr, *jp_ptr;
+	uint16_t *cpu_count;
+
+	layout_ptr = job_ptr->details->req_node_layout;
+	reqmap = job_ptr->details->req_node_bitmap;
+	
+	free_select_job_res(&job_ptr->select_job);
+
+	if (mode == SELECT_MODE_TEST_ONLY)
+		test_only = true;
+	else	/* SELECT_MODE_RUN_NOW || SELECT_MODE_WILL_RUN  */ 
+		test_only = false;
+
+	/* check node_state and update the node bitmap as necessary */
+	if (!test_only) {
+		error_code = _verify_node_state(cr_part_ptr, job_ptr, 
+						bitmap, cr_type, node_usage,
+						job_node_req);
+		if (error_code != SLURM_SUCCESS) {
+			return error_code;
+		}
+	}
+
+	/* This is the case if -O/--overcommit is true */
+	if (job_ptr->num_procs == job_ptr->details->min_nodes) {
+		struct multi_core_data *mc_ptr = job_ptr->details->mc_ptr;
+		job_ptr->num_procs *= MAX(1, mc_ptr->min_threads);
+		job_ptr->num_procs *= MAX(1, mc_ptr->min_cores);
+		job_ptr->num_procs *= MAX(1, mc_ptr->min_sockets);
+	}
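+	/* e.g. (illustrative) "salloc -N2 --overcommit --sockets-per-node=2
+	 * --cores-per-socket=4" enters the block above with num_procs == 2
+	 * and scales it to 2 * (2 sockets * 4 cores) = 16 */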
+
+	debug3("cons_res: cr_job_test: evaluating job %u on %u nodes",
+		job_ptr->job_id, bit_set_count(bitmap));
+
+	orig_map = bit_copy(bitmap);
+	avail_cores = _make_core_bitmap(bitmap);
+
+	/* test to make sure that this job can succeed with all avail_cores
+	 * if 'no' then return FAIL
+	 * if 'yes' then we will seek the optimal placement for this job
+	 *          within avail_cores
+	 */
+	free_cores = bit_copy(avail_cores);
+	cpu_count = _select_nodes(job_ptr, min_nodes, max_nodes, req_nodes,
+				   bitmap, cr_node_cnt, free_cores,
+				   node_usage, cr_type, test_only);
+	if (cpu_count == NULL) {
+		/* job cannot fit */
+		FREE_NULL_BITMAP(orig_map);
+		FREE_NULL_BITMAP(free_cores);
+		FREE_NULL_BITMAP(avail_cores);
+		debug3("cons_res: cr_job_test: test 0 fail: "
+		       "insufficient resources");
+		return SLURM_ERROR;
+	} else if (test_only) {
+		/* FIXME: does "test_only" expect select_job_res
+		 * to be filled out? For now we assume NO */
+		FREE_NULL_BITMAP(orig_map);
+		FREE_NULL_BITMAP(free_cores);
+		FREE_NULL_BITMAP(avail_cores);
+		xfree(cpu_count);
+		debug3("cons_res: cr_job_test: test 0 pass: test_only"); 
+		return SLURM_SUCCESS;
+	}
+	if (cr_type == CR_MEMORY) {
+		/* CR_MEMORY does not care about existing CPU allocations,
+		 * so we can jump right to job allocation from here */
+		goto alloc_job;
+	}
+	xfree(cpu_count);
+	debug3("cons_res: cr_job_test: test 0 pass - "
+	       "job fits on given resources");
+
+	/* now that we know that this job can run with the given resources,
+	 * let's factor in the existing allocations and seek the optimal set
+	 * of resources for this job. Here is the procedure:
+	 *
+	 * Step 1: Seek idle nodes across all partitions. If successful then
+	 *         place job and exit. If not successful, then continue:
+	 *
+	 * Step 2: Remove resources that are in use by higher-pri partitions,
+	 *         and test that job can still succeed. If not then exit.
+	 *
+	 * Step 3: Seek idle nodes among the partitions with the same
+	 *         priority as the job's partition. If successful then
+	 *         goto Step 6. If not then continue:
+	 *
+	 * Step 4: Seek placement within the job's partition. Search
+	 *         row-by-row. If no placement is found, then exit. If a row
+	 *         is found, then continue:
+	 *
+	 * Step 5: Place job and exit. FIXME! Here is where we need a
+	 *         placement algorithm that recognizes existing job
+	 *         boundaries and tries to "overlap jobs" as efficiently
+	 *         as possible.
+	 *
+	 * Step 6: Place job and exit. FIXME! Here is where we use a placement
+	 *         algorithm similar to Step 5 on jobs from lower-priority
+	 *         partitions.
+	 */
+
+
+	/*** Step 1 ***/
+	bit_copybits(bitmap, orig_map);
+	bit_copybits(free_cores, avail_cores);
+
+	/* remove all existing allocations from free_cores */
+	tmpcore = bit_copy(free_cores);
+	for(p_ptr = cr_part_ptr; p_ptr; p_ptr = p_ptr->next) {
+		if (!p_ptr->row)
+			continue;
+		for (i = 0; i < p_ptr->num_rows; i++) {
+			if (!p_ptr->row[i].row_bitmap)
+				continue;
+			bit_copybits(tmpcore, p_ptr->row[i].row_bitmap);
+			bit_not(tmpcore); /* set bits now "free" resources */
+			bit_and(free_cores, tmpcore);
+		}
+	}
+	cpu_count = _select_nodes(job_ptr, min_nodes, max_nodes, req_nodes,
+				  bitmap, cr_node_cnt, free_cores,
+				  node_usage, cr_type, test_only);
+	if (cpu_count) {
+		/* job fits! We're done. */
+		debug3("cons_res: cr_job_test: test 1 pass - "
+		       "idle resources found");
+		goto alloc_job;
+	}
+	debug3("cons_res: cr_job_test: test 1 fail - "
+	       "not enough idle resources");
+
+	/*** Step 2 ***/
+	bit_copybits(bitmap, orig_map);
+	bit_copybits(free_cores, avail_cores);
+	
+	for (jp_ptr = cr_part_ptr; jp_ptr; jp_ptr = jp_ptr->next) {
+		if (strcmp(jp_ptr->name, job_ptr->part_ptr->name) == 0)
+			break;
+	}
+	if (!jp_ptr)
+		fatal("cons_res error: could not find partition for job %u",
+			job_ptr->job_id);
+
+	/* remove hi-pri existing allocations from avail_cores */
+	for(p_ptr = cr_part_ptr; p_ptr; p_ptr = p_ptr->next) {
+		if (p_ptr->priority <= jp_ptr->priority)
+			continue;
+		if (!p_ptr->row)
+			continue;
+		for (i = 0; i < p_ptr->num_rows; i++) {
+			if (!p_ptr->row[i].row_bitmap)
+				continue;
+			bit_copybits(tmpcore, p_ptr->row[i].row_bitmap);
+			bit_not(tmpcore); /* set bits now "free" resources */
+			bit_and(free_cores, tmpcore);
+		}
+	}
+	/* make these changes permanent */
+	bit_copybits(avail_cores, free_cores);
+	cpu_count = _select_nodes(job_ptr, min_nodes, max_nodes, req_nodes,
+				  bitmap, cr_node_cnt, free_cores,
+				  node_usage, cr_type, test_only);
+	if (!cpu_count) {
+		/* job needs resources that are currently in use by
+		 * higher-priority jobs, so fail for now */
+		debug3("cons_res: cr_job_test: test 2 fail - "
+			"resources busy with higher priority jobs");
+		goto alloc_job;
+	}
+	xfree(cpu_count);
+	debug3("cons_res: cr_job_test: test 2 pass - "
+	       "available resources for this priority");
+
+	/*** Step 3 ***/
+	bit_copybits(bitmap, orig_map);
+	bit_copybits(free_cores, avail_cores);
+	
+	/* remove same-priority existing allocations from free_cores */
+	for(p_ptr = cr_part_ptr; p_ptr; p_ptr = p_ptr->next) {
+		if (p_ptr->priority != jp_ptr->priority)
+			continue;
+		if (!p_ptr->row)
+			continue;
+		for (i = 0; i < p_ptr->num_rows; i++) {
+			if (!p_ptr->row[i].row_bitmap)
+				continue;
+			bit_copybits(tmpcore, p_ptr->row[i].row_bitmap);
+			bit_not(tmpcore); /* set bits now "free" resources */
+			bit_and(free_cores, tmpcore);
+		}
+	}
+	cpu_count = _select_nodes(job_ptr, min_nodes, max_nodes, req_nodes,
+				   bitmap, cr_node_cnt, free_cores,
+				   node_usage, cr_type, test_only);
+	if (cpu_count) {
+		/* lo-pri jobs are the only thing left in our way.
+		 * for now we'll ignore them, but FIXME: we need
+		 * a good placement algorithm here that optimizes
+		 * "job overlap" between this job (in these idle
+		 * nodes) and the lo-pri jobs */
+		debug3("cons_res: cr_job_test: test 3 pass - found resources");
+		goto alloc_job;
+	}
+	debug3("cons_res: cr_job_test: test 3 fail - "
+	       "not enough idle resources in same priority");
+	
+	
+	/*** Step 4 ***/	
+	/* try to fit the job into an existing row */
+	/*
+	 * tmpcore = worker core_bitmap
+	 * free_cores = core_bitmap to be built
+	 * avail_cores = static core_bitmap of all available cores
+	 */
+	
+	if (jp_ptr->row == NULL) {
+		/* there are no existing jobs in this partition, so place
+		 * the job in avail_cores. FIXME: still need a good
+		 * placement algorithm here that optimizes "job overlap"
+		 * between this job (in these idle nodes) and existing
+		 * jobs in the other partitions with <= priority to
+		 * this partition */
+		bit_copybits(bitmap, orig_map);
+		bit_copybits(free_cores, avail_cores);
+		cpu_count = _select_nodes(job_ptr, min_nodes, max_nodes,
+					  req_nodes, bitmap, cr_node_cnt,
+					  free_cores, node_usage, cr_type, 
+					  test_only);
+		debug3("cons_res: cr_job_test: test 4 pass - first row found");
+		goto alloc_job;
+	}
+
+	cr_sort_part_rows(jp_ptr);
+	c = jp_ptr->num_rows;
+	if (job_node_req != NODE_CR_AVAILABLE)
+		c = 1;
+	for (i = 0; i < c; i++) {
+		if (!jp_ptr->row[i].row_bitmap)
+			break;
+		bit_copybits(bitmap, orig_map);
+		bit_copybits(free_cores, avail_cores);
+		bit_copybits(tmpcore, jp_ptr->row[i].row_bitmap);
+		bit_not(tmpcore);
+		bit_and(free_cores, tmpcore);
+		cpu_count = _select_nodes(job_ptr, min_nodes, max_nodes,
+					  req_nodes, bitmap, cr_node_cnt,
+					  free_cores, node_usage, cr_type,
+					  test_only);
+		if (cpu_count) {
+			debug3("cons_res: cr_job_test: test 4 pass - row %i",i);
+			break;
+		}
+		debug3("cons_res: cr_job_test: test 4 fail - row %i", i);
+	}
+
+	if (i < c && !jp_ptr->row[i].row_bitmap) {
+		/* we've found an empty row, so use it */
+		bit_copybits(bitmap, orig_map);
+		bit_copybits(free_cores, avail_cores);
+		debug3("cons_res: cr_job_test: test 4 trying empty row %i",i);
+		cpu_count = _select_nodes(job_ptr, min_nodes, max_nodes,
+					  req_nodes, bitmap, cr_node_cnt,
+					  free_cores, node_usage, cr_type,
+					  test_only);
+	}
+
+	if (!cpu_count) {
+		/* job can't fit into any row, so exit */
+		debug3("cons_res: cr_job_test: test 4 fail - busy partition");
+		goto alloc_job;
+		
+	}
+
+	/*** CONSTRUCTION ZONE FOR STEPS 5 AND 6 ***
+	 * Note that while the job may have fit into a row, it should
+	 * still be run through a good placement algorithm here that
+	 * optimizes "job overlap" between this job (in these idle nodes)
+	 * and existing jobs in the other partitions with <= priority to
+	 * this partition */
+
+alloc_job:
+	/* at this point we've found a good set of
+	 * bits to allocate to this job:
+	 * - bitmap is the set of nodes to allocate
+	 * - free_cores is the set of allocated cores
+	 * - cpu_count is the number of cpus per allocated node
+	 *
+	 * Next steps are to cleanup the worker variables,
+	 * create the select_job_res struct,
+	 * distribute the job on the bits, and exit
+	 */
+	FREE_NULL_BITMAP(orig_map);
+	FREE_NULL_BITMAP(avail_cores);
+	FREE_NULL_BITMAP(tmpcore);
+	if (!cpu_count) {
+		/* we were sent here to cleanup and exit */
+		FREE_NULL_BITMAP(free_cores);
+		debug3("cons_res: exiting cr_job_test with no allocation");
+		return SLURM_ERROR;
+	}
+
+	/* At this point we have:
+	 * - a bitmap of selected nodes
+	 * - a free_cores bitmap of usable cores on each selected node
+	 * - a per-alloc-node cpu_count array
+	 */
+
+	if ((mode != SELECT_MODE_WILL_RUN) && (job_ptr->part_ptr == NULL))
+		error_code = EINVAL;
+	if ((error_code != SLURM_SUCCESS) || (mode != SELECT_MODE_RUN_NOW)) {
+		FREE_NULL_BITMAP(free_cores);
+		xfree(cpu_count);
+		return error_code;
+	}
+
+	debug3("cons_res: cr_job_test: distributing job %u", job_ptr->job_id);
+	/** create the select_job_res struct **/
+	job_res                   = create_select_job_res();
+	job_res->node_bitmap      = bit_copy(bitmap);
+	if (job_res->node_bitmap == NULL)
+		fatal("bit_copy malloc failure");
+	job_res->nhosts           = bit_set_count(bitmap);
+	job_res->nprocs           = job_res->nhosts;
+	if (job_ptr->details->ntasks_per_node)
+		job_res->nprocs  *= job_ptr->details->ntasks_per_node;
+	job_res->nprocs           = MAX(job_res->nprocs, job_ptr->num_procs);
+	job_res->node_req         = job_node_req;
+	job_res->cpus             = cpu_count;
+	job_res->cpus_used        = xmalloc(job_res->nhosts * sizeof(uint16_t));
+	job_res->memory_allocated = xmalloc(job_res->nhosts * sizeof(uint32_t));
+	job_res->memory_used      = xmalloc(job_res->nhosts * sizeof(uint32_t));
+
+	/* store the hardware data for the selected nodes */
+	error_code = build_select_job_res(job_res, node_record_table_ptr,
+					  select_fast_schedule);
+	if (error_code != SLURM_SUCCESS) {
+		free_select_job_res(&job_res);		
+		FREE_NULL_BITMAP(free_cores);
+		return error_code;
+	}
+
+	/* sync up cpus with layout_ptr, total up
+	 * all cpus, and load the core_bitmap */
+	ll = -1;
+	total_cpus = 0;
+	c = 0;
+	csize = bit_size(job_res->core_bitmap);
+	for (i = 0, n = 0; n < cr_node_cnt; n++) {
+		uint32_t j;
+		if (layout_ptr && reqmap && bit_test(reqmap,n))
+			ll++;
+		if (bit_test(bitmap, n) == 0)
+			continue;
+		j = cr_get_coremap_offset(n);
+		for (; j < cr_get_coremap_offset(n+1); j++, c++) {
+			if (bit_test(free_cores, j)) {
+				if (c >= csize)	{
+					fatal("cons_res: cr_job_test "
+					      "core_bitmap index error");
+				}
+				bit_set(job_res->core_bitmap, c);
+			}
+		}
+		
+		if (layout_ptr && reqmap && bit_test(reqmap, n)) {
+			job_res->cpus[i] = MIN(job_res->cpus[i],layout_ptr[ll]);
+		} else if (layout_ptr) {
+			job_res->cpus[i] = 0;
+		}
+		total_cpus += job_res->cpus[i];
+		i++;
+	}
+
+	/* When 'srun --overcommit' is used, nprocs is set to a minimum value
+	 * in order to allocate the appropriate number of nodes based on the
+	 * job request.
+	 * For cons_res, all available logical processors will be allocated on
+	 * each allocated node in order to accommodate the overcommit request.
+	 */
+	if (job_ptr->details->overcommit && job_ptr->details->num_tasks)
+		job_res->nprocs = MIN(total_cpus, job_ptr->details->num_tasks);
+
+	debug3("cons_res: cr_job_test: job %u nprocs %u cbits %u/%u nbits %u",
+		job_ptr->job_id, job_res->nprocs, bit_set_count(free_cores),
+		bit_set_count(job_res->core_bitmap), job_res->nhosts);
+	FREE_NULL_BITMAP(free_cores);
+
+	/* distribute the tasks and clear any unused cores */
+	job_ptr->select_job = job_res;
+	error_code = cr_dist(job_ptr, cr_type);
+	if (error_code != SLURM_SUCCESS) {
+		free_select_job_res(&job_ptr->select_job);
+		return error_code;
+	}
+
+	/* translate job_res->cpus array into format with rep count */
+	build_cnt = build_select_job_res_cpu_array(job_res);
+	if (build_cnt >= 0)
+		job_ptr->total_procs = build_cnt;
+	else
+		job_ptr->total_procs = total_cpus;	/* best guess */
+
+	if ((cr_type != CR_CPU_MEMORY) && (cr_type != CR_CORE_MEMORY) &&
+	    (cr_type != CR_SOCKET_MEMORY) && (cr_type != CR_MEMORY))
+		return error_code;
+
+	/* load memory allocated array */
+	save_mem = job_ptr->details->job_min_memory;
+	if (save_mem & MEM_PER_CPU) {
+		/* memory is per-cpu */
+		save_mem &= (~MEM_PER_CPU);
+		for (i = 0; i < job_res->nhosts; i++) {
+			job_res->memory_allocated[i] = job_res->cpus[i] *
+						       save_mem;
+		}
+	} else {
+		/* memory is per-node */
+		for (i = 0; i < job_res->nhosts; i++) {
+			job_res->memory_allocated[i] = save_mem;
+		}
+	}
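+	/* Example: "--mem-per-cpu=1024" arrives here with the MEM_PER_CPU
+	 * flag set, so a node on which this job was allocated 4 CPUs is
+	 * charged 4096 MB, while "--mem=1024" charges a flat 1024 MB on
+	 * every allocated node */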
+	return error_code;
+}
diff --git a/src/plugins/select/cons_res/job_test.h b/src/plugins/select/cons_res/job_test.h
new file mode 100644
index 0000000000000000000000000000000000000000..ae39c71916146ec39ecfc5503a3e6a3293b68f56
--- /dev/null
+++ b/src/plugins/select/cons_res/job_test.h
@@ -0,0 +1,70 @@
+/*****************************************************************************\
+ *  job_test.h - job allocation testing for the consumable resources
+ *  (select/cons_res) plugin
+ *****************************************************************************
+ *  Copyright (C) 2006 Hewlett-Packard Development Company, L.P.
+ *  Written by Susanne M. Balle, <susanne.balle@hp.com>
+ *  CODE-OCEC-09-009. All rights reserved.
+ *  
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
+ *  
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *  
+ *  In addition, as a special exception, the copyright holders give permission
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and
+ *  distribute linked combinations including the two. You must obey the GNU
+ *  General Public License in all respects for all of the code used other than
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this
+ *  exception to your version of the file(s), but you are not obligated to do
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in
+ *  the program, then also delete it here.
+ *
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *  
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#ifndef _CR_JOB_TEST_H
+#define _CR_JOB_TEST_H
+
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <slurm/slurm.h>
+#include <slurm/slurm_errno.h>
+
+#include "src/common/list.h"
+#include "src/common/log.h"
+#include "src/common/node_select.h"
+#include "src/common/pack.h"
+#include "src/common/slurm_protocol_api.h"
+#include "src/common/xassert.h"
+#include "src/common/xmalloc.h"
+#include "src/common/xstring.h"
+#include "src/common/slurm_resource_info.h"
+#include "src/slurmctld/slurmctld.h"
+
+
+/* cr_job_test - does most of the real work for select_p_job_test(), which
+ *	pretty much just handles load-leveling and max_share logic */
+int cr_job_test(struct job_record *job_ptr, bitstr_t *bitmap,
+		uint32_t min_nodes, uint32_t max_nodes, uint32_t req_nodes,
+		int mode, select_type_plugin_info_t cr_type,
+		enum node_cr_state job_node_req, uint32_t cr_node_cnt,
+		struct part_res_record *cr_part_ptr,
+		struct node_use_record *node_usage);
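+
+/* Illustrative caller (hypothetical, simplified): the plugin's
+ * select_p_job_test() is expected to do little more than
+ *
+ *	return cr_job_test(job_ptr, bitmap, min_nodes, max_nodes,
+ *			   req_nodes, mode, cr_type, job_node_req,
+ *			   select_node_cnt, select_part_record,
+ *			   select_node_usage);
+ */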
+
+#endif /* !_CR_JOB_TEST_H */
diff --git a/src/plugins/select/cons_res/select_cons_res.c b/src/plugins/select/cons_res/select_cons_res.c
index 7c6d07b26175a182e4530ed1a4c35c32fa11c800..cebb6c078d8f230a4ee20fcce1cc88be4bd60f2a 100644
--- a/src/plugins/select/cons_res/select_cons_res.c
+++ b/src/plugins/select/cons_res/select_cons_res.c
@@ -1,8 +1,6 @@
 /*****************************************************************************\
  *  select_cons_res.c - node selection plugin supporting consumable 
  *  resources policies.
- *
- *  $Id: select_cons_res.c 17022 2009-03-25 18:42:18Z jette $
  *****************************************************************************\
  *
  *  The following example below illustrates how four jobs are allocated
@@ -59,12 +57,13 @@
  * the job throughput can increase dramatically.
  *
  *****************************************************************************
- *  Copyright (C) 2005-2006 Hewlett-Packard Development Company, L.P.
+ *  Copyright (C) 2005-2008 Hewlett-Packard Development Company, L.P.
  *  Written by Susanne M. Balle <susanne.balle@hp.com>, who borrowed heavily
  *  from select/linear 
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -104,6 +103,7 @@
 
 #include "select_cons_res.h"
 #include "dist_tasks.h"
+#include "job_test.h"
 
 #if(0)
 #define CR_DEBUG 1
@@ -139,995 +139,868 @@
  */
 const char plugin_name[] = "Consumable Resources (CR) Node Selection plugin";
 const char plugin_type[] = "select/cons_res";
-const uint32_t plugin_version = 90;
-const uint32_t pstate_version = 6;	/* version control on saved state */
-
-#define CR_JOB_ALLOCATED_CPUS  0x1
-#define CR_JOB_ALLOCATED_MEM   0x2
+const uint32_t plugin_version = 91;
+const uint32_t pstate_version = 7;	/* version control on saved state */
 
 select_type_plugin_info_t cr_type = CR_CPU; /* cr_type is overwritten in init() */
 
-/* Array of node_cr_record. One entry for each node in the cluster */
-struct node_cr_record *select_node_ptr = NULL;
 uint16_t select_fast_schedule;
+
+uint16_t *cr_node_num_cores = NULL;
+uint32_t *cr_num_core_count = NULL;
+struct part_res_record *select_part_record = NULL;
+struct node_res_record *select_node_record = NULL;
+struct node_use_record *select_node_usage  = NULL;
 static int select_node_cnt = 0;
-static time_t last_cr_update_time;
-static pthread_mutex_t cr_mutex = PTHREAD_MUTEX_INITIALIZER;
-
-List select_cr_job_list = NULL; /* List of select_cr_job(s) that are still active */
-static uint32_t last_verified_job_id = 0;
-/* verify the job list after every CR_VERIFY_JOB_CYCLE jobs have finished */
-#define CR_VERIFY_JOB_CYCLE 2000
-
-static void	_cr_job_list_del(void *x);
-static int	_cr_job_list_sort(void *x, void *y);
-static struct node_cr_record *_dup_node_cr(struct node_cr_record *node_cr_ptr,
-					   int node_cr_cnt);
-static int	_job_test(struct job_record *job_ptr, bitstr_t *bitmap,
-			uint32_t min_nodes, uint32_t max_nodes, 
-			uint32_t req_nodes, int mode, 
-			enum node_cr_state job_node_req,
-			struct node_cr_record *select_node_ptr);
-static int 	_will_run_test(struct job_record *job_ptr, bitstr_t *bitmap,
+static bool cr_priority_test      = false;
+static bool cr_priority_selection = false;
+
+/* Procedure Declarations */
+static int _will_run_test(struct job_record *job_ptr, bitstr_t *bitmap,
 			uint32_t min_nodes, uint32_t max_nodes, 
-			uint32_t req_nodes, enum node_cr_state job_node_req);
+			uint32_t req_nodes, uint16_t job_node_req);
 
-#ifdef CR_DEBUG
-static void _dump_state(struct node_cr_record *select_node_ptr)
-{
-	int i, j, cores;
-	struct part_cr_record *parts;
-	ListIterator job_iterator;
-	struct select_cr_job *job;
+#if (CR_DEBUG)
+
+static void _dump_job_res(select_job_res_t job) {
+	char str[64];
 
+	if (job->core_bitmap)
+		bit_fmt(str, 63, job->core_bitmap);
+	else
+		sprintf(str, "[no core_bitmap]");
+	info("DEBUG: Dump select_job_res: nhosts %u cb %s", job->nhosts, str);
+}
+ 
+static void _dump_nodes()
+{
+	int i;
+	
 	for (i=0; i<select_node_cnt; i++) {
-		info("node:%s sockets:%u alloc_memory:%u state:%d",
-			select_node_ptr[i].node_ptr->name,
-			select_node_ptr[i].sockets,
-			select_node_ptr[i].alloc_memory,
-			select_node_ptr[i].node_state);
-		parts = select_node_ptr[i].parts;
-		while (parts) {
-			info("  part:%s rows:%u",
-				parts->part_ptr->name,
-				parts->num_rows);
-			cores = select_node_ptr[i].sockets * 
-				parts->num_rows;
-			for (j=0; j<cores; j++) {
-				info("    alloc_cores[%d]:%u",
-					j, parts->alloc_cores[j]);
-			}
-			parts = parts->next;
-		}
+		info("node:%s cpus:%u c:%u s:%u t:%u mem:%u a_mem:%u state:%d",
+			select_node_record[i].node_ptr->name,
+			select_node_record[i].cpus,
+			select_node_record[i].cores,
+			select_node_record[i].sockets,
+			select_node_record[i].vpus,
+			select_node_record[i].real_memory,
+			select_node_usage[i].alloc_memory,
+			select_node_usage[i].node_state);	
 	}
+}
+
+static void _dump_part(struct part_res_record *p_ptr)
+{
+	uint16_t i;
+	info("part:%s rows:%u pri:%u ", p_ptr->name, p_ptr->num_rows,
+		p_ptr->priority);
+	if (!p_ptr->row)
+		return;
 
-	if (select_cr_job_list == NULL)
-	    	return;
-	job_iterator = list_iterator_create(select_cr_job_list);
-	while ((job = (struct select_cr_job *) list_next(job_iterator))) {
-		info("job:%u nprocs:%u nhosts:%u",
-			job->job_id, job->nprocs, job->nhosts);
-		if (job->job_ptr == NULL)
-			error("  job_ptr is NULL");
-		else if (job->job_ptr->job_id != job->job_id)
-			error("  job_ptr is bad");
-		for (i=0; ((i<job->nhosts)&&(i<2)); i++) {
-			info("  cpus:%u alloc_cpus:%u ",
-				job->cpus[i], job->alloc_cpus[i]);
-			info("  node_offset:%u",
-				job->node_offset[i]);
+	for (i = 0; i < p_ptr->num_rows; i++) {
+		char str[64]; /* print first 64 bits of bitmaps */
+		if (p_ptr->row[i].row_bitmap) {
+			bit_fmt(str, 63, p_ptr->row[i].row_bitmap);
+		} else {
+			sprintf(str, "[no row_bitmap]");
 		}
+		info("  row%u: num_jobs %u: bitmap: %s", i,
+			p_ptr->row[i].num_jobs, str);
+	}
+}
+
+static void _dump_state(struct part_res_record *p_ptr)
+{
+	_dump_nodes();
+
+	/* dump partition data */
+	for (; p_ptr; p_ptr = p_ptr->next) {
+		_dump_part(p_ptr);
 	}
-	list_iterator_destroy(job_iterator);
 	return;
 }
 #endif
 
-/* Create a duplicate part_cr_record structure */
-static struct part_cr_record *_dup_part_cr(struct node_cr_record *node_cr_ptr)
+/* Determine whether priority-based selection (sched/gang) is enabled */
+extern bool cr_priority_selection_enabled()
 {
-	int i, j, part_cnt;
-	struct part_cr_record *part_cr_ptr, *new_part_cr_ptr;
-
-	part_cnt = node_cr_ptr->node_ptr->part_cnt;
-	new_part_cr_ptr = xmalloc(sizeof(struct part_cr_record) * part_cnt);
-	part_cr_ptr = node_cr_ptr->parts;
-	for (i=0; i<part_cnt; i++) {
-		if (!part_cr_ptr)
-			break;
-		new_part_cr_ptr[i].part_ptr = part_cr_ptr->part_ptr;
-		new_part_cr_ptr[i].num_rows = part_cr_ptr->num_rows;
-		j = sizeof(uint16_t) * part_cr_ptr->num_rows *
-		    select_node_ptr->sockets;
-		new_part_cr_ptr[i].alloc_cores = xmalloc(j);
-		memcpy(new_part_cr_ptr[i].alloc_cores,
-		       part_cr_ptr->alloc_cores, j);
-		if (i > 0)
-			new_part_cr_ptr[i-1].next = &new_part_cr_ptr[i];
-		part_cr_ptr = part_cr_ptr->next;
+	if (!cr_priority_test) {
+		char *sched_type = slurm_get_sched_type();
+		if (strcmp(sched_type, "sched/gang") == 0)
+			cr_priority_selection = true;
+		xfree(sched_type);
+		cr_priority_test = true;
 	}
-	return new_part_cr_ptr;
+	return cr_priority_selection;
+	
 }
 
-/* Create a duplicate node_cr_records structure */
-static struct node_cr_record *_dup_node_cr(struct node_cr_record *node_cr_ptr,
-					   int node_cr_cnt)
+#define CR_NUM_CORE_ARRAY_INCREMENT 8
+
+/* (re)set cr_node_num_cores and cr_num_core_count arrays */
+static void _init_global_core_data(struct node_record *node_ptr, int node_cnt)
 {
-	int i;
-	struct node_cr_record *new_node_cr_ptr;
+	uint32_t i, n, array_size = CR_NUM_CORE_ARRAY_INCREMENT;
 
-	if (node_cr_ptr == NULL)
-		return NULL;
+	xfree(cr_num_core_count);
+	xfree(cr_node_num_cores);
+	cr_node_num_cores = xmalloc(array_size * sizeof(uint16_t));
+	cr_num_core_count = xmalloc(array_size * sizeof(uint32_t));
 
-	new_node_cr_ptr = xmalloc(sizeof(struct node_cr_record) *
-				  node_cr_cnt);
-
-	for (i=0; i<node_cr_cnt; i++) {
-		new_node_cr_ptr[i].node_ptr     = node_cr_ptr[i].node_ptr;
-		new_node_cr_ptr[i].cpus         = node_cr_ptr[i].cpus;
-		new_node_cr_ptr[i].sockets      = node_cr_ptr[i].sockets;
-		new_node_cr_ptr[i].cores        = node_cr_ptr[i].cores;
-		new_node_cr_ptr[i].threads      = node_cr_ptr[i].threads;
-		new_node_cr_ptr[i].real_memory  = node_cr_ptr[i].real_memory;
-		new_node_cr_ptr[i].alloc_memory = node_cr_ptr[i].alloc_memory;
-		new_node_cr_ptr[i].node_state   = node_cr_ptr[i].node_state;
-		new_node_cr_ptr[i].parts        = _dup_part_cr(&node_cr_ptr[i]);
+	for (i = 0, n = 0; n < node_cnt; n++) {
+		uint16_t cores;
+		if (select_fast_schedule) {
+			cores  = node_ptr[n].config_ptr->cores;
+			cores *= node_ptr[n].config_ptr->sockets;
+		} else {
+			cores  = node_ptr[n].cores;
+			cores *= node_ptr[n].sockets;
+		}
+		if (cr_node_num_cores[i] == cores) {
+			cr_num_core_count[i]++;
+			continue;
+		}
+		if (cr_num_core_count[i] > 0) {
+			i++;
+			if (i >= array_size) {
+				array_size += CR_NUM_CORE_ARRAY_INCREMENT;
+				xrealloc(cr_node_num_cores,
+					array_size * sizeof(uint16_t));
+				xrealloc(cr_num_core_count,
+					array_size * sizeof(uint32_t));
+			}
+			}
+		}
+		cr_node_num_cores[i] = cores;
+		cr_num_core_count[i] = 1;
+	}
+	/* make sure both arrays have a zero-terminator field at the end */
+	i++;
+	if (i >= array_size) {
+		array_size += CR_NUM_CORE_ARRAY_INCREMENT;
+		xrealloc(cr_node_num_cores, array_size * sizeof(uint16_t));
+		xrealloc(cr_num_core_count, array_size * sizeof(uint32_t));
 	}
-	return new_node_cr_ptr;
 }
 
-static void _destroy_node_part_array(struct node_cr_record *this_cr_node)
+
+/* return the coremap index to the first core of the given node */
+extern uint32_t cr_get_coremap_offset(uint32_t node_index)
 {
-	struct part_cr_record *p_ptr;
+	uint32_t i;
+	uint32_t cindex = 0;
+	uint32_t n = cr_num_core_count[0];
+	for (i = 0; cr_num_core_count[i] && node_index > n; i++) {
+		cindex += cr_node_num_cores[i] * cr_num_core_count[i];
+		n += cr_num_core_count[i+1];
+	}
+	if (!cr_num_core_count[i])
+		return cindex;
+	n -= cr_num_core_count[i];
 
-	if (!this_cr_node)
-		return;
-	for (p_ptr = this_cr_node->parts; p_ptr; p_ptr = p_ptr->next)
-		xfree(p_ptr->alloc_cores);
-	xfree(this_cr_node->parts);
+	cindex += cr_node_num_cores[i] * (node_index-n);	
+	return cindex;
 }
 
-static void _cr_job_list_del(void *x)
+
+/* return the total number of cores in a given node */
+extern uint32_t cr_get_node_num_cores(uint32_t node_index)
 {
-	xfree(x);
+	uint32_t i = 0;
+	uint32_t pos = cr_num_core_count[i++];
+	while (node_index >= pos) {
+		pos += cr_num_core_count[i++];
+	}
+	return cr_node_num_cores[i-1];
 }
-static int  _cr_job_list_sort(void *x, void *y)
+
+
+/* Helper function for _dup_part_data: create a duplicate part_row_data array */
+static struct part_row_data *_dup_row_data(struct part_row_data *orig_row,
+					   uint16_t num_rows)
 {
-	struct job_record **job1_pptr = (struct job_record **) x;
-	struct job_record **job2_pptr = (struct job_record **) y;
-	return (int) difftime(job1_pptr[0]->end_time, job2_pptr[0]->end_time);
+	struct part_row_data *new_row;
+	int i, j;
+
+	if (num_rows == 0 || !orig_row)
+		return NULL;
+	
+	new_row = xmalloc(num_rows * sizeof(struct part_row_data));
+	for (i = 0; i < num_rows; i++) {
+		new_row[i].num_jobs = orig_row[i].num_jobs;
+		new_row[i].job_list_size = orig_row[i].job_list_size;
+		if (orig_row[i].row_bitmap) 
+		if (orig_row[i].row_bitmap)
+			new_row[i].row_bitmap =
+				bit_copy(orig_row[i].row_bitmap);
+		if (new_row[i].job_list_size == 0)
+			continue;
+		/* copy the job list */
+		new_row[i].job_list = xmalloc(new_row[i].job_list_size *
+							sizeof(bitstr_t *));
+		for (j = 0; j < new_row[i].num_jobs; j++) {
+			new_row[i].job_list[j] = orig_row[i].job_list[j];
+		}
+	}
+	return new_row;
 }
 
-static void _create_node_part_array(struct node_cr_record *this_cr_node)
+
+/* Create a duplicate part_res_record list */
+static struct part_res_record *_dup_part_data(struct part_res_record *orig_ptr)
 {
-	struct node_record *node_ptr;
-	struct part_cr_record *p_ptr;
-	int i;
+	struct part_res_record *new_part_ptr, *new_ptr;
 
-	if (!this_cr_node)
-		return;
-	node_ptr = this_cr_node->node_ptr;
+	if (orig_ptr == NULL)
+		return NULL;
 
-	if (this_cr_node->parts)
-		_destroy_node_part_array(this_cr_node);
+	new_part_ptr = xmalloc(sizeof(struct part_res_record));
+	new_ptr = new_part_ptr;
 
-	if (node_ptr->part_cnt < 1)
-		return;
-	this_cr_node->parts = xmalloc(sizeof(struct part_cr_record) *
-	        		      node_ptr->part_cnt);
-	for (i = 0; i < node_ptr->part_cnt; i++) {
-		p_ptr		 = &(this_cr_node->parts[i]);
-		p_ptr->part_ptr  = node_ptr->part_pptr[i];
-		p_ptr->num_rows  = node_ptr->part_pptr[i]->max_share;
-		if (p_ptr->num_rows & SHARED_FORCE)
-			p_ptr->num_rows &= (~SHARED_FORCE);
-		/* SHARED=EXCLUSIVE sets max_share = 0 */
-		if (p_ptr->num_rows < 1)
-			p_ptr->num_rows = 1;
-#if (CR_DEBUG)
-		info("cons_res: _create_node_part_array: part %s  num_rows %d",
-		     p_ptr->part_ptr->name, p_ptr->num_rows);
-#endif
-		p_ptr->alloc_cores = xmalloc(sizeof(uint16_t) *
-		        		     this_cr_node->sockets *
-					     p_ptr->num_rows);
-		if (i+1 < node_ptr->part_cnt)
-			p_ptr->next = &(this_cr_node->parts[i+1]);
-		else
-			p_ptr->next = NULL;
+	while (orig_ptr) {
+		new_ptr->name = xstrdup(orig_ptr->name);
+		new_ptr->priority = orig_ptr->priority;
+		new_ptr->num_rows = orig_ptr->num_rows;
+		new_ptr->row = _dup_row_data(orig_ptr->row, orig_ptr->num_rows);
+		if (orig_ptr->next) {
+			new_ptr->next = xmalloc(sizeof(struct part_res_record));
+			new_ptr = new_ptr->next;
+		}
+		orig_ptr = orig_ptr->next;
 	}
-
+	return new_part_ptr;
 }
 
-static int _find_job_by_id(void *x, void *key)
-{
-	struct select_cr_job *cr_job_ptr = (struct select_cr_job *) x;
-	uint32_t *job_id = (uint32_t *) key;
-
-	if (cr_job_ptr->job_id == *job_id)
-		return 1;
-	return 0;
-}
 
-/* Find a partition record based upon pointer to slurmctld record */
-extern struct part_cr_record *get_cr_part_ptr(struct node_cr_record *this_node,
-					      struct part_record *part_ptr)
+/* Create a duplicate node_use_record array */
+static struct node_use_record *_dup_node_usage(struct node_use_record *orig_ptr)
 {
-	struct part_cr_record *p_ptr;
+	struct node_use_record *new_use_ptr, *new_ptr;
+	uint32_t i;
 
-	if (part_ptr == NULL)
+	if (orig_ptr == NULL)
 		return NULL;
 
-	if (!this_node->parts)
-		_create_node_part_array(this_node);
+	new_use_ptr = xmalloc(select_node_cnt * sizeof(struct node_use_record));
+	new_ptr = new_use_ptr;
 
-	for (p_ptr = this_node->parts; p_ptr; p_ptr = p_ptr->next) {
-		if (p_ptr->part_ptr == part_ptr)
-			return p_ptr;
+	for (i = 0; i < select_node_cnt; i++) {
+		new_ptr[i].node_state   = orig_ptr[i].node_state;
+		new_ptr[i].alloc_memory = orig_ptr[i].alloc_memory;
 	}
-	error("cons_res: could not find partition %s", part_ptr->name);
-
-	return NULL;
+	return new_use_ptr;
 }
 
-/* This just resizes alloc_cores based on a potential change to
- * the number of sockets on this node (if fast_schedule = 0 and the
- * node checks in with a different node count after initialization).
- * Any changes to the number of partition rows will be caught
- * and adjusted in select_p_reconfigure() */
-static void _chk_resize_node(struct node_cr_record *node)
-{
-	struct part_cr_record *p_ptr;
-
-	if ((select_fast_schedule > 0) ||
-	    (node->cpus >= node->node_ptr->cpus))
-		return;
-
-	verbose("cons_res: increasing node %s cpus from %u to %u",
-		node->node_ptr->name, node->cpus, node->node_ptr->cpus);
-	node->cpus        = node->node_ptr->cpus;
-	node->sockets     = node->node_ptr->sockets;
-	node->cores       = node->node_ptr->cores;
-	node->threads     = node->node_ptr->threads;
-	node->real_memory = node->node_ptr->real_memory;
-	for (p_ptr = node->parts; p_ptr; p_ptr = p_ptr->next) {
-		xrealloc(p_ptr->alloc_cores, (sizeof(uint16_t) *
-			 node->sockets * p_ptr->num_rows));
-		/* NOTE: xrealloc zero fills added memory */
+/* delete the given row data */
+static void _destroy_row_data(struct part_row_data *row, uint16_t num_rows) {
+	uint16_t i;
+	for (i = 0; i < num_rows; i++) {
+		FREE_NULL_BITMAP(row[i].row_bitmap);
+		if (row[i].job_list) {
+			uint32_t j;
+			for (j = 0; j < row[i].num_jobs; j++)
+				row[i].job_list[j] = NULL;
+			xfree(row[i].job_list);
+		}
 	}
+	xfree(row);
 }
 
-static void _chk_resize_job(struct select_cr_job *job, uint16_t node_id, 
-			    uint16_t sockets)
+/* delete the given list of partition data */
+static void _destroy_part_data(struct part_res_record *this_ptr)
 {
-	if ((job->alloc_cores[node_id] == NULL) ||
-	    		(sockets > job->num_sockets[node_id])) {
-		debug3("cons_res: increasing job %u node %u "
-			"num_sockets from %u to %u",
-			job->job_id, node_id, 
-			job->num_sockets[node_id], sockets);
-	    	xrealloc(job->alloc_cores[node_id], sockets * sizeof(uint16_t));
-		/* NOTE: xrealloc zero fills added memory */
-		job->num_sockets[node_id] = sockets;
+	while (this_ptr) {
+		struct part_res_record *tmp = this_ptr;
+		this_ptr = this_ptr->next;
+		xfree(tmp->name);
+		tmp->name = NULL;
+		if (tmp->row) {
+			_destroy_row_data(tmp->row, tmp->num_rows);
+			tmp->row = NULL;
+		}
+		xfree(tmp);
 	}
 }
 
-extern void get_resources_this_node(uint16_t *cpus, uint16_t *sockets, 
-				    uint16_t *cores, uint16_t *threads, 
-				    struct node_cr_record *this_cr_node,
-				    uint32_t jobid)
+
+/* (re)create the global select_part_record array */
+static void _create_part_data()
 {
-	_chk_resize_node(this_cr_node);
+	ListIterator part_iterator;
+	struct part_record *p_ptr;
+	struct part_res_record *this_ptr;
+	int num_parts;
 
-	*cpus    = this_cr_node->cpus;
-	*sockets = this_cr_node->sockets;
-	*cores   = this_cr_node->cores;
-	*threads = this_cr_node->threads;
+	_destroy_part_data(select_part_record);
+	select_part_record = NULL;
 
-	debug3("cons_res %u _get_resources host %s HW_ "
-	       "cpus %u sockets %u cores %u threads %u ", 
-	       jobid, this_cr_node->node_ptr->name,
-	       *cpus, *sockets, *cores, *threads);
-}
+	num_parts = list_count(part_list);
+	if (!num_parts)
+		return;
+	info("cons_res: preparing for %d partitions", num_parts);
 
-/* _get_cpu_data
- * determine the number of available free cores/cpus/sockets
- * IN - p_ptr:       pointer to a node's part_cr_record for a specific partition
- * IN - num_sockets: number of sockets on this node
- * IN - max_cpus:    the total number of cores/cpus/sockets on this node
- * OUT- row_index:   the row index from which the returned value was obtained
- *                   (if -1 then nothing is allocated in this partition)
- * OUT- free_row:    the row index of an unallocated row (if -1 then all rows
- *                   contain allocated cores)
- * RETURN - the maximum number of free cores/cpus/sockets found in the given
- *          row_index (if 0 then node is full; if 'max_cpus' then node is free)
- */
-static uint16_t _get_cpu_data (struct part_cr_record *p_ptr, int num_sockets,
-			       uint16_t max_cpus, int *row_index, int *free_row)
-{
-	int i, j, index;
-	uint16_t alloc_count = 0;
-	bool counting_sockets = 0;
-	if ((cr_type == CR_SOCKET) || (cr_type == CR_SOCKET_MEMORY))
-		counting_sockets = 1;
- 
- 	*free_row = -1;
-	*row_index = -1;
-
-	for (i = 0, index = 0; i < p_ptr->num_rows; i++) {
-		uint16_t cpu_count = 0;
-		uint16_t socket_count = 0;
-		for (j = 0; j < num_sockets; j++, index++) {
-			if (p_ptr->alloc_cores[index]) {
-				socket_count++;
-				cpu_count += p_ptr->alloc_cores[index];
-			}
-		}
-		if (socket_count > 0) {
-			if (counting_sockets) {
-				if ((alloc_count == 0) ||
-				    (socket_count < alloc_count)) {
-					alloc_count = socket_count;
-					*row_index = i;
-				}
-			} else {
-				if ((alloc_count == 0) ||
-				    (cpu_count < alloc_count)) {
-					alloc_count = cpu_count;
-					*row_index = i;
-				}
-			}
-		} 
-		else if (*free_row < 0) {
-			*free_row = i;
+	select_part_record = xmalloc(sizeof(struct part_res_record));
+	this_ptr = select_part_record;
+
+	part_iterator = list_iterator_create(part_list);
+	if (part_iterator == NULL)
+		fatal ("memory allocation failure");
+
+	while ((p_ptr = (struct part_record *) list_next(part_iterator))) {
+		this_ptr->name = xstrdup(p_ptr->name);
+		this_ptr->num_rows = p_ptr->max_share;
+		if (this_ptr->num_rows & SHARED_FORCE)
+			this_ptr->num_rows &= (~SHARED_FORCE);
+		/* SHARED=EXCLUSIVE sets max_share = 0 */
+		if (this_ptr->num_rows < 1)
+			this_ptr->num_rows = 1;
+		/* we'll leave the 'row' array blank for now */
+		this_ptr->row = NULL;
+		this_ptr->priority = p_ptr->priority;
+		num_parts--;
+		if (num_parts) {
+			this_ptr->next =
+				xmalloc(sizeof(struct part_res_record));
+			this_ptr = this_ptr->next;
 		}
 	}
-	return max_cpus - alloc_count;
+	/* should we sort the select_part_record list by priority here? */
 }
 
-/*
- * _get_task_count - Given the job requirements, compute the number of tasks
- *                   this node can run
- *
- * IN job_ptr - pointer to job being scheduled
- * IN index - index of node's configuration information in select_node_ptr
- */
-static uint16_t _get_task_count(struct node_cr_record *select_node_ptr,
-				struct job_record *job_ptr, const int index, 
-				const bool all_available, bool try_partial_idle,
-				enum node_cr_state job_node_req)
-{
-	uint16_t numtasks, cpus_per_task = 0;
-	uint16_t max_sockets = 0, max_cores = 0, max_threads = 0;
-	uint16_t min_sockets = 0, min_cores = 0, min_threads = 0;
-	uint16_t ntasks_per_node = 0, ntasks_per_socket = 0, ntasks_per_core = 0;
-	uint16_t i, cpus, sockets, cores, threads, *alloc_cores = NULL;
-	struct node_cr_record *this_node;
-	struct part_cr_record *p_ptr;
-	struct multi_core_data *mc_ptr = NULL;
-
-	cpus_per_task   = job_ptr->details->cpus_per_task;
-	ntasks_per_node = job_ptr->details->ntasks_per_node;
-
-	mc_ptr      = job_ptr->details->mc_ptr;
-	min_sockets = mc_ptr->min_sockets;
-	max_sockets = mc_ptr->max_sockets;
-	min_cores   = mc_ptr->min_cores;
-	max_cores   = mc_ptr->max_cores;
-	min_threads = mc_ptr->min_threads;
-	max_threads = mc_ptr->max_threads;
-	ntasks_per_socket = mc_ptr->ntasks_per_socket;
-	ntasks_per_core   = mc_ptr->ntasks_per_core;
-
-	this_node = &(select_node_ptr[index]);
-	get_resources_this_node(&cpus, &sockets, &cores, &threads, 
-				this_node, job_ptr->job_id);
-
-	alloc_cores = xmalloc(sockets * sizeof(uint16_t));
-	/* array is zero filled by xmalloc() */
-
-	if (!all_available) {
-		p_ptr = get_cr_part_ptr(this_node, job_ptr->part_ptr);
-		if (!p_ptr) {
-			error("cons_res: _get_task_count: could not find part %s",
-			      job_ptr->part_ptr->name);
-		} else {
-			if (job_node_req == NODE_CR_ONE_ROW) {
-				/* need to scan over all partitions with
-				 * num_rows = 1 */
-				for (p_ptr = this_node->parts; p_ptr;
-				     p_ptr = p_ptr->next) {
-					if (p_ptr->num_rows > 1)
-						continue;
-					for (i = 0; i < sockets; i++) {
-					    if ((cr_type == CR_SOCKET) ||
-						(cr_type == CR_SOCKET_MEMORY)) {
-						if (p_ptr->alloc_cores[i])
-							alloc_cores[i] += cores;
-					    } else {
-						alloc_cores[i] +=
-							p_ptr->alloc_cores[i];
-					    }
-					}
-				}
-			} else {
-				/* job_node_req == EXCLUSIVE | AVAILABLE
-				 * if EXCLUSIVE, then node *should* be free and
-				 * this code should fall through with
-				 * alloc_cores all set to zero.
-				 * if AVAILABLE then scan partition rows based
-				 * on 'try_partial_idle' setting. Note that
-				 * if 'try_partial_idle' is FALSE then this
-				 * code should use a 'free' row and this is
-				 * where a new row will first be evaluated.
-				 */
-				uint16_t count, max_cpus;
-				int alloc_row, free_row;
-
-				max_cpus = cpus;
-				if ((cr_type == CR_SOCKET) ||
-				    (cr_type == CR_SOCKET_MEMORY))
-					max_cpus = sockets;
-				if ((cr_type == CR_CORE) ||
-				    (cr_type == CR_CORE_MEMORY))
-					max_cpus = cores * sockets;
-
-				count = _get_cpu_data(p_ptr, sockets, max_cpus,
-						      &alloc_row, &free_row);
-				if ((count == 0) && (free_row == -1)) {
-					/* node is completely allocated */
-					xfree(alloc_cores);
-					return 0;
-				}
-				if ((free_row == -1) && (!try_partial_idle)) {
-					/* no free rows, so partial idle is
-					 * all that is left! */
-					try_partial_idle = 1;
-				}
-				if (try_partial_idle && (alloc_row > -1)) {
-					alloc_row *= sockets;
-					for (i = 0; i < sockets; i++) {
-						alloc_cores[i] += p_ptr->
-							alloc_cores[alloc_row+i];
-					}
-				}
-			}
-		}
-	}
-#if (CR_DEBUG)
-	for (i = 0; i < sockets; i+=2) {
-		info("cons_res: _get_task_count: %s alloc_cores[%d]=%d, [%d]=%d",
-		     this_node->node_ptr->name, i, alloc_cores[i],
-		     i+1, alloc_cores[i+1]);
-	}
-#endif
 
-	numtasks = slurm_get_avail_procs(max_sockets, max_cores, max_threads,
-					 min_sockets, min_cores,
-					 cpus_per_task,
-					 ntasks_per_node,
-					 ntasks_per_socket,
-					 ntasks_per_core,
-					 &cpus, &sockets, &cores,
-					 &threads, alloc_cores, 
-					 cr_type, job_ptr->job_id,
-					 this_node->node_ptr->name);
-
-	if (job_ptr->details->job_min_memory & MEM_PER_CPU) {
-		uint32_t free_mem, mem_per_cpu;
-		int max_cpus;
-		mem_per_cpu = job_ptr->details->job_min_memory & (~MEM_PER_CPU);
-		free_mem = this_node->real_memory - this_node->alloc_memory;
-		max_cpus = free_mem / mem_per_cpu;
-		/* info("cpus avail:%d  mem for %d", numtasks, max_cpus); */
-		numtasks = MIN(numtasks, max_cpus);
-	}
+/* List sort function: sort by the job's expected end time */
+static int _cr_job_list_sort(void *x, void *y)
+{
+	struct job_record **job1_pptr = (struct job_record **) x;
+	struct job_record **job2_pptr = (struct job_record **) y;
+	return (int) difftime(job1_pptr[0]->end_time, job2_pptr[0]->end_time);
+}
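+/* Editorial sketch (reviewer addition, not part of the patch): list_sort()
+ * hands the comparator pointers to the stored item pointers, hence the
+ * double indirection above. Assuming a List of job_record pointers, usage
+ * would look like:
+ *
+ *	List cr_job_list = list_create(NULL);
+ *	list_append(cr_job_list, job_ptr);
+ *	list_sort(cr_job_list, _cr_job_list_sort);
+ *
+ * difftime() returns a double; since end_time values are whole seconds,
+ * the (int) cast preserves the ordering the comparator needs. */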
 
-#if (CR_DEBUG)
-	info("cons_res: _get_task_count computed a_tasks %d s %d c %d "
-		"t %d on %s for job %d",
-		numtasks, sockets, cores, 
-		threads, this_node->node_ptr->name, job_ptr->job_id);
-#endif
-	xfree(alloc_cores);
-	return(numtasks);
-}		
 
-/* xfree an array of node_cr_record */
-static void _xfree_select_nodes(struct node_cr_record *ptr, int count)
+/* delete the given select_node_record and select_node_usage arrays */
+static void _destroy_node_data(struct node_use_record *node_usage,
+				struct node_res_record *node_data)
 {
-	int i;
-	
-	if (ptr == NULL)
-		return;
-
-	for (i = 0; i < count; i++)
-		_destroy_node_part_array(&(ptr[i]));
-	xfree(ptr);
+	xfree(node_data);
+	xfree(node_usage);
 }
 
-/* xfree a select_cr_job job */
-static void _xfree_select_cr_job(struct select_cr_job *job)
+
+static void _add_job_to_row(struct select_job_res *job,
+			    struct part_row_data *r_ptr)
 {
-	int i;
+	/* add the job to the row_bitmap */
+	if (r_ptr->row_bitmap && r_ptr->num_jobs == 0) {
+		/* if no jobs, clear the existing row_bitmap first */
+		uint32_t size = bit_size(r_ptr->row_bitmap);
+		bit_nclear(r_ptr->row_bitmap, 0, size-1);
+	}
+	add_select_job_to_row(job, &(r_ptr->row_bitmap), cr_node_num_cores,
+				cr_num_core_count);
 	
-	if (job == NULL)
-		return;
-
-	xfree(job->cpus);
-	xfree(job->alloc_cpus);	
-	xfree(job->node_offset);	
-	xfree(job->alloc_memory);
-	if ((cr_type == CR_CORE)   || (cr_type == CR_CORE_MEMORY) ||
-	    (cr_type == CR_SOCKET) || (cr_type == CR_SOCKET_MEMORY)) {
-		for (i = 0; i < job->nhosts; i++)
-			xfree(job->alloc_cores[i]);
-		xfree(job->alloc_cores);
-		xfree(job->num_sockets);
+	/*  add the job to the job_list */
+	if (r_ptr->num_jobs >= r_ptr->job_list_size) {
+		r_ptr->job_list_size += 8;
+		xrealloc(r_ptr->job_list, r_ptr->job_list_size *
+					sizeof(struct select_job_res *));
 	}
-	FREE_NULL_BITMAP(job->node_bitmap);
-	xfree(job);
+	r_ptr->job_list[r_ptr->num_jobs++] = job;
 }
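+/* Editorial sketch (reviewer addition): job_list grows in fixed steps of 8
+ * entries, so xrealloc() runs at most once per 8 appends. The equivalent
+ * pattern in standard C (SLURM's xrealloc macro instead updates the
+ * pointer in place and aborts on allocation failure):
+ *
+ *	if (num_jobs >= job_list_size) {
+ *		job_list_size += 8;
+ *		job_list = realloc(job_list,
+ *				   job_list_size * sizeof(*job_list));
+ *	}
+ *	job_list[num_jobs++] = job;
+ *
+ * Doubling would give amortized O(1) appends, but a row rarely holds more
+ * than a handful of jobs, so the fixed increment is adequate. */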
 
-/* Free the select_cr_job_list list and the individual objects before
- * existing the plug-in.
- */
-static void _clear_job_list(void)
+
+/* test for conflicting core_bitmap bits */
+static int _can_job_fit_in_row(struct select_job_res *job,
+				struct part_row_data *r_ptr)
 {
-	ListIterator job_iterator;
-	struct select_cr_job *job;
+	if (r_ptr->num_jobs == 0 || !r_ptr->row_bitmap)
+		return 1;
+	return can_select_job_cores_fit(job, r_ptr->row_bitmap,
+					cr_node_num_cores, cr_num_core_count);
+}
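+/* Editorial note (reviewer addition): the fit test is an overlap check on
+ * core bitmaps. Reduced to a single machine word it is just:
+ *
+ *	static int fits(uint64_t row_cores, uint64_t job_cores)
+ *	{
+ *		return (row_cores & job_cores) == 0;
+ *	}
+ *
+ * can_select_job_cores_fit() presumably performs the same test across the
+ * variable-length per-node core maps (cr_node_num_cores et al.). */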
 
-	if (select_cr_job_list == NULL)
-	    	return;
 
-	slurm_mutex_lock(&cr_mutex);
-	job_iterator = list_iterator_create(select_cr_job_list);
-	while ((job = (struct select_cr_job *) list_next(job_iterator))) {
-		list_remove(job_iterator);
-		_xfree_select_cr_job(job);
-	}
-	list_iterator_destroy(job_iterator);
-	slurm_mutex_unlock(&cr_mutex);
+/* helper function for cr_sort_part_rows() */
+static void _swap_rows(struct part_row_data *a, struct part_row_data *b)
+{
+	struct part_row_data tmprow;
+
+	tmprow.row_bitmap    = a->row_bitmap;
+	tmprow.num_jobs      = a->num_jobs;
+	tmprow.job_list      = a->job_list;
+	tmprow.job_list_size = a->job_list_size;
+	
+	a->row_bitmap    = b->row_bitmap;
+	a->num_jobs      = b->num_jobs;
+	a->job_list      = b->job_list;
+	a->job_list_size = b->job_list_size;
+	
+	b->row_bitmap    = tmprow.row_bitmap;
+	b->num_jobs      = tmprow.num_jobs;
+	b->job_list      = tmprow.job_list;
+	b->job_list_size = tmprow.job_list_size;
+	
+	return;
 }
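+/* Editorial note (reviewer addition): if the four fields copied above are
+ * the whole of struct part_row_data (an assumption about the struct
+ * layout), a plain struct assignment would be equivalent and shorter:
+ *
+ *	struct part_row_data tmp = *a;
+ *	*a = *b;
+ *	*b = tmp;
+ */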
 
-static void _verify_select_job_list(uint32_t job_id)
+
+/* sort the rows of a partition from "most allocated" to "least allocated" */
+extern void cr_sort_part_rows(struct part_res_record *p_ptr)
 {
-	ListIterator job_iterator;
-	struct select_cr_job *job;
+	uint32_t i, j, a, b;
 
-	if (list_count(select_cr_job_list) < 1) {
-		last_verified_job_id = job_id;
-		return;
-	}
-	if ((job_id > last_verified_job_id) &&
-	    (job_id < (last_verified_job_id + CR_VERIFY_JOB_CYCLE))) {
+	if (!p_ptr->row)
 		return;
-	}
-
-	last_verified_job_id = job_id;
-	slurm_mutex_lock(&cr_mutex);
-	job_iterator = list_iterator_create(select_cr_job_list);
-	while ((job = (struct select_cr_job *) list_next(job_iterator))) {
-		if (find_job_record(job->job_id) == NULL) {
-			list_remove(job_iterator);
-			debug2("cons_res: _verify_job_list: removing "
-				"nonexistent job %u", job->job_id);
-			_xfree_select_cr_job(job);
+		
+	for (i = 0; i < p_ptr->num_rows; i++) {
+		if (p_ptr->row[i].row_bitmap)
+			a = bit_set_count(p_ptr->row[i].row_bitmap);
+		else
+			a = 0;
+		for (j = i+1; j < p_ptr->num_rows; j++) {
+			if (!p_ptr->row[j].row_bitmap)
+				continue;
+			b = bit_set_count(p_ptr->row[j].row_bitmap);
+			if (b > a) {
+				_swap_rows(&(p_ptr->row[i]), &(p_ptr->row[j]));
+			}
 		}
 	}
-	list_iterator_destroy(job_iterator);
-	slurm_mutex_unlock(&cr_mutex);	
-	last_cr_update_time = time(NULL);
+	return;
 }
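+/* Editorial note (reviewer addition): this is a selection sort, descending
+ * by bit_set_count(). num_rows comes from the partition's max_share value
+ * and is therefore small, so the O(n^2) scan is a reasonable trade against
+ * qsort() setup cost. */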
 
-/* Append a specific select_cr_job to select_cr_job_list. If the
- * select_job already exists then it is deleted and re-added otherwise
- * it is just added to the list.
+
+/*
+ * _build_row_bitmaps: A job has been removed from the given partition,
+ *                     so the row_bitmap(s) need to be reconstructed.
+ *                     Optimize the jobs into the least number of rows,
+ *                     and make the lower rows as dense as possible.
+ * 
+ * IN/OUT: p_ptr   - the partition that has jobs to be optimized
  */
-static void _append_to_job_list(struct select_cr_job *new_job)
+static void _build_row_bitmaps(struct part_res_record *p_ptr)
 {
-	int job_id = new_job->job_id;
-	struct select_cr_job *old_job = NULL;
-	ListIterator iterator = list_iterator_create(select_cr_job_list);
+	uint32_t i, j, num_jobs, size;
+	int x, *jstart;
+	struct part_row_data *this_row, *orig_row;
+	struct select_job_res **tmpjobs, *job;
+	
+	if (!p_ptr->row)
+		return;
 
-	slurm_mutex_lock(&cr_mutex);
-	while ((old_job = (struct select_cr_job *) list_next(iterator))) {
-		if (old_job->job_id != job_id)
-			continue;
-		list_remove(iterator);	/* Delete record for JobId job_id */
-		_xfree_select_cr_job(old_job);	/* xfree job structure */
-		break;
+	if (p_ptr->num_rows == 1) {
+		this_row = &(p_ptr->row[0]);
+		if (this_row->num_jobs == 0) {
+			if (this_row->row_bitmap) {
+				size = bit_size(this_row->row_bitmap);
+				bit_nclear(this_row->row_bitmap, 0, size-1);
+			}
+			return;
+		}
+		
+		/* rebuild the row bitmap */
+		num_jobs = this_row->num_jobs;
+		tmpjobs = xmalloc(num_jobs * sizeof(struct select_job_res *));	
+		for (i = 0; i < num_jobs; i++) {
+			tmpjobs[i] = this_row->job_list[i];
+			this_row->job_list[i] = NULL;
+		}
+		this_row->num_jobs = 0; /* so the next add clears row_bitmap */
+		for (i = 0; i < num_jobs; i++) {
+			_add_job_to_row(tmpjobs[i], this_row);
+		}
+		xfree(tmpjobs);
+		return;
 	}
-	list_iterator_destroy(iterator);
-	list_append(select_cr_job_list, new_job);
-	slurm_mutex_unlock(&cr_mutex);
-	debug3 ("cons_res: _append_to_job_list job_id %u to list. "
-		"list_count %d ", job_id, list_count(select_cr_job_list));
-}
 
-/* find the maximum number of idle cpus from all partitions */
-static uint16_t _count_idle_cpus(struct node_cr_record *this_node)
-{
-	struct part_cr_record *p_ptr;
-	int i, j, index, idlecpus;
-	uint16_t cpus, sockets, cores, threads;
+	/* gather data */
+	num_jobs = 0;
+	for (i = 0; i < p_ptr->num_rows; i++) {
+		if (p_ptr->row[i].num_jobs) {
+			num_jobs += p_ptr->row[i].num_jobs;
+		}
+	}
+	if (num_jobs == 0) {
+		size = bit_size(p_ptr->row[0].row_bitmap);
+		for (i = 0; i < p_ptr->num_rows; i++) {
+			if (p_ptr->row[i].row_bitmap) {
+				bit_nclear(p_ptr->row[i].row_bitmap, 0, size-1);
+			}
+		}
+		return;
+	}
 
-	if (this_node->node_state == NODE_CR_RESERVED)
-		return (uint16_t) 0;
+#if (CR_DEBUG)
+	info("DEBUG: _build_row_bitmaps (before):");
+	_dump_part(p_ptr);
+#endif
+	debug3("cons_res: build_row_bitmaps reshuffling %u jobs", num_jobs);
 
-	get_resources_this_node(&cpus, &sockets, &cores, &threads, 
-				this_node, 0);
+	/* make a copy, in case we cannot do better than this */
+	orig_row = _dup_row_data(p_ptr->row, p_ptr->num_rows);
+	if (orig_row == NULL)
+		return;
+	
+	/* get row_bitmap size from the first row (we can safely assume the
+	 * first row_bitmap exists because at least one job exists) */
+	size = bit_size(p_ptr->row[0].row_bitmap);
+	
+	/* create a master job list and clear out ALL row data */
+	tmpjobs = xmalloc(num_jobs * sizeof(struct select_job_res *));	
+	jstart  = xmalloc(num_jobs * sizeof(int));
+	x = 0;
+	for (i = 0; i < p_ptr->num_rows; i++) {
+		for (j = 0; j < p_ptr->row[i].num_jobs; j++) {
+			tmpjobs[x] = p_ptr->row[i].job_list[j];
+			p_ptr->row[i].job_list[j] = NULL;
+			jstart[x] = bit_ffs(tmpjobs[x]->node_bitmap);
+			jstart[x] = cr_get_coremap_offset(jstart[x]);
+			jstart[x] += bit_ffs(tmpjobs[x]->core_bitmap);
+			x++;
+		}
+		p_ptr->row[i].num_jobs = 0;
+		if (p_ptr->row[i].row_bitmap) {
+			bit_nclear(p_ptr->row[i].row_bitmap, 0, size-1);
+		}
+	}
+	
+	/* VERY difficult: Optimal placement of jobs in the matrix
+	 * - how to order jobs to be added to the matrix?
+	 *   - "by size" does not guarantee optimal placement
+	 *
+	 *   - for now, try sorting jobs by first bit set
+	 *     - if job allocations stay "in blocks", then this should work OK
+	 *     - may still get scenarios where jobs should switch rows
+	 *     - fixme: JOB SHUFFLING BETWEEN ROWS NEEDS TESTING
+	 */
+	for (i = 0; i < num_jobs; i++) {
+		for (j = i+1; j < num_jobs; j++) {
+			if (jstart[j] < jstart[i] || (jstart[j] == jstart[i] &&
+			    tmpjobs[j]->nprocs > tmpjobs[i]->nprocs)) {
+				x = jstart[i];
+				jstart[i] = jstart[j];
+				jstart[j] = x;
+				job = tmpjobs[i];
+				tmpjobs[i] = tmpjobs[j];
+				tmpjobs[j] = job;
+			}
+		}
+	}
 
-	if (!this_node->parts)
-		return cpus;
+#if (CR_DEBUG)
+	for (i = 0; i < num_jobs; i++) {
+		char cstr[64], nstr[64];
+		if (tmpjobs[i]->core_bitmap)
+			bit_fmt(cstr, (sizeof(cstr)-1), tmpjobs[i]->core_bitmap);
+		else
+			sprintf(cstr, "[no core_bitmap]");
+		if (tmpjobs[i]->node_bitmap)
+			bit_fmt(nstr, (sizeof(nstr)-1), tmpjobs[i]->node_bitmap);
+		else
+			sprintf(nstr, "[no node_bitmap]");
+		info ("DEBUG:  jstart %d job nb %s cb %s", jstart[i], nstr,
+			cstr);
+	}
+#endif
 
-	idlecpus = cpus;
-	if (this_node->node_state == NODE_CR_ONE_ROW) {
-		/* check single-row partitions for idle CPUs */
-		for (p_ptr = this_node->parts; p_ptr; p_ptr = p_ptr->next) {
-			if (p_ptr->num_rows > 1)
-				continue;
-			for (i = 0; i < this_node->sockets; i++) {
-				if ((cr_type == CR_SOCKET) ||
-				    (cr_type == CR_SOCKET_MEMORY)) {
-				 	if (p_ptr->alloc_cores[i])
-						idlecpus -= cores;
-				} else {
-					idlecpus -= p_ptr->alloc_cores[i];
-				}
+	/* add jobs to the rows */
+	for (j = 0; j < num_jobs; j++) {
+		for (i = 0; i < p_ptr->num_rows; i++) {
+			if (_can_job_fit_in_row(tmpjobs[j], &(p_ptr->row[i]))) {
+				/* job fits in row, so add it */
+				_add_job_to_row(tmpjobs[j], &(p_ptr->row[i]));
+				tmpjobs[j] = NULL;
+				break;
 			}
-			if (idlecpus < 1)
-				return (uint16_t) 0;
 		}
-		return (uint16_t) idlecpus;
+		/* job should have been added, so shuffle the rows */
+		cr_sort_part_rows(p_ptr);
 	}
-
-	if (this_node->node_state == NODE_CR_AVAILABLE) {
-		/* check all partitions for idle CPUs */
-		int tmpcpus, max_idle = 0;
-		for (p_ptr = this_node->parts; p_ptr; p_ptr = p_ptr->next) {
-			for (i = 0, index = 0; i < p_ptr->num_rows; i++) {
-				tmpcpus = idlecpus;
-				for (j = 0; j < this_node->sockets;
-				     j++, index++) {
-				 	if ((cr_type == CR_SOCKET) ||
-				 	    (cr_type == CR_SOCKET_MEMORY)) {
-						if (p_ptr->alloc_cores[index])
-							tmpcpus -= cores;
-					} else {
-						tmpcpus -= p_ptr->
-							   alloc_cores[index];
-					}
-				}
-				if (tmpcpus > max_idle) {
-					max_idle = tmpcpus;
-					if (max_idle == idlecpus)
-						break;
-				}
+	
+	/* test for dangling jobs */
+	for (j = 0; j < num_jobs; j++) {
+		if (tmpjobs[j])
+			break;
+	}
+	if (j < num_jobs) {
+		/* we found a dangling job, which means our packing
+		 * algorithm couldn't improve upon the existing layout.
+		 * Thus, we'll restore the original layout here */
+		debug3("cons_res: build_row_bitmap: dangling job found");
+#if (CR_DEBUG)
+		info("DEBUG: _build_row_bitmaps (post-algorithm):");
+		_dump_part(p_ptr);
+#endif
+		_destroy_row_data(p_ptr->row, p_ptr->num_rows);
+		p_ptr->row = orig_row;
+		orig_row = NULL;
+		
+		/* still need to rebuild row_bitmaps */
+		for (i = 0; i < p_ptr->num_rows; i++) {
+			if (p_ptr->row[i].row_bitmap)
+				bit_nclear(p_ptr->row[i].row_bitmap, 0, size-1);
+			if (p_ptr->row[i].num_jobs == 0)
+				continue;
+			for (j = 0; j < p_ptr->row[i].num_jobs; j++) {
+				add_select_job_to_row(p_ptr->row[i].job_list[j],
+						    &(p_ptr->row[i].row_bitmap),
+						      cr_node_num_cores,
+						      cr_num_core_count);
 			}
-			if (max_idle == idlecpus)
-				break;
 		}
-		if (this_node->parts)
-			idlecpus = max_idle;
 	}
-	return (uint16_t) idlecpus;
-}
-
-static int _synchronize_bitmaps(bitstr_t ** partially_idle_bitmap)
-{
-	int size, i, idlecpus = bit_set_count(avail_node_bitmap);
-	size = bit_size(avail_node_bitmap);
-	bitstr_t *bitmap = bit_alloc(size);
-
-	*partially_idle_bitmap = bitmap;
-	if (bitmap == NULL)
-		return SLURM_ERROR;
 
-	debug3("cons_res: synch_bm: size avail %d (%d set) size idle %d ",
-	       size, idlecpus, bit_size(idle_node_bitmap));
+#if (CR_DEBUG)
+	info("DEBUG: _build_row_bitmaps (after):");
+	_dump_part(p_ptr);
+#endif
 
-	for (i = 0; i < select_node_cnt; i++) {
-		if (bit_test(avail_node_bitmap, i) != 1)
-			continue;
+	if (orig_row)
+		_destroy_row_data(orig_row, p_ptr->num_rows);
+	xfree(tmpjobs);
+	xfree(jstart);
+	return;
 
-		if (bit_test(idle_node_bitmap, i) == 1) {
-			bit_set(bitmap, i);
-			continue;
-		}
-		
-		idlecpus = _count_idle_cpus(&(select_node_ptr[i]));
-		if (idlecpus)
-			bit_set(bitmap, i);
-	}
-	idlecpus = bit_set_count(bitmap);
-	debug3("cons_res: synch found %d partially idle nodes", idlecpus);
+	/* LEFTOVER DESIGN THOUGHTS, PRESERVED HERE */
+	
+	/* 1. sort jobs by size
+	 * 2. only load core bitmaps with largest jobs that conflict
+	 * 3. sort rows by set count 
+	 * 4. add remaining jobs, starting with fullest rows
+	 * 5. compute set count: if disparity between rows got closer, then
+	 *    switch non-conflicting jobs that were added
+	 */
 
-	return SLURM_SUCCESS;
+	/* 
+	 *  Step 1: remove empty rows between non-empty rows
+	 *  Step 2: try to collapse rows
+	 *  Step 3: sort rows by size
+	 *  Step 4: try to swap jobs from different rows to pack rows
+	 */
+	
+	/* WORK IN PROGRESS - more optimization should go here, such as:
+	 *
+	 * - try collapsing jobs from higher rows to lower rows
+	 *
+	 * - produce a load array to identify cores with less load. Test
+	 * to see if those cores are in the lower row. If not, try to swap
+	 * those jobs with jobs in the lower row. If the job can be swapped
+	 * AND the lower row set_count increases, then SUCCESS! else swap
+	 * back. The goal is to pack the lower rows and "bubble up" clear
+	 * bits to the higher rows.
+	 */
 }
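+/* Editorial sketch (reviewer addition): the reshuffle above is greedy
+ * first-fit over jobs sorted by first allocated core. Reduced to one word
+ * of cores per row, the placement loop is (names hypothetical):
+ *
+ *	int place_all(uint64_t *rows, int nrows,
+ *		      const uint64_t *jobs, int njobs)
+ *	{
+ *		int i, r;
+ *		for (i = 0; i < njobs; i++) {
+ *			for (r = 0; r < nrows; r++) {
+ *				if ((rows[r] & jobs[i]) == 0) {
+ *					rows[r] |= jobs[i];
+ *					break;
+ *				}
+ *			}
+ *			if (r == nrows)
+ *				return -1;
+ *		}
+ *		return 0;
+ *	}
+ *
+ * A -1 return corresponds to the "dangling job" case above, in which the
+ * saved original row layout is restored. Greedy placement is a heuristic;
+ * as the in-code notes say, optimal packing is not guaranteed. */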
 
+
 /* allocate resources to the given job
+ * - add 'struct select_job_res' resources to 'struct part_res_record'
+ * - add job's memory requirements to 'struct node_use_record'
  *
- * if suspend = 0 then fully add job
- * if suspend = 1 then only add memory
+ * if action = 0 then add cores and memory
+ * if action = 1 then only add memory (job is suspended)
+ * if action = 2 then only add cores (job is resumed)
  */
-static int _add_job_to_nodes(struct select_cr_job *job, char *pre_err,
-			     int suspend)
+static int _add_job_to_res(struct job_record *job_ptr, int action)
 {
-	int host_index, i, j, rc = SLURM_SUCCESS;
-	uint16_t add_memory = 0;
-	uint16_t memset = job->state & CR_JOB_ALLOCATED_MEM;
-	uint16_t cpuset = job->state & CR_JOB_ALLOCATED_CPUS;
+	struct select_job_res *job = job_ptr->select_job;
+	struct part_res_record *p_ptr;
+	int i, n;
 
-	if (memset && cpuset)
-		return rc;
-	if (job->node_bitmap == NULL) {	/* likely still starting up */
-		error("job %u has no node_bitmap", job->job_id);
-		return rc;
-	}
-	if (!memset &&
-	    ((cr_type == CR_CORE_MEMORY) || (cr_type == CR_CPU_MEMORY) ||
-	     (cr_type == CR_MEMORY) || (cr_type == CR_SOCKET_MEMORY))) {
-		job->state |= CR_JOB_ALLOCATED_MEM;
-		add_memory = 1;
+	if (!job || !job->core_bitmap) {
+		error("job %u has no select data", job_ptr->job_id);
+		return SLURM_ERROR;
 	}
-	if (!cpuset && !suspend)
-		job->state |= CR_JOB_ALLOCATED_CPUS;
-
-	i = -1;
-	for (host_index = 0; host_index < select_node_cnt; host_index++) {
-		struct node_cr_record *this_node;
-		struct part_cr_record *p_ptr;
-		uint16_t offset = 0;
-
-		if (bit_test(job->node_bitmap, host_index) == 0)
-			continue;
 	
-		this_node = &select_node_ptr[host_index];
-		i++;
-
-		/* Update this node's allocated resources, starting with
-		 * memory (if applicable) */
-		
-		if (add_memory)
-			this_node->alloc_memory += job->alloc_memory[i];
+	debug3("cons_res: _add_job_to_res: job %u act %d ", job_ptr->job_id,
+		action);
 
-		if (cpuset || suspend)
-			continue;
+#if (CR_DEBUG)
+	_dump_job_res(job);
+#endif
 
-		this_node->node_state = job->node_req;
-		
-		p_ptr = get_cr_part_ptr(this_node, job->job_ptr->part_ptr);
-		if (p_ptr == NULL) {
-			error("%s: could not find part %s", pre_err,
-			      job->job_ptr->partition);
-			continue;
+	/* add memory */
+	if (action != 2) {
+		for (i = 0, n = 0; i < select_node_cnt; i++) {
+			if (!bit_test(job->node_bitmap, i))
+				continue;
+			select_node_usage[i].alloc_memory +=
+						job->memory_allocated[n];
+			if (select_node_usage[i].alloc_memory >
+			    select_node_record[i].real_memory) {
+				error("error: node %s mem is overallocated "
+				      "(%u) for job %u",
+				      select_node_record[i].node_ptr->name,
+				      select_node_usage[i].alloc_memory,
+				      job_ptr->job_id);
+				
+			}
+			n++;
 		}
+	}
+	
+	/* add cores */
+	if (action != 1) {
 
-		/* The offset could be invalid if the sysadmin reduced the
-		 * number of shared rows after this job was allocated. In
-		 * this case, we *should* attempt to place this job in
-		 * other rows. However, this may be futile if they are all
-		 * currently full.
-		 * For now, we're going to be lazy and simply NOT "allocate"
-		 * this job on the node(s) (hey - you get what you pay for). ;-)
-		 * This just means that we will not be accounting for this
-		 * job when determining available space for future jobs,
-		 * which is relatively harmless (hey, there was space when
-		 * this job was first scheduled - if the sysadmin doesn't
-		 * like it, then (s)he can terminate the job). ;-)
-		 * Note that we are still "allocating" memory for this job
-		 * (if requested). 
-		 */
-		offset = job->node_offset[i];
-		if (offset > (this_node->sockets * (p_ptr->num_rows - 1))) {
-			rc = SLURM_ERROR;
-			continue;
+		for (p_ptr = select_part_record; p_ptr; p_ptr = p_ptr->next) {
+			if (strcmp(p_ptr->name, job_ptr->part_ptr->name) == 0)
+				break;
 		}
-
-		switch (cr_type) {
-		case CR_SOCKET_MEMORY:
-		case CR_SOCKET:
-		case CR_CORE_MEMORY:
-		case CR_CORE:
-			_chk_resize_job(job, i, this_node->sockets);
-			for (j = 0; j < this_node->sockets; j++) {
-				p_ptr->alloc_cores[offset+j] +=
-							job->alloc_cores[i][j];
-				if (p_ptr->alloc_cores[offset+j] >
-						this_node->cores)
-					error("%s: Job %u Host %s offset %u "
-					      "too many allocated "
-					      "cores %u for socket %d",
-					      pre_err, job->job_id,
-					      this_node->node_ptr->name, offset,
-					      p_ptr->alloc_cores[offset+j], j);
-			}
-			break;
-		case CR_CPU_MEMORY:
-		case CR_CPU:
-			/* "CPU" count is stored in the first "core" */
-			p_ptr->alloc_cores[offset] += job->alloc_cpus[i];
-			break;
-		default:
+		if (!p_ptr) {
+			error("cons_res: could not find cr partition %s",
+				job_ptr->part_ptr->name);
+			return SLURM_ERROR;
+		}
+		if (!p_ptr->row) {
+			p_ptr->row = xmalloc(p_ptr->num_rows *
+						sizeof(struct part_row_data));
+		}
+		
+		/* find a row that can hold this job */
+		for (i = 0; i < p_ptr->num_rows; i++) {
+			if (!_can_job_fit_in_row(job, &(p_ptr->row[i])))
+				continue;
+			debug3("cons_res: adding job %u to part %s row %u",
+			job_ptr->job_id, p_ptr->name, i);
+			_add_job_to_row(job, &(p_ptr->row[i]));
 			break;
 		}
-
-		/* Remove debug only */
-		debug3("cons_res: %s: Job %u (+) node %s alloc_mem %u state %d",
-			pre_err, job->job_id, 
-			node_record_table_ptr[host_index].name,
-			this_node->alloc_memory, this_node->node_state);
-		debug3("cons_res: %s: Job %u (+) alloc_ cpus %u offset %u mem %u",
-			pre_err, job->job_id, job->alloc_cpus[i],
-			job->node_offset[i], job->alloc_memory[i]);
-		for (j = 0; j < this_node->sockets; j++)
-			debug3("cons_res: %s: Job %u (+) node %s alloc_cores[%d] %u",
-				pre_err, job->job_id, 
-				node_record_table_ptr[host_index].name, 
-				j, p_ptr->alloc_cores[offset+j]);
+		if (i >= p_ptr->num_rows) {
+			/* no row has room for this job's cores */
+			error("cons_res: job overflow: "
+			      "could not find row for job");
+			/* just add the job to the last row for now */
+			_add_job_to_row(job, &(p_ptr->row[p_ptr->num_rows-1]));
+		}
+		/* update the node state */
+		for (i = 0; i < select_node_cnt; i++) {
+			if (bit_test(job->node_bitmap, i))
+				select_node_usage[i].node_state +=
+							job->node_req;
+		}
+#if (CR_DEBUG)
+		info("DEBUG: _add_job_to_res (after):");
+		_dump_part(p_ptr);
+#endif
 	}
-	last_cr_update_time = time(NULL);
-	return rc;
+
+	return SLURM_SUCCESS;
 }
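+/* Editorial usage note (reviewer addition, hypothetical call sites):
+ *
+ *	_add_job_to_res(job_ptr, 0);	new allocation: cores and memory
+ *	_add_job_to_res(job_ptr, 1);	suspended job: account memory only
+ *	_add_job_to_res(job_ptr, 2);	resumed job: restore cores only
+ *
+ * Splitting the actions lets suspend/resume toggle core usage while a
+ * job's memory remains charged for its whole lifetime. */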
 
-/* deallocate resources that were assigned to this job 
+
+/* deallocate resources to the given job
+ * - subtract 'struct select_job_res' resources from 'struct part_res_record'
+ * - subtract job's memory requirements from 'struct node_use_record'
+ *
+ * if action = 0 then subtract cores and memory
+ * if action = 1 then only subtract memory (suspended job was terminated)
+ * if action = 2 then only subtract cores (job is suspended)
  *
- * if remove_all = 1: deallocate all resources
- * if remove_all = 0: the job has been suspended, so just deallocate CPUs
  */
-static int _rm_job_from_nodes(struct node_cr_record *select_node_ptr,
-			      struct select_cr_job *job, char *pre_err,
-			      int remove_all)
+static int _rm_job_from_res(struct part_res_record *part_record_ptr,
+			    struct node_use_record *node_usage,
+			    struct job_record *job_ptr, int action)
 {
-	int host_index, i, j, k, rc = SLURM_SUCCESS;
+	struct select_job_res *job = job_ptr->select_job;
+	int i, n;
 
-	uint16_t memset = job->state & CR_JOB_ALLOCATED_MEM;
-	uint16_t cpuset = job->state & CR_JOB_ALLOCATED_CPUS;
-	uint16_t remove_memory = 0;
-
-	if (!memset && !cpuset)
-		return rc;
-	if (!cpuset && !remove_all)
-		return rc;
-	if (memset && remove_all &&
-	    ((cr_type == CR_CORE_MEMORY) || (cr_type == CR_CPU_MEMORY) ||
-	     (cr_type == CR_MEMORY) || (cr_type == CR_SOCKET_MEMORY))) {
-	 	remove_memory = 1;
-		job->state &= ~CR_JOB_ALLOCATED_MEM;
+	if (!job || !job->core_bitmap) {
+		error("job %u has no select data", job_ptr->job_id);
+		return SLURM_ERROR;
 	}
-	if (cpuset)
-	 	job->state &= ~CR_JOB_ALLOCATED_CPUS;
-
-	i = -1;
-	for (host_index = 0; host_index < select_node_cnt; host_index++) {
-		struct node_cr_record *this_node;
-		struct part_cr_record *p_ptr;
-		uint16_t offset;
-		
-		if (bit_test(job->node_bitmap, host_index) == 0)
-			continue;
-
-		this_node = &select_node_ptr[host_index];
-		i++;
-
-		/* Update this nodes allocated resources, beginning with
-		 * memory (if applicable) */
-		if (remove_memory) {
-			if (this_node->alloc_memory >= job->alloc_memory[i])
-				this_node->alloc_memory -= job->alloc_memory[i];
-			else {
-				error("%s: alloc_memory underflow on %s",
-				      pre_err, this_node->node_ptr->name);
-				this_node->alloc_memory = 0;
-				rc = SLURM_ERROR;  
+	
+	debug3("cons_res: _rm_job_from_res: job %u act %d", job_ptr->job_id,
+		action);
+#if (CR_DEBUG)
+	_dump_job_res(job);
+#endif
+	
+	/* subtract memory */
+	if (action != 2) {
+		for (i = 0, n = 0; i < select_node_cnt; i++) {
+			if (!bit_test(job->node_bitmap, i))
+				continue;
+			if (node_usage[i].alloc_memory <
+			    job->memory_allocated[n]) {
+				error("error: node %s mem is underallocated "
+				      "(%u-%u) for job %u",
+				      select_node_record[i].node_ptr->name,
+				      node_usage[i].alloc_memory,
+				      job->memory_allocated[n], 
+				      job_ptr->job_id);
+				node_usage[i].alloc_memory = 0;
+			} else {
+				node_usage[i].alloc_memory -=
+						job->memory_allocated[n];
 			}
+			n++;
 		}
+	}
+	
+	/* subtract cores */
+	if (action != 1) {
+		/* reconstruct rows with remaining jobs */
+		struct part_res_record *p_ptr;
 		
-		if (!cpuset)
-			continue;
-		
-		p_ptr = get_cr_part_ptr(this_node, job->job_ptr->part_ptr);
-		if (p_ptr == NULL) {
-			error("%s: could not find part %s", pre_err,
-			      job->job_ptr->partition);
-			continue;
+		for (p_ptr = part_record_ptr; p_ptr; p_ptr = p_ptr->next) {
+			if (strcmp(p_ptr->name, job_ptr->part_ptr->name) == 0)
+				break;
 		}
-
-		/* If the offset is no longer valid, then the job was never
-		 * "allocated" on these cores (see add_job_to_nodes).
-		 * Therefore just continue. */
-		offset = job->node_offset[i];
-		if (offset > (this_node->sockets * (p_ptr->num_rows - 1))) {
-			rc = SLURM_ERROR;
-			continue;
+		if (!p_ptr) {
+			error("error: 'rm' could not find part %s",
+				job_ptr->part_ptr->name);
+			return SLURM_ERROR;
 		}
 		
-		switch(cr_type) {
-		case CR_SOCKET_MEMORY:
-		case CR_SOCKET:
-		case CR_CORE_MEMORY:
-		case CR_CORE:
-			_chk_resize_job(job, i, this_node->sockets);
-			for (j = 0; j < this_node->sockets; j++) {
-				if (p_ptr->alloc_cores[offset+j] >= 
-						job->alloc_cores[i][j])
-					p_ptr->alloc_cores[offset+j] -= 
-							job->alloc_cores[i][j];
-				else {
-					error("%s: alloc_cores underflow on %s",
-					      pre_err, 
-					      node_record_table_ptr[host_index].name);
-					p_ptr->alloc_cores[offset+j] = 0;
-					rc = SLURM_ERROR;
+		if (!p_ptr->row) {
+			return SLURM_SUCCESS;
+		}
+		
+		/* remove the job from the job_list */
+		n = 0;
+		for (i = 0; i < p_ptr->num_rows; i++) {
+			uint32_t j;
+			for (j = 0; j < p_ptr->row[i].num_jobs; j++) {
+				if (p_ptr->row[i].job_list[j] != job)
+					continue;
+				debug3("cons_res: removing job %u from "
+				       "part %s row %u",
+				       job_ptr->job_id, p_ptr->name, i);
+				for (; j < p_ptr->row[i].num_jobs-1; j++) {
+					p_ptr->row[i].job_list[j] =
+						p_ptr->row[i].job_list[j+1];
 				}
+				p_ptr->row[i].job_list[j] = NULL;
+				p_ptr->row[i].num_jobs -= 1;
+				/* found job - we're done */
+				n = 1;
+				i = p_ptr->num_rows;
+				break;
 			}
-			break;
-		case CR_CPU_MEMORY:
-		case CR_CPU:
-			/* CPU count is stored in the first "core" */
-			if (p_ptr->alloc_cores[offset] >= job->alloc_cpus[i])
-				p_ptr->alloc_cores[offset] -=
-							job->alloc_cpus[i];
-			else {
-				error("%s: CPU underflow (%u - %u) on %s",
-				      pre_err, p_ptr->alloc_cores[offset],
-				      job->alloc_cpus[i], 
-				      node_record_table_ptr[host_index].name);
-				p_ptr->alloc_cores[offset] = 0;
-				rc = SLURM_ERROR;  
-			}
-			break;
-		default:
-			break;
 		}
-
-		/* if all cores are available, set NODE_CR_AVAILABLE */
-		if (this_node->node_state != NODE_CR_AVAILABLE) {
-			/* need to scan all partitions */
-			struct part_cr_record *pptr;
-			int count = 0;
-			for (pptr = this_node->parts; pptr; pptr = pptr->next) {
-				/* just need to check single row partitions */
-				if (pptr->num_rows > 1)
+		
+		if (n) {
+			/* job was found and removed, so refresh the bitmaps */
+			_build_row_bitmaps(p_ptr);
+
+			/* Adjust the node_state of all nodes affected by
+			 * the removal of this job. If all cores are now
+			 * available, set node_state = NODE_CR_AVAILABLE
+			 */
+			for (n = 0; n < select_node_cnt; n++) {
+				if (bit_test(job->node_bitmap, n) == 0)
 					continue;
-				k = pptr->num_rows * this_node->sockets;
-				for (j = 0; j < k; j++) {
-					count += p_ptr->alloc_cores[j];
+				if (node_usage[n].node_state >=
+				    job->node_req) {
+					node_usage[n].node_state -=
+								job->node_req;
+				} else {
+					error("cons_res:_rm_job_from_res: "
+						"node_state mis-count");
+					node_usage[n].node_state =
+							NODE_CR_AVAILABLE;
 				}
-				if (count)
-					break;
 			}
-			if (count == 0)
-				this_node->node_state = NODE_CR_AVAILABLE;
 		}
-
-		debug3("%s: Job %u (-) node %s alloc_mem %u offset %d",
-			pre_err, job->job_id, this_node->node_ptr->name,
-			this_node->alloc_memory, offset);
-		for (j = 0; j < this_node->sockets; j++)
-			debug3("cons_res: %s: Job %u (-) node %s alloc_cores[%d] %u",
-				pre_err, job->job_id, 
-				node_record_table_ptr[host_index].name, 
-				j, p_ptr->alloc_cores[offset+j]);
 	}
-	last_cr_update_time = time(NULL);
-	return rc;
-}
 
-static bool _enough_nodes(int avail_nodes, int rem_nodes, 
-			  uint32_t min_nodes, uint32_t req_nodes)
-{
-	int needed_nodes;
-
-	if (req_nodes > min_nodes)
-		needed_nodes = rem_nodes + min_nodes - req_nodes;
-	else
-		needed_nodes = rem_nodes;
-
-	return (avail_nodes >= needed_nodes);
+	return SLURM_SUCCESS;
 }
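+/* Editorial note (reviewer addition): _rm_job_from_res() mirrors
+ * _add_job_to_res() with complementary action codes: 0 releases cores and
+ * memory, 1 releases memory only (a suspended job terminated; its cores
+ * were released at suspend time), 2 releases cores only (job is being
+ * suspended). The underflow checks clamp to zero and log rather than
+ * fail, so an accounting bug degrades bookkeeping instead of aborting. */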
 
+
 /*
  * init() is called when the plugin is loaded, before any other functions
  * are called.  Put global initialization here.
@@ -1151,15 +1024,15 @@ extern int init(void)
 
 extern int fini(void)
 {
-	_clear_job_list();
-	if (select_cr_job_list) {
-		list_destroy(select_cr_job_list);
-		select_cr_job_list = NULL;
-	}
-
-	_xfree_select_nodes(select_node_ptr, select_node_cnt);
-	select_node_ptr = NULL;
-	select_node_cnt = 0;
+	_destroy_node_data(select_node_usage, select_node_record);
+	select_node_record = NULL;
+	select_node_usage = NULL;
+	_destroy_part_data(select_part_record);
+	select_part_record = NULL;
+	xfree(cr_node_num_cores);
+	xfree(cr_num_core_count);
+	cr_node_num_cores = NULL;
+	cr_num_core_count = NULL;
 
 	verbose("%s shutting down ...", plugin_name);
 	return SLURM_SUCCESS;
@@ -1170,905 +1043,114 @@ extern int fini(void)
  * node selection API.
  */
 
-static int _cr_write_state_buffer(int fd, Buf buffer)
-{
-	int error_code = SLURM_SUCCESS;
-	char *buf  = get_buf_data(buffer);
-	size_t len = get_buf_offset(buffer);
-	while(1) {
-		int wrote = write (fd, buf, len);
-		if ((wrote < 0) && (errno == EINTR))
-			continue;
-		if (wrote == 0)
-			break;
-		if (wrote < 0) {
-			error ("Can't save select/cons_res state: %m");
-			error_code = SLURM_ERROR;
-			break;   
-		}
-		buf += wrote;
-		len -= wrote;
-		if (len == 0) {
-			break;
-		}
-		if (len <= 0) {
-			error ("Can't save select/cons_res state: %m");
-			error_code = SLURM_ERROR;
-			break;   
-		}
-	}
-	return error_code;
-}
-
-static int _cr_read_state_buffer(int fd, char **data_p, int *data_size_p)
-{
-	int error_code = SLURM_SUCCESS;
-        int data_allocated = 0, data_read = 0, data_size = 0;
-	char *data = NULL;
-	int buffer_size = 1024;
-
-        if (fd < 0) {
-	    	error_code = SLURM_ERROR; 
-                error("No fd for select/cons_res state recovery");
-	}
-
-	data_allocated = buffer_size;
-	data = xmalloc(data_allocated);
-	*data_p      = data;
-	*data_size_p = data_size;
-	while (1) {
-		data_read = read (fd, &data[data_size],
-				  buffer_size);
-		if ((data_read < 0) && (errno == EINTR)) {
-			continue;
-		}
-		if (data_read < 0) {
-			error ("Read error recovering select/cons_res state");
-			error_code = SLURM_ERROR;
-			break;
-		} else if (data_read == 0) {
-			break;
-		}
-		data_size      += data_read;
-		data_allocated += data_read;
-		xrealloc(data, data_allocated);
-		*data_p      = data;
-		*data_size_p = data_size;
-	}
-
-	return error_code;
-}
-
-static int _cr_pack_job(struct select_cr_job *job, Buf buffer)
-{
-    	int i;
-	uint32_t nhosts = job->nhosts;
-
-	/* Do not write job->state since we re-establish
-	 * the job's state on the nodes at restart time.
-	 * Likewise for job_ptr and node_bitmap. */
-	pack32(job->job_id, buffer);
-	pack32(job->nprocs, buffer);
-	pack32(job->nhosts, buffer);
-	pack16(job->node_req, buffer);
-
-	pack16_array(job->cpus, nhosts, buffer);
-	pack16_array(job->alloc_cpus, nhosts, buffer);
-	pack16_array(job->node_offset, nhosts, buffer);
-
-	if (job->alloc_cores) {
-		pack16((uint16_t) 1, buffer);
-		for (i = 0; i < nhosts; i++) {
-			uint16_t nsockets = job->num_sockets[i];
-			pack16(nsockets, buffer);
-			pack16_array(job->alloc_cores[i], nsockets, buffer);
-		}
-	} else {
-		pack16((uint16_t) 0, buffer);
-	}
-	pack32_array(job->alloc_memory, nhosts, buffer);
-
-	return 0;
-}
-
-static int _cr_unpack_job(struct select_cr_job *job, Buf buffer)
-{
-    	int i;
-    	uint16_t have_alloc_cores;
-    	uint32_t len32;
-	uint32_t nhosts = 0;
-	uint16_t bit_cnt;
-
-	safe_unpack32(&job->job_id, buffer);
-	safe_unpack32(&job->nprocs, buffer);
-	safe_unpack32(&job->nhosts, buffer);
-	safe_unpack16(&bit_cnt, buffer);
-	nhosts = job->nhosts;
-	job->node_req = bit_cnt;
-
-	safe_unpack16_array(&job->cpus, &len32, buffer);
-	safe_unpack16_array(&job->alloc_cpus, &len32, buffer);
-	safe_unpack16_array(&job->node_offset, &len32, buffer);
-
-	safe_unpack16(&have_alloc_cores, buffer);
-	if (have_alloc_cores) {
-		job->num_sockets = (uint16_t *) xmalloc(job->nhosts * 
-				sizeof(uint16_t));
-		job->alloc_cores = (uint16_t **) xmalloc(job->nhosts * 
-				sizeof(uint16_t *));
-		for (i = 0; i < nhosts; i++) {
-			safe_unpack16(&job->num_sockets[i], buffer);
-			safe_unpack16_array(&job->alloc_cores[i], &len32, buffer);
-			if (len32 != job->num_sockets[i])
-				goto unpack_error;
-		}
-	}
-	safe_unpack32_array((uint32_t**)&job->alloc_memory, &len32, buffer);
-	if (len32 != nhosts)
-		 goto unpack_error;
-
-	return 0;
-
-unpack_error:
-	_xfree_select_cr_job(job);
-	return -1;
-}
-
-extern int select_p_state_save(char *dir_name)
-{
-	int error_code = SLURM_SUCCESS;
-	ListIterator job_iterator;
-	struct select_cr_job *job = NULL;
-	Buf buffer = NULL;
-	int state_fd;
-	uint16_t job_cnt;
-	char *file_name = NULL;
-	static time_t last_save_time;
-
-	if (last_save_time > last_cr_update_time)
-		return SLURM_SUCCESS;
-
-	debug3("cons_res: select_p_state_save");
-
-	/*** create the state file ***/
-        file_name = xstrdup(dir_name);
-        xstrcat(file_name, "/cons_res_state");
-        (void) unlink(file_name);
-        state_fd = creat (file_name, 0600);
-        if (state_fd < 0) {
-                error("Can't save state, error creating file %s", file_name);
-		xfree(file_name);
-                return SLURM_ERROR;
-	}
-
-	buffer = init_buf(1024);
-
-	/*** record the plugin type ***/
-	packstr((char*)plugin_type, buffer);
-	pack32(plugin_version, buffer);
-	pack16(cr_type,        buffer);
-	pack32(pstate_version, buffer);
-
-	slurm_mutex_lock(&cr_mutex);
-	/*** pack the select_cr_job array ***/
-	if (select_cr_job_list) {
-		job_cnt = list_count(select_cr_job_list);
-		pack16(job_cnt, buffer);
-		job_iterator = list_iterator_create(select_cr_job_list);
-		while ((job = (struct select_cr_job *) list_next(job_iterator))) {
-			_cr_pack_job(job, buffer);
-		}
-		list_iterator_destroy(job_iterator);
-	} else
-		pack16((uint16_t) 0, buffer);	/* job count */
-	slurm_mutex_unlock(&cr_mutex);
-
-	/*** close the state file ***/
-	error_code = _cr_write_state_buffer(state_fd, buffer);
-	if (error_code == SLURM_SUCCESS)
-		last_save_time = time(NULL);
-	close (state_fd);
-	xfree(file_name);
-	if (buffer)
-		free_buf(buffer);
-
-	return error_code;
-}
-
-
-/* This is Part 2 of a 4-part procedure which can be found in
- * src/slurmctld/read_config.c. See select_p_node_init for the
- * whole story.
- */
-extern int select_p_state_restore(char *dir_name)
-{
-	int error_code = SLURM_SUCCESS;
-	int state_fd, i;
-	char *file_name = NULL;
-	struct select_cr_job *job;
-	Buf buffer = NULL;
-	uint32_t len32;
-	char *data = NULL;
-	int data_size = 0;
-	char *restore_plugin_type = NULL;
-	uint32_t restore_plugin_version = 0;
-	uint16_t restore_plugin_crtype  = 0;
-	uint32_t restore_pstate_version = 0;
-	uint16_t job_cnt;
-
-	info("cons_res: select_p_state_restore");
-
-	if (select_cr_job_list)		/* preserve current job info */
-		return SLURM_SUCCESS;
-
-	if (!dir_name) {
-		info("Starting cons_res with clean slate");
-		return SLURM_SUCCESS;
-	}
-	file_name = xstrdup(dir_name);
-	xstrcat(file_name, "/cons_res_state");
-	state_fd = open (file_name, O_RDONLY);
-	if (state_fd < 0) {
-		error ("Can't restore state, error opening file %s",
-			file_name);
-		error ("Starting cons_res with clean slate");
-		xfree(file_name);
-		return SLURM_SUCCESS;
-	}
-
-	error_code = _cr_read_state_buffer(state_fd, &data, &data_size);
-
-	if (error_code != SLURM_SUCCESS) {
-		error ("Can't restore state, error reading file %s",
-			file_name);
-		error ("Starting cons_res with clean slate");
-		xfree(data);
-		xfree(file_name);
-		return SLURM_SUCCESS;
-	}
-
-	buffer = create_buf (data, data_size);
-	data = NULL;    /* now in buffer, don't xfree() */
-
-	/*** retrieve the plugin type ***/
-	safe_unpackstr_xmalloc(&restore_plugin_type, &len32, buffer);
-	safe_unpack32(&restore_plugin_version, buffer);
-	safe_unpack16(&restore_plugin_crtype,  buffer);
-	safe_unpack32(&restore_pstate_version, buffer);
-
-	if (restore_plugin_type == NULL)
-		goto unpack_error;
-	if ((strcmp(restore_plugin_type, plugin_type) != 0) ||
-	    (restore_plugin_version != plugin_version) ||
-	    (restore_plugin_crtype  != cr_type) ||
-	    (restore_pstate_version != pstate_version)) { 
-		error ("Can't restore state, state version mismatch: "
-			"saw %s/%u/%u/%u, expected %s/%u/%u/%u",
-			restore_plugin_type,
-			restore_plugin_version,
-			restore_plugin_crtype,
-			restore_pstate_version,
-			plugin_type,
-			plugin_version,
-			cr_type,
-			pstate_version);
-		error ("Starting cons_res with clean slate");
-		xfree(restore_plugin_type);
-		if (buffer)
-			free_buf(buffer);
-		xfree(file_name);
-		return SLURM_SUCCESS;
-	}
-
-	/*** unpack the select_cr_job array ***/
-	_clear_job_list();
-	if (select_cr_job_list) {
-		list_destroy(select_cr_job_list);
-		select_cr_job_list = NULL;
-	}
-	select_cr_job_list = list_create(NULL);
-
-	safe_unpack16(&job_cnt, buffer);
-	for (i=0; i<job_cnt; i++) {
-		job = xmalloc(sizeof(struct select_cr_job));
-		if (_cr_unpack_job(job, buffer) != 0)
-			goto unpack_error;
-		job->job_ptr = find_job_record(job->job_id);
-		if (job->job_ptr == NULL) {
-			error("cons_res: recovered non-existent job %u",
-				job->job_id);
-			_xfree_select_cr_job(job);
-		} else {
-			/* NOTE: Nodes can be added or removed from the
-			 * system on a restart */
-			list_append(select_cr_job_list, job);
-			debug2("recovered cons_res job data for job %u", 
-				job->job_id);
-		}
-	}
-
-	/*** cleanup after restore ***/
-        if (buffer)
-                free_buf(buffer);
-        xfree(restore_plugin_type);
-	xfree(file_name);
-
-	return SLURM_SUCCESS;
-
-unpack_error:
-        if (buffer)
-                free_buf(buffer);
-        xfree(restore_plugin_type);
-
-	error ("Can't restore state, error unpacking file %s", file_name);
-	error ("Starting cons_res with clean slate");
-	return SLURM_SUCCESS;
-}
-
-/* This is Part 3 of a 4-part procedure which can be found in
- * src/slurmctld/read_config.c. See select_p_node_init for the
- * whole story.
- */
-extern int select_p_job_init(List job_list)
-{
-	struct select_cr_job *job = NULL;
-	ListIterator iterator;
-	int suspend;
-
-	info("cons_res: select_p_job_init");
-
-	/* Note: select_cr_job_list restored in select_p_state_restore
-	 * except on a cold-start */
-	if (!select_cr_job_list) {
-		select_cr_job_list = list_create(NULL);
-		return SLURM_SUCCESS;
-	}
-
-	/* Now synchronize the node information to the active jobs */
-	if (list_count(select_cr_job_list) == 0)
-		return SLURM_SUCCESS;
-
-	iterator = list_iterator_create(select_cr_job_list);
-	while ((job = (struct select_cr_job *) list_next(iterator))) {
-		job->job_ptr = find_job_record(job->job_id);
-		if (job->job_ptr == NULL) {
-			error("select_p_job_init: could not find job %u",
-			      job->job_id);
-			list_remove(iterator);
-			continue;
-		}
-		if (job->job_ptr->job_state == JOB_SUSPENDED)
-			suspend = 1;
-		else
-			suspend = 0;
-		FREE_NULL_BITMAP(job->node_bitmap);
-		node_name2bitmap(job->job_ptr->nodes, true,
-				 &job->node_bitmap);
-		_add_job_to_nodes(job, "select_p_job_init", suspend);
-	}
-	list_iterator_destroy(iterator);
-	last_cr_update_time = time(NULL);
-
-	return SLURM_SUCCESS;
-}
-
 /* This is Part 1 of a 4-part procedure which can be found in
  * src/slurmctld/read_config.c. The whole story goes like this:
  *
- * Step 1: select_g_node_init       : initializes 'select_node_ptr' global array
- *                                    sets node_ptr, node_name, and num_sockets
- * Step 2: select_g_state_restore   : IFF a cons_res state file exists:
- *                                    loads global 'select_cr_job_list' with
- *                                    saved job data
- * Step 3: select_g_job_init        : creates global 'select_cr_job_list' if
- *                                    nothing was recovered from state file.
- *                                    Rebuilds select_node_ptr global array.
- * Step 4: select_g_update_nodeinfo : called from reset_job_bitmaps() with each
- *                                    valid recovered job_ptr AND from
+ * Step 1: select_g_node_init       : initializes the global node arrays
+ * Step 2: select_g_state_restore   : NO-OP - nothing to restore
+ * Step 3: select_g_job_init        : NO-OP - nothing to initialize
+ * Step 4: select_g_update_nodeinfo : called from reset_job_bitmaps() with
+ *                                    each valid recovered job_ptr AND from
  *                                    select_nodes(), this procedure adds job
- *                                    data to the 'select_node_ptr' global array
+ *                                    data to the 'select_part_record' global
+ *                                    array
  */
 extern int select_p_node_init(struct node_record *node_ptr, int node_cnt)
 {
 	int i;
 
 	info("cons_res: select_p_node_init");
-
 	if (node_ptr == NULL) {
 		error("select_g_node_init: node_ptr == NULL");
 		return SLURM_ERROR;
 	}
-
 	if (node_cnt < 0) {
 		error("select_g_node_init: node_cnt < 0");
 		return SLURM_ERROR;
 	}
 
-	/* completely rebuild node data */
-	_xfree_select_nodes(select_node_ptr, select_node_cnt);
-	select_node_cnt = node_cnt;
-	select_node_ptr = xmalloc(sizeof(struct node_cr_record) *
-							select_node_cnt);
+	/* initialize global core data structures */
 	select_fast_schedule = slurm_get_fast_schedule();
+	_init_global_core_data(node_ptr, node_cnt);
+	
+	_destroy_node_data(select_node_usage, select_node_record);
+	select_node_cnt  = node_cnt;
+	select_node_record = xmalloc(node_cnt * sizeof(struct node_res_record));
+	select_node_usage  = xmalloc(node_cnt * sizeof(struct node_use_record));
 
 	for (i = 0; i < select_node_cnt; i++) {
-		select_node_ptr[i].node_ptr = &node_ptr[i];
+		select_node_record[i].node_ptr = &node_ptr[i];
 		if (select_fast_schedule) {
 			struct config_record *config_ptr;
 			config_ptr = node_ptr[i].config_ptr;
-			select_node_ptr[i].cpus        = config_ptr->cpus;
-			select_node_ptr[i].sockets     = config_ptr->sockets;
-			select_node_ptr[i].cores       = config_ptr->cores;
-			select_node_ptr[i].threads     = config_ptr->threads;
-			select_node_ptr[i].real_memory = config_ptr->real_memory;
+			select_node_record[i].cpus    = config_ptr->cpus;
+			select_node_record[i].sockets = config_ptr->sockets;
+			select_node_record[i].cores   = config_ptr->cores;
+			select_node_record[i].vpus    = config_ptr->threads;
+			select_node_record[i].real_memory =
+							config_ptr->real_memory;
 		} else {
-			select_node_ptr[i].cpus        = node_ptr[i].cpus;
-			select_node_ptr[i].sockets     = node_ptr[i].sockets;
-			select_node_ptr[i].cores       = node_ptr[i].cores;
-			select_node_ptr[i].threads     = node_ptr[i].threads;
-			select_node_ptr[i].real_memory = node_ptr[i].real_memory;
+			select_node_record[i].cpus    = node_ptr[i].cpus;
+			select_node_record[i].sockets = node_ptr[i].sockets;
+			select_node_record[i].cores   = node_ptr[i].cores;
+			select_node_record[i].vpus    = node_ptr[i].threads;
+			select_node_record[i].real_memory =
+							node_ptr[i].real_memory;
 		}
-		select_node_ptr[i].node_state = NODE_CR_AVAILABLE;
-		/* xmalloc initialized everything to zero, 
-		 * including alloc_memory and parts */
-		_create_node_part_array(&(select_node_ptr[i]));
+		select_node_usage[i].node_state = NODE_CR_AVAILABLE;
 	}
+	_create_part_data();
 
 	return SLURM_SUCCESS;
 }
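+/* Editorial note (reviewer addition): with FastSchedule enabled, node
+ * resources come from the slurm.conf config_record; otherwise from what
+ * each node actually registered. The new "vpus" field appears to hold
+ * threads per core (virtual processors), replacing the old "threads"
+ * member of node_cr_record. */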
 
-extern int select_p_block_init(List part_list)
+extern int select_p_state_save(char *dir_name)
 {
+	/* nothing to save */
 	return SLURM_SUCCESS;
 }
 
-/* return the number of tasks that the given
- * job can run on the indexed node */
-static int _get_task_cnt(struct job_record *job_ptr, const int node_index,
-			 int *task_cnt, int *freq, int size)
-{
-	int i, pos, tasks;
-	uint16_t * layout_ptr = NULL;
-
-	layout_ptr = job_ptr->details->req_node_layout;
-
-	pos = 0;
-	for (i = 0; i < size; i++) {
-		if (pos+freq[i] > node_index)
-			break;
-		pos += freq[i];
-	}
-	tasks = task_cnt[i];
-	if (layout_ptr && bit_test(job_ptr->details->req_node_bitmap, i)) {
-		pos = bit_get_pos_num(job_ptr->details->req_node_bitmap, i);
-		tasks = MIN(tasks, layout_ptr[pos]);
-	} else if (layout_ptr) {
-		tasks = 0; /* should not happen? */
-	}
-	return tasks;
-}
-
-static int _eval_nodes(struct job_record *job_ptr, bitstr_t * bitmap,
-		       uint32_t min_nodes, uint32_t max_nodes,
-		       uint32_t req_nodes, int *task_cnt, int *freq, 
-		       int array_size)
+/* This is Part 2 of a 4-part procedure which can be found in
+ * src/slurmctld/read_config.c. See select_p_node_init for the
+ * whole story.
+ */
+extern int select_p_state_restore(char *dir_name)
 {
-	int i, f, index, error_code = SLURM_ERROR;
-	int *consec_nodes;	/* how many nodes we can add from this 
-				 * consecutive set of nodes */
-	int *consec_cpus;	/* how many nodes we can add from this 
-				 * consecutive set of nodes */
-	int *consec_start;	/* where this consecutive set starts (index) */
-	int *consec_end;	/* where this consecutive set ends (index) */
-	int *consec_req;	/* are nodes from this set required 
-				 * (in req_bitmap) */
-	int consec_index, consec_size, sufficient;
-	int rem_cpus, rem_nodes;	/* remaining resources desired */
-	int best_fit_nodes, best_fit_cpus, best_fit_req;
-	int best_fit_sufficient, best_fit_index = 0;
-	int avail_cpus, ll;	/* ll = layout array index */
-	struct multi_core_data *mc_ptr = NULL;
-	uint16_t * layout_ptr = NULL;
-	bool required_node;
-
-	xassert(bitmap);
-	
-	if (bit_set_count(bitmap) < min_nodes)
-		return error_code;
-
-	layout_ptr = job_ptr->details->req_node_layout;
-	mc_ptr = job_ptr->details->mc_ptr;
-
-	consec_size = 50;	/* start allocation for 50 sets of 
-				 * consecutive nodes */
-	consec_cpus  = xmalloc(sizeof(int) * consec_size);
-	consec_nodes = xmalloc(sizeof(int) * consec_size);
-	consec_start = xmalloc(sizeof(int) * consec_size);
-	consec_end   = xmalloc(sizeof(int) * consec_size);
-	consec_req   = xmalloc(sizeof(int) * consec_size);
-
-	/* Build table with information about sets of consecutive nodes */
-	consec_index = 0;
-	consec_cpus[consec_index] = consec_nodes[consec_index] = 0;
-	consec_req[consec_index] = -1;	/* no required nodes here by default */
-
-	rem_cpus = job_ptr->num_procs;
-	if (req_nodes > min_nodes)
-		rem_nodes = req_nodes;
-	else
-		rem_nodes = min_nodes;
-
-	i = 0;
-	f = 0;
-	for (index = 0, ll = -1; index < select_node_cnt; index++, f++) {
-		if (f >= freq[i]) {
-			f = 0;
-			i++;
-		}
-		if (job_ptr->details->req_node_bitmap) {
-			required_node =
-				bit_test(job_ptr->details->req_node_bitmap,
-					 index);
-		} else
-			required_node = false;
-		if (layout_ptr && required_node)
-			ll++;
-		if (bit_test(bitmap, index)) {
-			if (consec_nodes[consec_index] == 0)
-				consec_start[consec_index] = index;
-			avail_cpus = task_cnt[i];
-			if (layout_ptr && required_node){
-				avail_cpus = MIN(avail_cpus, layout_ptr[ll]);
-			} else if (layout_ptr) {
-				avail_cpus = 0; /* should not happen? */
-			}
-			if ((max_nodes > 0) && required_node) {
-				if (consec_req[consec_index] == -1) {
-					/* first required node in set */
-					consec_req[consec_index] = index;
-				}
-				rem_cpus -= avail_cpus;
-				rem_nodes--;
-				/* leaving bitmap set, decrement max limit */
-				max_nodes--;
-			} else {	/* node not selected (yet) */
-				bit_clear(bitmap, index);
-				consec_cpus[consec_index] += avail_cpus;
-				consec_nodes[consec_index]++;
-			}
-		} else if (consec_nodes[consec_index] == 0) {
-			consec_req[consec_index] = -1;
-			/* already picked up any required nodes */
-			/* re-use this record */
-		} else {
-			consec_end[consec_index] = index - 1;
-			if (++consec_index >= consec_size) {
-				consec_size *= 2;
-				xrealloc(consec_cpus, sizeof(int)*consec_size);
-				xrealloc(consec_nodes, sizeof(int)*consec_size);
-				xrealloc(consec_start, sizeof(int)*consec_size);
-				xrealloc(consec_end, sizeof(int)*consec_size);
-				xrealloc(consec_req, sizeof(int)*consec_size);
-			}
-			consec_cpus[consec_index] = 0;
-			consec_nodes[consec_index] = 0;
-			consec_req[consec_index] = -1;
-		}
-	}
-	if (consec_nodes[consec_index] != 0)
-		consec_end[consec_index++] = index - 1;
-	
-	for (i = 0; i < consec_index; i++) {
-		debug3("cons_res: eval_nodes: %d consec c=%d n=%d b=%d e=%d r=%d",
-			i, consec_cpus[i], consec_nodes[i], consec_start[i],
-			consec_end[i], consec_req[i]);
-	}
-	
-	/* accumulate nodes from these sets of consecutive nodes until */
-	/*   sufficient resources have been accumulated */
-	while (consec_index && (max_nodes > 0)) {
-		best_fit_cpus = best_fit_nodes = best_fit_sufficient = 0;
-		best_fit_req = -1;	/* first required node, -1 if none */
-		for (i = 0; i < consec_index; i++) {
-			if (consec_nodes[i] == 0)
-				continue;
-			sufficient =  (consec_cpus[i] >= rem_cpus)
-				&& _enough_nodes(consec_nodes[i], rem_nodes,
-						 min_nodes, req_nodes);
-			
-			/* if first possibility OR */
-			/* contains required nodes OR */
-			/* first set large enough for request OR */
-			/* tightest fit (less resource waste) OR */
-			/* nothing yet large enough, but this is biggest */
-			if ((best_fit_nodes == 0) ||
-			    ((best_fit_req == -1) && (consec_req[i] != -1)) ||
-			    (sufficient && (best_fit_sufficient == 0)) ||
-			    (sufficient && (consec_cpus[i] < best_fit_cpus)) ||
-			    (!sufficient && (consec_cpus[i] > best_fit_cpus))) {
-				best_fit_cpus = consec_cpus[i];
-				best_fit_nodes = consec_nodes[i];
-				best_fit_index = i;
-				best_fit_req = consec_req[i];
-				best_fit_sufficient = sufficient;
-			}
-		}
-		if (best_fit_nodes == 0)
-			break;
-		if (job_ptr->details->contiguous &&
-		    ((best_fit_cpus < rem_cpus) ||
-		     (!_enough_nodes(best_fit_nodes, rem_nodes,
-				     min_nodes, req_nodes))))
-			break;	/* no hole large enough */
-		if (best_fit_req != -1) {
-			/* This collection of nodes includes required ones
-			 * select nodes from this set, first working up
-			 * then down from the required nodes */
-			for (i = best_fit_req;
-			     i <= consec_end[best_fit_index]; i++) {
-				if ((max_nodes <= 0)
-				    ||  ((rem_nodes <= 0) && (rem_cpus <= 0)))
-					break;
-				if (bit_test(bitmap, i))
-					continue;
-				bit_set(bitmap, i);
-				rem_nodes--;
-				max_nodes--;
-				avail_cpus = _get_task_cnt(job_ptr, i,
-							   task_cnt, freq,
-							   array_size);
-				rem_cpus -= avail_cpus;
-			}
-			for (i = (best_fit_req - 1);
-			     i >= consec_start[best_fit_index]; i--) {
-				if ((max_nodes <= 0)
-				    ||  ((rem_nodes <= 0) && (rem_cpus <= 0)))
-					break;
-				if (bit_test(bitmap, i)) 
-					continue;
-				avail_cpus = _get_task_cnt(job_ptr, i,
-							   task_cnt, freq,
-							   array_size);
-				if (avail_cpus <= 0)
-					continue;
-				rem_cpus -= avail_cpus;
-				bit_set(bitmap, i);
-				rem_nodes--;
-				max_nodes--;
-			}
-		} else {
-			for (i = consec_start[best_fit_index];
-			     i <= consec_end[best_fit_index]; i++) {
-				if ((max_nodes <= 0)
-				    || ((rem_nodes <= 0) && (rem_cpus <= 0)))
-					break;
-				if (bit_test(bitmap, i))
-					continue;
-				avail_cpus = _get_task_cnt(job_ptr, i,
-							   task_cnt, freq,
-							   array_size);
-				if (avail_cpus <= 0)
-					continue;
-				if ((max_nodes == 1) && 
-				    (avail_cpus < rem_cpus)) {
-					/* Job can only take one more node and
-					 * this one has insufficient CPU */
-					continue;
-				}
-				rem_cpus -= avail_cpus;
-				bit_set(bitmap, i);
-				rem_nodes--;
-				max_nodes--;
-			}
-		}
-
-		if (job_ptr->details->contiguous ||
-		    ((rem_nodes <= 0) && (rem_cpus <= 0))) {
-			error_code = SLURM_SUCCESS;
-			break;
-		}
-		consec_cpus[best_fit_index] = 0;
-		consec_nodes[best_fit_index] = 0;
-	}
-	
-	if (error_code && (rem_cpus <= 0)
-	    && _enough_nodes(0, rem_nodes, min_nodes, req_nodes))
-		error_code = SLURM_SUCCESS;
-
-	xfree(consec_cpus);
-	xfree(consec_nodes);
-	xfree(consec_start);
-	xfree(consec_end);
-	xfree(consec_req);
-	return error_code;
+	/* nothing to restore */
+	return SLURM_SUCCESS;
 }
 
-/* this is an intermediary step between select_p_job_test and _eval_nodes
- * to tackle the knapsack problem. This code incrementally removes nodes
- * with low task counts for the job and re-evaluates each result */
-static int _select_nodes(struct job_record *job_ptr, bitstr_t * bitmap,
-			 uint32_t min_nodes, uint32_t max_nodes, 
-			 uint32_t req_nodes, int *task_cnt, int *freq, 
-			 int array_size)
+/* This is Part 3 of a 4-part procedure which can be found in
+ * src/slurmctld/read_config.c. See select_p_node_init for the
+ * whole story.
+ */
+extern int select_p_job_init(List job_list)
 {
-	int i, b, count, ec, most_tasks = 0;
-	bitstr_t *origmap, *reqmap = NULL;
-
-	if (job_ptr->details->req_node_bitmap)
-		reqmap = job_ptr->details->req_node_bitmap;
-
-	/* clear nodes from the bitmap that don't have available resources */
-	for (i = 0, b = 0; i < array_size; i++) {
-		for (count = 0; count < freq[i]; count++, b++) {
-			if (bit_test(bitmap, b) && task_cnt[i] < 1) {
-				if (reqmap && bit_test(reqmap, b)) {
-					/* can't clear a required node! */
-					return SLURM_ERROR;
-				}
-				bit_clear(bitmap, b); 
-			}
-		}
-	}
-
-	/* NOTE: num_procs is 1 by default.
-	 * Only reset max_nodes if user explicitly sets a process count */
-	if ((job_ptr->num_procs > 1) && (max_nodes > job_ptr->num_procs))
-		max_nodes = job_ptr->num_procs;
-
-	origmap = bit_copy(bitmap);
-	if (origmap == NULL)
-		fatal("bit_copy malloc failure");
-
-	ec = _eval_nodes(job_ptr, bitmap, min_nodes, max_nodes,
-			 req_nodes, task_cnt, freq, array_size);
-
-	if (ec == SLURM_SUCCESS) {
-		bit_free(origmap);
-		return ec;
-	}
-
-	/* This nodeset didn't work. To avoid a possible knapsack problem, 
-	 * incrementally remove nodes with low task counts and retry */
-
-	for (i = 0; i < array_size; i++) {
-		if (task_cnt[i] > most_tasks)
-			most_tasks = task_cnt[i];
-	}
-
-	for (count = 0; count < most_tasks; count++) {
-		int nochange = 1;
-		bit_or(bitmap, origmap);
-		for (i = 0, b = 0; i < array_size; i++) {
-			if (task_cnt[i] != -1 && task_cnt[i] <= count) {
-				int j = 0, x = b;
-				for (; j < freq[i]; j++, x++) {
-					if (!bit_test(bitmap, x))
-						continue;
-					if (reqmap && bit_test(reqmap, x)) {
-						bit_free(origmap);
-						return SLURM_ERROR;
-					}
-					nochange = 0;
-					bit_clear(bitmap, x);
-					bit_clear(origmap, x);
-				}
-			}
-			b += freq[i];
-		}
-		if (nochange)
-			continue;
-		ec = _eval_nodes(job_ptr, bitmap, min_nodes, max_nodes,
-				 req_nodes, task_cnt, freq, array_size);
-		if (ec == SLURM_SUCCESS) {
-			bit_free(origmap);
-			return ec;
-		}
-	}
-	bit_free(origmap);
-	return ec;
+	/* nothing to initialize for jobs */
+	return SLURM_SUCCESS;
 }
 
-/* test to see if any shared partitions are running jobs */
-static int _is_node_sharing(struct node_cr_record *this_node)
+extern int select_p_block_init(List part_list)
 {
-	int i, size;
-	struct part_cr_record *p_ptr = this_node->parts;
-	for (; p_ptr; p_ptr = p_ptr->next) {
-		if (p_ptr->num_rows < 2)
-			continue;
-		size = p_ptr->num_rows * this_node->sockets;
-		for (i = 0; i < size; i++) {
-			if (p_ptr->alloc_cores[i])
-				return 1;
-		}
-	}
-	return 0;
-
+	return SLURM_SUCCESS;
 }
 
-/* test to see if the given node has any jobs running on it */
-static int _is_node_busy(struct node_cr_record *this_node)
-{
-	int i, size;
-	struct part_cr_record *p_ptr = this_node->parts;
-	for (; p_ptr; p_ptr = p_ptr->next) {
-		size = p_ptr->num_rows * this_node->sockets;
-		for (i = 0; i < size; i++) {
-			if (p_ptr->alloc_cores[i])
-				return 1;
-		}
-	}
-	return 0;
-}
 
-/*
- * Determine which of these nodes are usable by this job
- *
- * Remove nodes from the bitmap that don't have enough memory to
- * support the job. Return SLURM_ERROR if a required node doesn't
- * have enough memory.
- *
- * if node_state = NODE_CR_RESERVED, clear bitmap (if node is required
- *                                   then should we return NODE_BUSY!?!)
- *
- * if node_state = NODE_CR_ONE_ROW, then this node can only be used by
- *                                  another NODE_CR_ONE_ROW job
- *
- * if node_state = NODE_CR_AVAILABLE AND:
- *  - job_node_req = NODE_CR_RESERVED, then we need idle nodes
- *  - job_node_req = NODE_CR_ONE_ROW, then we need idle or non-sharing nodes
- */
-static int _verify_node_state(struct node_cr_record *select_node_ptr,
-			      struct job_record *job_ptr, bitstr_t * bitmap,
-			      enum node_cr_state job_node_req)
+static struct multi_core_data * _create_default_mc(void)
 {
-	int i;
-	uint32_t free_mem, min_mem;
-
-	min_mem = job_ptr->details->job_min_memory & (~MEM_PER_CPU);
-	for (i = 0; i < select_node_cnt; i++) {
-		if (!bit_test(bitmap, i))
-			continue;
-
-		if ((job_ptr->details->job_min_memory) &&
-		    ((cr_type == CR_CORE_MEMORY) || (cr_type == CR_CPU_MEMORY) || 
-		     (cr_type == CR_MEMORY) || (cr_type == CR_SOCKET_MEMORY))) {
-			free_mem = select_node_ptr[i].real_memory;
-			free_mem -= select_node_ptr[i].alloc_memory;
-			if (free_mem < min_mem)
-				goto clear_bit;
-		}
-
-		if (select_node_ptr[i].node_state == NODE_CR_RESERVED) {
-			goto clear_bit;
-		} else if (select_node_ptr[i].node_state == NODE_CR_ONE_ROW) {
-			if ((job_node_req == NODE_CR_RESERVED) ||
-			    (job_node_req == NODE_CR_AVAILABLE))
-				goto clear_bit;
-			/* cannot use this node if it is running jobs
-			 * in sharing partitions */
-			if ( _is_node_sharing(&(select_node_ptr[i])) )
-				goto clear_bit;
-		} else {	/* node_state = NODE_CR_AVAILABLE */
-			if (job_node_req == NODE_CR_RESERVED) {
-				if ( _is_node_busy(&(select_node_ptr[i])) )
-					goto clear_bit;
-			} else if (job_node_req == NODE_CR_ONE_ROW) {
-				if ( _is_node_sharing(&(select_node_ptr[i])) )
-					goto clear_bit;
-			}
-		}
-		continue;	/* node is usable, test next node */
-
-		/* This node is not usable by this job */
- clear_bit:	bit_clear(bitmap, i);
-		if (job_ptr->details->req_node_bitmap &&
-		    bit_test(job_ptr->details->req_node_bitmap, i))
-			return SLURM_ERROR;
-
-	}
-
-	return SLURM_SUCCESS;
+	struct multi_core_data *mc_ptr;
+	mc_ptr = xmalloc(sizeof(struct multi_core_data));
+	mc_ptr->min_sockets = 1;
+	mc_ptr->max_sockets = 0xffff;
+	mc_ptr->min_cores   = 1;
+	mc_ptr->max_cores   = 0xffff;
+	mc_ptr->min_threads = 1;
+	mc_ptr->max_threads = 0xffff;
+/*	mc_ptr is initialized to zero by xmalloc*/
+/*	mc_ptr->ntasks_per_socket = 0; */
+/*	mc_ptr->ntasks_per_core   = 0; */
+/*	mc_ptr->plane_size        = 0; */
+	return mc_ptr;
 }
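+
+/* Example (illustrative): a job submitted without any multi-core options
+ * gets these defaults, so later tests see min_sockets/min_cores/min_threads
+ * == 1 and the max_* fields == 0xffff, i.e. effectively unconstrained
+ * socket/core/thread limits. */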
 
 /* Determine the node requirements for the job:
@@ -2076,7 +1158,7 @@ static int _verify_node_state(struct node_cr_record *select_node_ptr,
  * - can the job run on shared nodes?   (NODE_CR_ONE_ROW)
  * - can the job run on overcommitted resources? (NODE_CR_AVAILABLE)
  */
-static enum node_cr_state _get_job_node_req(struct job_record *job_ptr)
+static uint16_t _get_job_node_req(struct job_record *job_ptr)
 {
 	int max_share = job_ptr->part_ptr->max_share;
 	
@@ -2097,128 +1179,6 @@ static enum node_cr_state _get_job_node_req(struct job_record *job_ptr)
 	return NODE_CR_ONE_ROW;
 }
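+
+/* Summary (assumed from the partition Shared settings referenced above):
+ * max_share == 0 (Shared=EXCLUSIVE) yields NODE_CR_RESERVED; Shared=FORCE,
+ * or a job requesting sharing in a max_share > 1 partition, yields
+ * NODE_CR_AVAILABLE; anything else falls through to the NODE_CR_ONE_ROW
+ * default above. */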
 
-/* for a given node and partition return the count of rows (time slices)
- * that have resources allocated */
-static int _get_allocated_rows(struct node_cr_record *select_node_ptr,
-			       struct job_record *job_ptr, int n,
-			       enum node_cr_state job_node_req)
-{
-	struct part_cr_record *p_ptr;
-	int i, j, rows = 0;
-	
-	p_ptr = get_cr_part_ptr(&(select_node_ptr[n]), job_ptr->part_ptr);
-	if (p_ptr == NULL)
-		return rows;
-
-	for (i = 0; i < p_ptr->num_rows; i++) {
-		int offset = i * select_node_ptr[n].sockets;
-		for (j = 0; j < select_node_ptr[n].sockets; j++){
-			if (p_ptr->alloc_cores[offset+j]) {
-				rows++;
-				break;
-			}
-		}
-	}
-	return rows;
-}
-
-static int _load_arrays(struct node_cr_record *select_node_ptr,
-			struct job_record *job_ptr, bitstr_t *bitmap, 
-			int **a_rows, int **s_tasks, int **a_tasks, 
-			int **freq, bool test_only,
-			enum node_cr_state job_node_req)
-{
-	int i, index = 0, size = 32;
-	int *busy_rows, *shr_tasks, *all_tasks, *num_nodes;
-	
-	busy_rows = xmalloc (sizeof(int)*size); /* allocated rows */
-	shr_tasks = xmalloc (sizeof(int)*size); /* max free cpus */
-	all_tasks = xmalloc (sizeof(int)*size); /* all cpus */
-	num_nodes = xmalloc (sizeof(int)*size); /* number of nodes */
-	/* above arrays are all zero filled by xmalloc() */
-
-	for (i = 0; i < select_node_cnt; i++) {
-		if (bit_test(bitmap, i)) {
-			int rows;
-			uint16_t atasks, ptasks;
-			rows = _get_allocated_rows(select_node_ptr, job_ptr, 
-						   i, job_node_req);
-			/* false = use free rows (if available) */
-			atasks = _get_task_count(select_node_ptr, job_ptr, i, 
-						 test_only, false,
-						 job_node_req);
-			if (test_only) {
-				ptasks = atasks;
-			} else {
-				/* true = try using an already allocated row */
-				ptasks = _get_task_count(select_node_ptr, 
-							 job_ptr, i, test_only,
-							 true, job_node_req);
-			}
-			if (rows   != busy_rows[index] ||
-			    ptasks != shr_tasks[index] ||
-			    atasks != all_tasks[index]) {
-				if (num_nodes[index]) {
-					index++;
-					if (index >= size) {
-						size *= 2;
-						xrealloc(busy_rows,
-							 sizeof(int)*size);
-						xrealloc(shr_tasks,
-							 sizeof(int)*size);
-						xrealloc(all_tasks,
-							 sizeof(int)*size);
-						xrealloc(num_nodes,
-							 sizeof(int)*size);
-					}
-					num_nodes[index] = 0;
-				}
-				busy_rows[index] = rows;
-				shr_tasks[index] = ptasks;
-				all_tasks[index] = atasks;
-			}
-		} else {
-			if (busy_rows[index] != -1) {
-				if (num_nodes[index] > 0) {
-					index++;
-					if (index >= size) {
-						size *= 2;
-						xrealloc(busy_rows,
-							 sizeof(int)*size);
-						xrealloc(shr_tasks,
-							 sizeof(int)*size);
-						xrealloc(all_tasks,
-							 sizeof(int)*size);
-						xrealloc(num_nodes,
-							 sizeof(int)*size);
-					}
-					num_nodes[index] = 0;
-				}
-				busy_rows[index] = -1;
-				shr_tasks[index]  = -1;
-				all_tasks[index]  = -1;
-			}
-		}
-		num_nodes[index]++;
-	}
-	/* array_index becomes "array size" */
-	index++;
-
-#if (CR_DEBUG)
-	for (i = 0; i < index; i++) {
-		info("cons_res: i %d row %d ptasks %d atasks %d freq %d",
-		     i, busy_rows[i], shr_tasks[i], all_tasks[i], num_nodes[i]);
-	}
-#endif
-
-	*a_rows  = busy_rows;
-	*s_tasks = shr_tasks;
-	*a_tasks = all_tasks;
-	*freq    = num_nodes;
-
-	return index;
-}
-
 /*
  * select_p_job_test - Given a specification of scheduling requirements, 
  *	identify the nodes which "best" satisfy the request.
@@ -2249,32 +1209,55 @@ extern int select_p_job_test(struct job_record *job_ptr, bitstr_t * bitmap,
 			     uint32_t min_nodes, uint32_t max_nodes, 
 			     uint32_t req_nodes, int mode)
 {
-	enum node_cr_state job_node_req;
+	int rc;
+	uint16_t job_node_req;
+	static bool debug_cpu_bind = false, debug_check = false;
 
 	xassert(bitmap);
 
+	if (!debug_check) {
+		debug_check = true;
+		if (slurm_get_debug_flags() & DEBUG_FLAG_CPU_BIND)
+			debug_cpu_bind = true;
+	}
+
 	if (!job_ptr->details)
 		return EINVAL;
 
 	if (!job_ptr->details->mc_ptr)
-		job_ptr->details->mc_ptr = create_default_mc();
+		job_ptr->details->mc_ptr = _create_default_mc();
 	job_node_req = _get_job_node_req(job_ptr);
 
-	debug3("cons_res: select_p_job_test: job %d node_req %d, mode %d",
+	debug3("cons_res: select_p_job_test: job %u node_req %u, mode %d",
 	       job_ptr->job_id, job_node_req, mode);
-	debug3("cons_res: select_p_job_test: min_n %u max_n %u req_n %u",
-	       min_nodes, max_nodes, req_nodes);
+	debug3("cons_res: select_p_job_test: min_n %u max_n %u req_n %u nb %u",
+	       min_nodes, max_nodes, req_nodes, bit_set_count(bitmap));
 
 #if (CR_DEBUG)
-	_dump_state(select_node_ptr);
+	_dump_state(select_part_record);
 #endif
 	if (mode == SELECT_MODE_WILL_RUN) {
-		return _will_run_test(job_ptr, bitmap, min_nodes, max_nodes,
-				      req_nodes, job_node_req);
+		rc = _will_run_test(job_ptr, bitmap, min_nodes, max_nodes,
+				    req_nodes, job_node_req);
+	} else {
+		rc = cr_job_test(job_ptr, bitmap, min_nodes, max_nodes,
+				 req_nodes, mode, cr_type, job_node_req,
+				 select_node_cnt, select_part_record,
+				 select_node_usage);
 	}
 
-	return _job_test(job_ptr, bitmap, min_nodes, max_nodes, req_nodes, 
-			 mode, job_node_req, select_node_ptr);
+#if (CR_DEBUG)
+	if (job_ptr->select_job)
+		log_select_job_res(job_ptr->job_id, job_ptr->select_job);
+	else
+		info("no select_job_res info for job %u", 
+		     job_ptr->job_id);
+#else
+	if (debug_cpu_bind && job_ptr->select_job)
+		log_select_job_res(job_ptr->job_id, job_ptr->select_job);
+#endif
+
+	return rc;
 }
 
 /*
@@ -2301,23 +1284,24 @@ extern int select_p_job_list_test(List req_list)
  *	each one. */
 static int _will_run_test(struct job_record *job_ptr, bitstr_t *bitmap,
 			uint32_t min_nodes, uint32_t max_nodes, 
-			uint32_t req_nodes, enum node_cr_state job_node_req)
+			uint32_t req_nodes, uint16_t job_node_req)
 {
-	struct node_cr_record *exp_node_cr;
+	struct part_res_record *future_part;
+	struct node_use_record *future_usage;
 	struct job_record *tmp_job_ptr, **tmp_job_pptr;
-	struct select_cr_job *job;
 	List cr_job_list;
 	ListIterator job_iterator;
 	bitstr_t *orig_map;
 	int rc = SLURM_ERROR;
-	uint16_t saved_state;
 	time_t now = time(NULL);
 
 	orig_map = bit_copy(bitmap);
 
 	/* Try to run with currently available nodes */
-	rc = _job_test(job_ptr, bitmap, min_nodes, max_nodes, req_nodes, 
-		       SELECT_MODE_WILL_RUN, job_node_req, select_node_ptr);
+	rc = cr_job_test(job_ptr, bitmap, min_nodes, max_nodes, req_nodes, 
+			 SELECT_MODE_WILL_RUN, cr_type, job_node_req,
+			 select_node_cnt, select_part_record, 
+			 select_node_usage);
 	if (rc == SLURM_SUCCESS) {
 		bit_free(orig_map);
 		job_ptr->start_time = time(NULL);
@@ -2326,14 +1310,21 @@ static int _will_run_test(struct job_record *job_ptr, bitstr_t *bitmap,
 
 	/* Job is still pending. Simulate termination of jobs one at a time 
 	 * to determine when and where the job can start. */
-	exp_node_cr = _dup_node_cr(select_node_ptr, select_node_cnt);
-	if (exp_node_cr == NULL) {
+
+	future_part = _dup_part_data(select_part_record);
+	if (future_part == NULL) {
+		bit_free(orig_map);
+		return SLURM_ERROR;
+	}
+	future_usage = _dup_node_usage(select_node_usage);
+	if (future_usage == NULL) {
+		_destroy_part_data(future_part);
 		bit_free(orig_map);
 		return SLURM_ERROR;
 	}
 
 	/* Build list of running jobs */
-	cr_job_list = list_create(_cr_job_list_del);
+	cr_job_list = list_create(NULL);
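+	/* no delete function: the list now stores pointers into slurmctld's
+	 * job records rather than locally-owned copies, so there is nothing
+	 * to free when the list is destroyed */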
 	job_iterator = list_iterator_create(job_list);
 	while ((tmp_job_ptr = (struct job_record *) list_next(job_iterator))) {
 		if (tmp_job_ptr->job_state != JOB_RUNNING)
@@ -2354,23 +1345,15 @@ static int _will_run_test(struct job_record *job_ptr, bitstr_t *bitmap,
 	job_iterator = list_iterator_create(cr_job_list);
 	while ((tmp_job_pptr = (struct job_record **) list_next(job_iterator))) {
 		tmp_job_ptr = *tmp_job_pptr;
-		job = list_find_first(select_cr_job_list, _find_job_by_id,
-				      &tmp_job_ptr->job_id);
-		if (!job) {
-			error("cons_res: could not find job %u", 
-			      tmp_job_ptr->job_id);
-			continue;
-		}
-		saved_state = job->state;
-		_rm_job_from_nodes(exp_node_cr, job, "_will_run_test", 1);
-		job->state = saved_state;
+		_rm_job_from_res(future_part, future_usage, tmp_job_ptr, 0);
 		bit_or(bitmap, orig_map);
-		rc = _job_test(job_ptr, bitmap, min_nodes, max_nodes, 
-			       req_nodes, SELECT_MODE_WILL_RUN, job_node_req,
-			       exp_node_cr);
+		rc = cr_job_test(job_ptr, bitmap, min_nodes, max_nodes, 
+				 req_nodes, SELECT_MODE_WILL_RUN, cr_type,
+				 job_node_req, select_node_cnt, future_part,
+				 future_usage);
 		if (rc == SLURM_SUCCESS) {
 			if (tmp_job_ptr->end_time <= now)
-				 job_ptr->start_time = now + 1;
+				job_ptr->start_time = now + 1;
 			else
 				job_ptr->start_time = tmp_job_ptr->end_time;
 			break;
@@ -2378,339 +1361,12 @@ static int _will_run_test(struct job_record *job_ptr, bitstr_t *bitmap,
 	}
 	list_iterator_destroy(job_iterator);
 	list_destroy(cr_job_list);
-	_xfree_select_nodes(exp_node_cr, select_node_cnt); 
+	_destroy_part_data(future_part);
+	_destroy_node_data(future_usage, NULL);
 	bit_free(orig_map);
 	return rc;
 }
 
-/* _job_test - does most of the real work for select_p_job_test(), which 
- *	pretty much just handles load-leveling and max_share logic */
-static int _job_test(struct job_record *job_ptr, bitstr_t *bitmap,
-			uint32_t min_nodes, uint32_t max_nodes, 
-			uint32_t req_nodes, int mode, 
-			enum node_cr_state job_node_req,
-			struct node_cr_record *select_node_ptr)
-{
-	int a, f, i, j, k, error_code, ll; /* ll = layout array index */
-	struct multi_core_data *mc_ptr = NULL;
-	static struct select_cr_job *job;
-	uint16_t * layout_ptr = NULL;
-	int  array_size;
-	int *busy_rows, *sh_tasks, *al_tasks, *freq;
-	bitstr_t *origmap, *reqmap = NULL;
-	int row, rows, try;
-	bool test_only;
-	uint32_t save_mem = 0;
-
-	layout_ptr = job_ptr->details->req_node_layout;
-	mc_ptr = job_ptr->details->mc_ptr;
-	reqmap = job_ptr->details->req_node_bitmap;
-
-	/* check node_state and update bitmap as necessary */
-	if (mode == SELECT_MODE_TEST_ONLY) {
-		test_only = true;
-		save_mem = job_ptr->details->job_min_memory;
-		job_ptr->details->job_min_memory = 0;
-	} else	/* SELECT_MODE_RUN_NOW || SELECT_MODE_WILL_RUN  */ 
-		test_only = false;
-
-	if (!test_only) {
-		error_code = _verify_node_state(select_node_ptr, job_ptr, 
-						bitmap, job_node_req);
-		if (error_code != SLURM_SUCCESS)
-			return error_code;
-	}
-
-	/* This is the case if -O/--overcommit  is true */ 
-	if (job_ptr->num_procs == job_ptr->details->min_nodes) {
-		job_ptr->num_procs *= MAX(1, mc_ptr->min_threads);
-		job_ptr->num_procs *= MAX(1, mc_ptr->min_cores);
-		job_ptr->num_procs *= MAX(1, mc_ptr->min_sockets);
-	}
-
-	/* compute condensed arrays of node allocation data */
-	array_size = _load_arrays(select_node_ptr, job_ptr, bitmap, &busy_rows,
-				  &sh_tasks, &al_tasks, &freq, test_only, 
-				  job_node_req);
-
-	if (test_only) {
-        	/* try with all nodes and all possible cpus */
-		error_code = _select_nodes(job_ptr, bitmap, min_nodes,
-					   max_nodes, req_nodes, al_tasks, freq,
-					   array_size);
-		xfree(busy_rows);
-		xfree(sh_tasks);
-		xfree(al_tasks);
-		xfree(freq);
-		if (save_mem)
-			job_ptr->details->job_min_memory = save_mem;
-		return error_code;
-	}
-
-	origmap = bit_copy(bitmap);
-	if (origmap == NULL)
-		fatal("bit_copy malloc failure");
-
-	error_code = SLURM_ERROR;
-	rows = job_ptr->part_ptr->max_share & ~SHARED_FORCE;
-	rows = MAX(1, rows);	/* max_share == 0 for EXCLUSIVE */
-	for (row = 1; row <= rows; row++) {
-
-		/*
-		 * first try : try "as is"
-		 * second try: only add a row to nodes with no free cpus
-		 * third try : add a row to nodes with some alloc cpus
-		 */
-		for (try = 0; try < 3; try++) {
-			bit_or(bitmap, origmap);
-
-			debug3("cons_res: cur row = %d, try = %d", row, try);
-
-			for (i = 0, f = 0; i < array_size; i++) {
-
-				/* Step 1:
-				 * remove nodes from bitmap (unless required)
-				 * who's busy_rows value is bigger than 'row'.
-				 * Why? to enforce "least-loaded" over
-				 *      "contiguous" */
-				if ((busy_rows[i] > row) ||
-				    (busy_rows[i] == row && sh_tasks[i] == 0)) {
-					for (j = f; j < f+freq[i]; j++) {
-						if (reqmap &&
-						    bit_test(reqmap, j))
-							continue;
-						bit_clear(bitmap, j);
-					}
-				}
-				f += freq[i];
-
-				if (try == 0)
-					continue;
-				/* Step 2:
-				 * set sh_tasks = al_tasks for nodes who's
-				 *      busy_rows value is < 'row'.
-				 * Why? to select a new row for these
-				 *      nodes when appropriate */
-				if ((busy_rows[i] == -1) || 
-				    (busy_rows[i] >= row))
-					continue;
-				if (sh_tasks[i] == al_tasks[i])
-					continue;
-				if ((try == 1) && (sh_tasks[i] != 0))
-					continue;
-				sh_tasks[i] = al_tasks[i];
-			}
-			if (bit_set_count(bitmap) < min_nodes)
-				break;
-
-#if (CR_DEBUG)
-			for (i = 0; i < array_size; i++) {
-				info("cons_res: try:%d i:%d busy_rows:%d "
-				     "sh_tasks:%d al_tasks:%d freq:%d",
-				     try, i, busy_rows[i], sh_tasks[i],
-				     al_tasks[i], freq[i]);
-			}
-#endif
-
-			if (row > 1) {
-				/* We need to share resources. 
-				 * Try to find suitable job to share nodes with. */
-
-				/* FIXME: To be added. There is some simple logic 
-				 * to do this in select/linear.c:_find_job_mate(), 
-				 * but the data structures here are very different */
-			}
-
-			error_code = _select_nodes(job_ptr, bitmap, min_nodes,
-						   max_nodes, req_nodes,
-						   sh_tasks, freq, array_size);
-			if (error_code == SLURM_SUCCESS)
-				break;
-		}
-		if (error_code == SLURM_SUCCESS)
-			break;
-	}
-
-	bit_free(origmap);
-
-	if ((mode != SELECT_MODE_WILL_RUN) && (job_ptr->part_ptr == NULL))
-		error_code = EINVAL;
-	if ((error_code == SLURM_SUCCESS) && (mode == SELECT_MODE_WILL_RUN)) {
-		if (job_ptr->details->shared == 0) {
-			uint16_t procs;
-			job_ptr->total_procs = 0;
-			for (i = 0; i < select_node_cnt; i++) {
-				if (!bit_test(bitmap, i))
-					continue;
-				procs = select_node_ptr[i].cpus;
-				job_ptr->total_procs += procs;
-			}
-		} else {
-			job_ptr->total_procs = job_ptr->num_procs;
-			if (job_ptr->details->cpus_per_task &&
-			    (job_ptr->details->cpus_per_task != 
-			     (uint16_t) NO_VAL)) {
-				job_ptr->total_procs *= job_ptr->details->
-							cpus_per_task;
-			}
-		}
-	}
-	if ((error_code != SLURM_SUCCESS) || (mode != SELECT_MODE_RUN_NOW)) {
-		xfree(busy_rows);
-		xfree(sh_tasks);
-		xfree(al_tasks);
-		xfree(freq);
-		return error_code;
-	}
-	
-	/* allocate the job and distribute the tasks appropriately */
-	job = xmalloc(sizeof(struct select_cr_job));
-	job->job_ptr = job_ptr;
-	job->job_id = job_ptr->job_id;
-	job->nhosts = bit_set_count(bitmap);
-	job->nprocs = MAX(job_ptr->num_procs, job->nhosts);
-	job->node_req  = job_node_req;
-
-	job->node_bitmap = bit_copy(bitmap);
-	if (job->node_bitmap == NULL)
-		fatal("bit_copy malloc failure");
-
-	job->cpus          = (uint16_t *) xmalloc(job->nhosts * sizeof(uint16_t));
-	job->alloc_cpus    = (uint16_t *) xmalloc(job->nhosts * sizeof(uint16_t));
-	job->node_offset   = (uint16_t *) xmalloc(job->nhosts * sizeof(uint16_t));
-	job->alloc_memory  = (uint32_t *) xmalloc(job->nhosts * sizeof(uint32_t));
-
-	if ((cr_type == CR_CORE)   || (cr_type == CR_CORE_MEMORY) ||
-	    (cr_type == CR_SOCKET) || (cr_type == CR_SOCKET_MEMORY)) {
-		job->num_sockets   = (uint16_t *)  xmalloc(job->nhosts * 
-							   sizeof(uint16_t));
-		job->alloc_cores   = (uint16_t **) xmalloc(job->nhosts * 
-							   sizeof(uint16_t *));
-		j = 0;
-		for (i = 0; i < select_node_cnt; i++) {
-			if (!bit_test(job->node_bitmap, i))
-				continue;
-			job->num_sockets[j] = select_node_ptr[i].sockets;
-			job->alloc_cores[j] = (uint16_t *) xmalloc(
-				job->num_sockets[j] * sizeof(uint16_t));
-			j++;
-		}
-	}
-
-	j = 0;
-	a = 0;
-	f = 0;
-	row = 0; /* total up all available cpus for --overcommit scenarios */
-	for (i = 0, ll = -1; i < node_record_count; i++, f++) {
-		if (f >= freq[a]) {
-			f = 0;
-			a++;
-		}
-		if (layout_ptr
-		    && bit_test(job_ptr->details->req_node_bitmap, i)) {
-			ll++;
-		}
-		if (bit_test(bitmap, i) == 0)
-			continue;
-		if (j >= job->nhosts) {
-			error("select_cons_res: job nhosts too small\n");
-			break;
-		}
-		job->cpus[j] = sh_tasks[a];
-		row += sh_tasks[a];
-		if (layout_ptr
-		    && bit_test(job_ptr->details->req_node_bitmap, i)) {
-			job->cpus[j] = MIN(job->cpus[j], layout_ptr[ll]);
-		} else if (layout_ptr) {
-			job->cpus[j] = 0;
-		}
-		job->alloc_cpus[j] = 0;
-		if ((cr_type == CR_CORE) || (cr_type == CR_CORE_MEMORY)||
-		    (cr_type == CR_SOCKET) || (cr_type == CR_SOCKET_MEMORY)) {
-			_chk_resize_job(job, j, job->num_sockets[j]);
-			for (k = 0; k < job->num_sockets[j]; k++)
-				job->alloc_cores[j][k] = 0;
-		}
-		j++;
-	}
-
-	xfree(busy_rows);
-	xfree(sh_tasks);
-	xfree(al_tasks);
-	xfree(freq);
-
-	/* When 'srun --overcommit' is used, nprocs is set to a minimum value
-	 * in order to allocate the appropriate number of nodes based on the
-	 * job request.
-	 * For cons_res, all available logical processors will be allocated on
-	 * each allocated node in order to accommodate the overcommit request.
-	 */
-	if (job_ptr->details->overcommit)
-		job->nprocs = MIN(row, job_ptr->details->num_tasks);
-
-	if (job_ptr->details->shared == 0) {
-		/* Nodes need to be allocated in dedicated
-		   mode. User has specified the --exclusive switch */
-		error_code = cr_exclusive_dist(job, cr_type);
-	} else {
-		/* Determine the number of logical processors
-		 * per node needed for this job.
-		 * Make sure below matches the layouts in
-		 * lllp_distribution in
-		 * plugins/task/affinity/dist_task.c */
-		switch(job_ptr->details->task_dist) {
-		case SLURM_DIST_BLOCK_BLOCK:
-		case SLURM_DIST_CYCLIC_BLOCK:
-			error_code = cr_dist(job, 0, cr_type); 
-			break;
-		case SLURM_DIST_ARBITRARY:
-		case SLURM_DIST_BLOCK:
-		case SLURM_DIST_CYCLIC:				
-		case SLURM_DIST_BLOCK_CYCLIC:
-		case SLURM_DIST_CYCLIC_CYCLIC:
-		case SLURM_DIST_UNKNOWN:
-			error_code = cr_dist(job, 1, cr_type); 
-			break;
-		case SLURM_DIST_PLANE:
-			error_code = cr_plane_dist(job, mc_ptr->plane_size, cr_type); 
-			break;
-		default:
-			error("select/cons_res: invalid dist_type");
-			error_code = SLURM_ERROR;
-			break;
-		}
-	}
-	if (error_code != SLURM_SUCCESS) {
-		_xfree_select_cr_job(job);
-		return error_code;
-	}
-
-	if (job_ptr->details->job_min_memory &&
-	    ((cr_type == CR_CORE_MEMORY) || (cr_type == CR_CPU_MEMORY) ||
-	     (cr_type == CR_MEMORY) || (cr_type == CR_SOCKET_MEMORY))) {
-		j = 0;
-		for (i = 0; i < node_record_count; i++) {
-			if (bit_test(bitmap, i) == 0)
-				continue;
-			if (job_ptr->details->job_min_memory & MEM_PER_CPU) {
-				job->alloc_memory[j] = job_ptr->details->
-						       job_min_memory &
-						       (~MEM_PER_CPU);
-				job->alloc_memory[j] *= job->alloc_cpus[j];
-			} else {
-				job->alloc_memory[j] = job_ptr->details->
-						       job_min_memory;
-			}
-			j++;
-		}
-	}
-
-	_append_to_job_list(job);
-	last_cr_update_time = time(NULL);
-
-	return error_code;
-}
-
 extern int select_p_job_begin(struct job_record *job_ptr)
 {
 	return SLURM_SUCCESS;
@@ -2723,40 +1379,10 @@ extern int select_p_job_ready(struct job_record *job_ptr)
 
 extern int select_p_job_fini(struct job_record *job_ptr)
 {
-	struct select_cr_job *job = NULL;
-	ListIterator iterator;
-
 	xassert(job_ptr);
 	xassert(job_ptr->magic == JOB_MAGIC);
-
-	if (list_count(select_cr_job_list) == 0)
-		return SLURM_SUCCESS;
-
-	iterator = list_iterator_create(select_cr_job_list);
-	while ((job = (struct select_cr_job *) list_next(iterator))) {
-		if (job->job_id == job_ptr->job_id)
-			break;
-	}
-	if (!job) {
-		error("select_p_job_fini: could not find data for job %d",
-			job_ptr->job_id);
-		list_iterator_destroy(iterator);
-		return SLURM_ERROR;
-	}
 	
-	_rm_job_from_nodes(select_node_ptr, job, "select_p_job_fini", 1);
-
-	slurm_mutex_lock(&cr_mutex);
-	list_remove(iterator);
-	slurm_mutex_unlock(&cr_mutex);
-	_xfree_select_cr_job(job);
-	list_iterator_destroy(iterator);
-
-	debug3("cons_res: select_p_job_fini Job_id %u: list_count: %d",
-		job_ptr->job_id, list_count(select_cr_job_list));
-
-	_verify_select_job_list(job_ptr->job_id);
-	last_cr_update_time = time(NULL);
+	_rm_job_from_res(select_part_record, select_node_usage, job_ptr, 0);
 
 	return SLURM_SUCCESS;
 }
@@ -2766,61 +1392,20 @@ extern int select_p_job_fini(struct job_record *job_ptr)
  * This sum is compared with the partition's Shared parameter */
 extern int select_p_job_suspend(struct job_record *job_ptr)
 {
-	struct select_cr_job *job;
-	int rc;
- 
 	xassert(job_ptr);
-	xassert(select_cr_job_list);
 
-	job = list_find_first(select_cr_job_list, _find_job_by_id,
-			      &job_ptr->job_id);
-	if (!job)
-		return ESLURM_INVALID_JOB_ID;
-
-	rc = _rm_job_from_nodes(select_node_ptr, job, 
-				"select_p_job_suspend", 0);
-	return SLURM_SUCCESS;
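+	/* final argument 2 selects the removal mode; it appears to release
+	 * only the job's CPU usage, keeping its memory reserved while
+	 * suspended (assumption; see _rm_job_from_res) */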
+	return _rm_job_from_res(select_part_record, select_node_usage,
+				job_ptr, 2);
 }
 
 /* See NOTE with select_p_job_suspend above */
 extern int select_p_job_resume(struct job_record *job_ptr)
 {
-	struct select_cr_job *job;
-	int rc;
-
 	xassert(job_ptr);
-	xassert(select_cr_job_list);
-
-	job = list_find_first(select_cr_job_list, _find_job_by_id,
-			      &job_ptr->job_id);
-	if (!job)
-		return ESLURM_INVALID_JOB_ID;
 	
-	rc = _add_job_to_nodes(job, "select_p_job_resume", 0);
-	return SLURM_SUCCESS;
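+	/* mode 2 mirrors select_p_job_suspend: presumably it restores only
+	 * the CPU usage that suspension released (assumption; see
+	 * _add_job_to_res) */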
+	return _add_job_to_res(job_ptr, 2);
 }
 
-extern uint16_t select_p_get_job_cores(uint32_t job_id, int alloc_index, int s)
-{
-	struct select_cr_job *job = list_find_first(select_cr_job_list,
-						    _find_job_by_id, &job_id);
-	if (!job || alloc_index >= job->nhosts)
-		return 0;
-	if (cr_type == CR_CORE   || cr_type == CR_CORE_MEMORY ||
-	    cr_type == CR_SOCKET || cr_type == CR_SOCKET_MEMORY) {
-		if (job->num_sockets == NULL || job->alloc_cores == NULL)
-			return 0;
-		if (s >= job->num_sockets[alloc_index]) 
-			return 0;
-
-		return job->alloc_cores[alloc_index][s];
-	}
-	/* else return the total cpu count for the given node */
-	if (job->alloc_cpus == NULL)
-		return 0;
-
-	return job->alloc_cpus[alloc_index];
-}
 
 extern int select_p_pack_node_info(time_t last_query_time,
 				   Buf * buffer_ptr)
@@ -2829,100 +1414,46 @@ extern int select_p_pack_node_info(time_t last_query_time,
 	return SLURM_ERROR;
 }
 
-extern int select_p_get_extra_jobinfo(struct node_record *node_ptr,
-				      struct job_record *job_ptr,
-				      enum select_data_info cr_info,
-				      void *data)
-{
-	int rc = SLURM_SUCCESS, i, index, node_offset, node_inx;
-	struct select_cr_job *job;
-	struct node_cr_record *this_cr_node;
-	uint16_t *tmp_16 = (uint16_t *) data;
-
-	xassert(job_ptr);
-	xassert(job_ptr->magic == JOB_MAGIC);
-	xassert(node_ptr);
-
-	switch (cr_info) {
-	case SELECT_AVAIL_CPUS:
-		*tmp_16 = 0;
-		job = list_find_first(select_cr_job_list, _find_job_by_id, 
-				      &job_ptr->job_id);
-		if (job == NULL) {
-			error("cons_res: job %u not active", job_ptr->job_id);
-			break;
-		}
-
-		node_offset = -1;
-		node_inx = node_ptr - node_record_table_ptr;
-		for (i = 0; i < node_record_count; i++) {
-			if (bit_test(job->node_bitmap, i) == 0)
-				continue;
-			node_offset++;
-			if (i != node_inx)
-				continue;
-			/* Usable and "allocated" resources for this 
-			 * given job for a specific node --> based 
-			 * on the output from _cr_dist */
-			switch(cr_type) {
-			case CR_MEMORY:
-				index = node_ptr - node_record_table_ptr;
-				this_cr_node = select_node_ptr + index;
-				*tmp_16 = this_cr_node->cpus;
-				break;
-			case CR_SOCKET:
-			case CR_SOCKET_MEMORY:
-			case CR_CORE: 
-			case CR_CORE_MEMORY: 
-			case CR_CPU:
-			case CR_CPU_MEMORY:
-			default:
-				*tmp_16 = job->alloc_cpus[node_offset];
-				break;
-			}
-			break;
-		}
-		if (i >= node_record_count) {
-			error("cons_res could not find %s", node_ptr->name); 
-			rc = SLURM_ERROR;
-		}
-		break;
-	default:
-		error("select_g_get_extra_jobinfo cr_info %d invalid", cr_info);
-		rc = SLURM_ERROR;
-		break;
-	}
-	
-	return rc;
-}
 
 extern int select_p_get_select_nodeinfo(struct node_record *node_ptr,
 					enum select_data_info dinfo,
 					void *data)
 {
-	int index, i, j, rc = SLURM_SUCCESS;
-	struct node_cr_record *this_cr_node;
-	struct part_cr_record *p_ptr;
-	uint16_t *tmp_16;
+	uint32_t n, i, c, start, end;
+	struct part_res_record *p_ptr;
+	uint16_t tmp, *tmp_16;
 
 	xassert(node_ptr);
 
 	switch (dinfo) {
-	case SELECT_ALLOC_CPUS: 
+	case SELECT_ALLOC_CPUS:
 		tmp_16 = (uint16_t *) data;
 		*tmp_16 = 0;
-		index = node_ptr - node_record_table_ptr;
-	        this_cr_node = select_node_ptr + index;
 
 		/* determine the highest number of allocated cores from */
 		/* all rows of all partitions */
-		for (p_ptr = this_cr_node->parts; p_ptr; p_ptr = p_ptr->next) {
-			i = 0;
-			for (j = 0; j < p_ptr->num_rows; j++) {
-				uint16_t tmp = 0;
-				for (; i < this_cr_node->sockets; i++)
-					tmp += p_ptr->alloc_cores[i] *
-					       this_cr_node->threads;
+		for (n = 0; n < node_record_count; n++) {
+			if (&(node_record_table_ptr[n]) == node_ptr)
+				break;
+		}
+		if (n >= node_record_count) {
+			/* did not find the node */
+			return SLURM_ERROR;
+		}
+		start = cr_get_coremap_offset(n);
+		end = cr_get_coremap_offset(n+1);
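+		/* [start, end) is this node's slice of the global core
+		 * bitmap: cr_get_coremap_offset(n) returns the first core
+		 * index of node n, so the next node's offset bounds it */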
+		for (p_ptr = select_part_record; p_ptr; p_ptr = p_ptr->next) {
+			if (!p_ptr->row)
+				continue;
+			for (i = 0; i < p_ptr->num_rows; i++) {
+				if (!p_ptr->row[i].row_bitmap)
+					continue;
+				tmp = 0;
+				for (c = start; c < end; c++) {
+					if (bit_test(p_ptr->row[i].row_bitmap,
+						     c))
+						tmp++;
+				}
 				if (tmp > *tmp_16)
 					*tmp_16 = tmp;
 			}
@@ -2930,31 +1461,23 @@ extern int select_p_get_select_nodeinfo(struct node_record *node_ptr,
 		break;
 	default:
 		error("select_g_get_select_nodeinfo info %d invalid", dinfo);
-		rc = SLURM_ERROR;
+		return SLURM_ERROR;
 		break;
 	}
-	return rc;
+	return SLURM_SUCCESS;
 }
 
+
 extern int select_p_update_nodeinfo(struct job_record *job_ptr)
 {
-	int rc = SLURM_SUCCESS;
-	struct select_cr_job *job;
-
 	xassert(job_ptr);
 	xassert(job_ptr->magic == JOB_MAGIC);
 
 	if ((job_ptr->job_state != JOB_RUNNING)
 	&&  (job_ptr->job_state != JOB_SUSPENDED))
 		return SLURM_SUCCESS;
-
-	job = list_find_first(select_cr_job_list, _find_job_by_id,
-			      &job_ptr->job_id);
-	if (!job)
-		return SLURM_SUCCESS;
 	
-	rc = _add_job_to_nodes(job, "select_p_update_nodeinfo", 0);
-	return rc;
+	return _add_job_to_res(job_ptr, 0);
 }
 
 extern int select_p_update_block (update_part_msg_t *part_desc_ptr)
@@ -2967,8 +1490,119 @@ extern int select_p_update_sub_node (update_part_msg_t *part_desc_ptr)
 	return SLURM_SUCCESS;
 }
 
+
+/* Helper function for _synchronize_bitmap().  Check
+ * if the given node has at least one available CPU */
+static uint16_t _is_node_avail(struct part_res_record *p_ptr, uint32_t node_i)
+{
+	uint32_t i, r, cpu_begin, cpu_end;
+
+	cpu_begin = cr_get_coremap_offset(node_i);
+	cpu_end   = cr_get_coremap_offset(node_i+1);
+	
+	if (select_node_usage[node_i].node_state >= NODE_CR_RESERVED) {
+		if (!cr_priority_selection_enabled())
+			return (uint16_t) 0;
+		/* cr_priority_selection has been enabled:
+		 * check to see if the existing job that reserved
+		 * this node is in a partition with a priority that
+		 * is equal-to or greater-than this partition. If it
+		 * is, then this node is NOT available. Otherwise
+		 * this node is available.
+		 */
+		struct part_res_record *s_ptr;
+		for (s_ptr = select_part_record; s_ptr; s_ptr = s_ptr->next) {
+			if (s_ptr->priority < p_ptr->priority)
+				continue;
+			if (!s_ptr->row || !s_ptr->row[0].row_bitmap)
+				continue;
+			for (i = cpu_begin; i < cpu_end; i++) {
+				if (bit_test(s_ptr->row[0].row_bitmap, i))
+					return (uint16_t) 0;
+			}
+		}
+		return (uint16_t) 1;
+	}
+	if (select_node_usage[node_i].node_state >= NODE_CR_ONE_ROW) {
+		/* An existing job has requested that it's CPUs
+		 * NOT be shared, but any other CPUs on the same
+		 * node can be used by other jobs with the same
+		 * CPU restriction.
+		 * Check whether or not there are free CPUs on this
+		 * node in the given partition.
+		 */
+		if (!p_ptr->row || !p_ptr->row[0].row_bitmap)
+			return (uint16_t) 1;
+		for (i = cpu_begin; i < cpu_end; i++) {
+			if (!bit_test(p_ptr->row[0].row_bitmap, i))
+				return (uint16_t) 1;
+		}
+	} else {
+		/* check the core_bitmap in all rows */
+		if (!p_ptr->row)
+			return (uint16_t) 1;
+		for (r = 0; r < p_ptr->num_rows; r++) {
+			if (!p_ptr->row[r].row_bitmap)
+				return (uint16_t) 1;
+			for (i = cpu_begin; i < cpu_end; i++) {
+				if (!bit_test(p_ptr->row[r].row_bitmap, i))
+					return (uint16_t) 1;
+			}
+		}
+	}
+	return (uint16_t) 0;
+}
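+
+/* Example (illustrative): a node running a single Shared=NO job has
+ * node_state == NODE_CR_ONE_ROW, so it is reported available here only
+ * while row 0 of the requesting partition still has unallocated CPUs
+ * in this node's [cpu_begin, cpu_end) range. */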
+
+
+/* Worker function for select_p_get_info_from_plugin() */
+static int _synchronize_bitmaps(struct job_record *job_ptr,
+				bitstr_t ** partially_idle_bitmap)
+{
+	int size, i, idlecpus = bit_set_count(avail_node_bitmap);
+	struct part_res_record *p_ptr;
+	size = bit_size(avail_node_bitmap);
+	bitstr_t *bitmap = bit_alloc(size);
+	if (bitmap == NULL)
+		return SLURM_ERROR;
+
+	debug3("cons_res: synch_bm: avail %d of %d set, idle %d of %d set",
+	       idlecpus, size, bit_set_count(idle_node_bitmap), size);
+
+	if (!job_ptr)
+		fatal("cons_res: error: don't know what job I'm sync'ing");
+		
+	for (p_ptr = select_part_record; p_ptr; p_ptr = p_ptr->next) {
+		if (job_ptr && strcmp(p_ptr->name, job_ptr->part_ptr->name) == 0)
+			break;
+	}
+
+	for (i = 0; i < select_node_cnt; i++) {
+		if (bit_test(avail_node_bitmap, i) == 0)
+			continue;
+
+		if (bit_test(idle_node_bitmap, i) == 1) {
+			bit_set(bitmap, i);
+			continue;
+		}
+		
+		if (!p_ptr || _is_node_avail(p_ptr, i))
+			bit_set(bitmap, i);
+	}
+	idlecpus = bit_set_count(bitmap);
+	if (p_ptr)
+		debug3("cons_res: found %d partially idle nodes in part %s",
+			idlecpus, p_ptr->name);
+	else
+		debug3("cons_res: found %d partially idle nodes",
+			idlecpus);
+
+	*partially_idle_bitmap = bitmap;
+	return SLURM_SUCCESS;
+}
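+
+/* NOTE: the returned bitmap marks nodes that are up and either fully idle
+ * or still holding at least one free CPU in the job's partition
+ * ("partially idle"); ownership of the bitmap passes to the caller. */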
+
+
 extern int select_p_get_info_from_plugin(enum select_data_info info,
-					 void *data)
+					 struct job_record *job_ptr, void *data)
 {
 	int rc = SLURM_SUCCESS;
 
@@ -2978,7 +1612,7 @@ extern int select_p_get_info_from_plugin(enum select_data_info info,
 		bitstr_t **bitmap = (bitstr_t **) data;
 		bitstr_t *tmp_bitmap = NULL;
 		
-		rc = _synchronize_bitmaps(&tmp_bitmap);
+		rc = _synchronize_bitmaps(job_ptr, &tmp_bitmap);
 		if (rc != SLURM_SUCCESS) {
 			FREE_NULL_BITMAP(tmp_bitmap);
 			return rc;
@@ -3017,203 +1651,33 @@ extern int select_p_alter_node_cnt(enum select_node_cnt type, void *data)
 extern int select_p_reconfigure(void)
 {
 	ListIterator job_iterator;
-	struct select_cr_job *job;
 	struct job_record *job_ptr;
-	int rc, suspend;
+	int rc = SLURM_SUCCESS;
 
 	info("cons_res: select_p_reconfigure");
-	select_fast_schedule = slurm_get_fast_schedule();
 
-	/* Refresh the select_node_ptr global array in case nodes
-	 * have been added or removed. This procedure will clear all
-	 * partition information and all allocated resource usage.
-	 */
+	/* Rebuild the global data structures */
 	rc = select_p_node_init(node_record_table_ptr, node_record_count);
-
-	/* reload all of the allocated resource usage from job data */
-	if (select_cr_job_list == NULL)
-	    	return SLURM_SUCCESS;
-
-	slurm_mutex_lock(&cr_mutex);
-	job_iterator = list_iterator_create(select_cr_job_list);
-	while ((job = (struct select_cr_job *) list_next(job_iterator))) {
-		suspend = 0;
-		job_ptr = find_job_record(job->job_id);
-		if ((job_ptr == NULL) ||
-		    (job_ptr->part_ptr == NULL) ||
-		    ((job_ptr->job_state != JOB_RUNNING) &&
-		     (job_ptr->job_state != JOB_SUSPENDED))) {
-			list_remove(job_iterator);
-			error("cons_res: select_p_reconfigure: removing "
-				"nonexistent/invalid job %u", job->job_id);
-			_xfree_select_cr_job(job);
-			continue;
-		}
-
-		if (job_ptr->job_state == JOB_SUSPENDED)
-			suspend = 1;
-		if ((job->state & CR_JOB_ALLOCATED_MEM) ||
-		    (job->state & CR_JOB_ALLOCATED_CPUS)) {
-			job->state = 0;
-			_add_job_to_nodes(job, "select_p_reconfigure", suspend);
-			/* ignore any errors. partition and/or node config 
-			 * may have changed while jobs remain running */
+	if (rc != SLURM_SUCCESS)
+		return rc;
+	
+	/* reload job data */
+	job_iterator = list_iterator_create(job_list);
+	while ((job_ptr = (struct job_record *) list_next(job_iterator))) {
+		if (job_ptr->job_state == JOB_RUNNING) {
+			/* add the job */
+			_add_job_to_res(job_ptr, 0);
+		} else if (job_ptr->job_state == JOB_SUSPENDED) {
+			/* add the job in a suspended state */
+			_add_job_to_res(job_ptr, 2);
 		}
 	}
 	list_iterator_destroy(job_iterator);
-	slurm_mutex_unlock(&cr_mutex);
-	last_cr_update_time = time(NULL);
-
-	return SLURM_SUCCESS;
-}
-
-extern struct multi_core_data * create_default_mc(void)
-{
-	struct multi_core_data *mc_ptr;
-	mc_ptr = xmalloc(sizeof(struct multi_core_data));
-	mc_ptr->min_sockets = 1;
-	mc_ptr->max_sockets = 0xffff;
-	mc_ptr->min_cores   = 1;
-	mc_ptr->max_cores   = 0xffff;
-	mc_ptr->min_threads = 1;
-	mc_ptr->max_threads = 0xffff;
-/*	mc_ptr is initialized to zero by xmalloc*/
-/*	mc_ptr->ntasks_per_socket = 0; */
-/*	mc_ptr->ntasks_per_core   = 0; */
-/*	mc_ptr->plane_size        = 0; */
-	return mc_ptr;
-}
-
-extern int select_p_step_begin(struct step_record *step_ptr)
-{
-	slurm_step_layout_t *step_layout = step_ptr->step_layout;
-	struct select_cr_job *job;
-	struct node_cr_record *this_node;
-	int job_node_inx, step_node_inx, host_index;
-	uint32_t avail_mem, step_mem;
- 
-	xassert(select_cr_job_list);
-	xassert(step_ptr->job_ptr);
-	xassert(step_ptr->job_ptr->details);
-	xassert(step_ptr->step_node_bitmap);
-
-	if (step_layout == NULL)
-		return SLURM_SUCCESS;	/* batch script */
-	if (step_ptr->job_ptr->details->job_min_memory)
-		return SLURM_SUCCESS;
-	if ((cr_type != CR_CORE_MEMORY) && (cr_type != CR_CPU_MEMORY) &&
-	    (cr_type != CR_MEMORY) && (cr_type != CR_SOCKET_MEMORY))
-		return SLURM_SUCCESS;
-
-	job = list_find_first(select_cr_job_list, _find_job_by_id,
-			      &step_ptr->job_ptr->job_id);
-	if (!job) {
-		error("select_p_step_begin: could not find step %u.%u",
-		      step_ptr->job_ptr->job_id, step_ptr->step_id);
-		return ESLURM_INVALID_JOB_ID;
-	}
-
-	/* test if there is sufficient memory */
-	step_node_inx = -1;
-	for (host_index = 0; host_index < select_node_cnt; host_index++) {
-		if (bit_test(step_ptr->step_node_bitmap, host_index) == 0)
-			continue;
-		step_node_inx++;
-
-		this_node = &select_node_ptr[host_index];
-		step_mem = step_layout->tasks[step_node_inx] * 
-			   step_ptr->mem_per_task;
-		avail_mem = select_node_ptr[host_index].real_memory;
-		if ((this_node->alloc_memory + step_mem) > avail_mem)
-			return SLURM_ERROR;	/* no room */
-	}
-
-	/* reserve the memory */
-	job_node_inx = -1;
-	step_node_inx = -1;
-	for (host_index = 0; host_index < select_node_cnt; host_index++) {
-		if (bit_test(job->node_bitmap, host_index) == 0)
-			continue;
-		job_node_inx++;
-		if (bit_test(step_ptr->step_node_bitmap, host_index) == 0)
-			continue;
-		step_node_inx++;
 
-		this_node = &select_node_ptr[host_index];
-		step_mem = step_layout->tasks[step_node_inx] * 
-			   step_ptr->mem_per_task;
-		job->alloc_memory[job_node_inx] += step_mem;
-		this_node->alloc_memory += step_mem;
-	}
-	last_cr_update_time = time(NULL);
 	return SLURM_SUCCESS;
 }
 
-extern int select_p_step_fini(struct step_record *step_ptr)
+extern List select_p_get_config(void)
 {
-	slurm_step_layout_t *step_layout = step_ptr->step_layout;
-	struct select_cr_job *job;
-	struct node_cr_record *this_node;
-	int job_node_inx, step_node_inx, host_index, rc = SLURM_SUCCESS;
-	uint32_t step_mem;
- 
-	xassert(select_cr_job_list);
-	xassert(step_ptr->job_ptr);
-	xassert(step_ptr->job_ptr->details);
-	xassert(step_ptr->step_node_bitmap);
-
-	if (step_layout == NULL)
-		return SLURM_SUCCESS;	/* batch script */
-	if (step_ptr->job_ptr->details->job_min_memory)
-		return SLURM_SUCCESS;
-	if ((cr_type != CR_CORE_MEMORY) && (cr_type != CR_CPU_MEMORY) &&
-	    (cr_type != CR_MEMORY) && (cr_type != CR_SOCKET_MEMORY))
-		return SLURM_SUCCESS;
-
-	job = list_find_first(select_cr_job_list, _find_job_by_id,
-			      &step_ptr->job_ptr->job_id);
-	if (!job) {
-		error("select_p_step_fini: could not find step %u.%u",
-		      step_ptr->job_ptr->job_id, step_ptr->step_id);
-		return ESLURM_INVALID_JOB_ID;
-	}
-
-	job_node_inx = -1;
-	step_node_inx = -1;
-	for (host_index = 0; host_index < select_node_cnt; host_index++) {
-		if (bit_test(job->node_bitmap, host_index) == 0)
-			continue;
-		job_node_inx++;
-		if (bit_test(step_ptr->step_node_bitmap, host_index) == 0)
-			continue;
-		step_node_inx++;
-
-		this_node = &select_node_ptr[host_index];
-		step_mem = step_layout->tasks[step_node_inx] * 
-			   step_ptr->mem_per_task;
-		if (job->alloc_memory[job_node_inx] >= step_mem)
-			job->alloc_memory[job_node_inx] -= step_mem;
-		else {
-			if (rc == SLURM_SUCCESS) {
-				error("select_p_step_fini: job alloc_memory "
-				      "underflow on %s",
-				      this_node->node_ptr->name);
-				rc = SLURM_ERROR;  
-			}
-			job->alloc_memory[host_index] = 0;
-		}
-		if (this_node->alloc_memory >= step_mem)
-			this_node->alloc_memory -= step_mem;
-		else {
-			if (rc == SLURM_SUCCESS) {
-				error("select_p_step_fini: node alloc_memory "
-				      "underflow on %s",
-				      this_node->node_ptr->name);
-				rc = SLURM_ERROR;  
-			}
-			this_node->alloc_memory = 0;
-		}
-	}
-	last_cr_update_time = time(NULL);
-	return rc;
+	return NULL;
 }
diff --git a/src/plugins/select/cons_res/select_cons_res.h b/src/plugins/select/cons_res/select_cons_res.h
index 63f50c6d63a7434ddc7393afec438465db34a979..3acfd3c16eb1cfbab88c110b6f005ff43fb459b9 100644
--- a/src/plugins/select/cons_res/select_cons_res.h
+++ b/src/plugins/select/cons_res/select_cons_res.h
@@ -5,10 +5,11 @@
  *****************************************************************************
  *  Copyright (C) 2006 Hewlett-Packard Development Company, L.P.
  *  Written by Susanne M. Balle, <susanne.balle@hp.com>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -45,6 +46,7 @@
 #include <slurm/slurm.h>
 #include <slurm/slurm_errno.h>
 
+#include "src/common/bitstring.h"
 #include "src/common/list.h"
 #include "src/common/log.h"
 #include "src/common/node_select.h"
@@ -58,107 +60,70 @@
 
 #include "src/slurmd/slurmd/slurmd.h"
 
-/* part_cr_record keeps track of the allocated cores of a node that
- * has been assigned to a partition. SLURM allows a node to be
- * assigned to more than one partition. One or more partitions
- * may be configured to share the cores with more than one job.
- */
-struct part_cr_record {
-	struct part_record *part_ptr;	/* ptr to slurmctld partition record */
-	uint16_t *alloc_cores;		/* core count per socket reserved by
-					 * already scheduled jobs */
-	uint16_t num_rows;		/* number of rows in alloc_cores. The
-					 * length of alloc_cores is
-					 * num_sockets * num_rows. */
-	struct part_cr_record *next;	/* ptr to next part_cr_record */
-};
-
 /*
- * node_cr_record.node_state assists with the unique state of each node.
+ * node_res_record.node_state assists with the unique state of each node.
+ * When a job is allocated, these flags provide protection for nodes in a
+ * Shared=NO or Shared=EXCLUSIVE partition from other jobs.
+ *
  * NOTES:
  * - If node is in use by Shared=NO part, some CPUs/memory may be available
- * - Caution with NODE_CR_AVAILABLE: a Sharing partition could be full!!
+ * - Caution with NODE_CR_AVAILABLE: a Sharing partition could be full.
+ *
+ * - these values are staggered so that they can be incremented as multiple
+ *   jobs are allocated to each node. This is needed to support preemption,
+ *   which can override these protections.
  */
 enum node_cr_state {
-	NODE_CR_RESERVED, /* node is NOT available for use by any other jobs */
-	NODE_CR_ONE_ROW,  /* node is in use by Shared=NO part */
-	NODE_CR_AVAILABLE /* The node may be IDLE or IN USE by Sharing part(s)*/
+	NODE_CR_AVAILABLE = 0,  /* The node may be IDLE or IN USE (shared) */
+	NODE_CR_ONE_ROW = 1,    /* node is in use by Shared=NO part */
+	NODE_CR_RESERVED = 100, /* node is in use by Shared=EXCLUSIVE part */
 };
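+
+/* Example of the staggered encoding (illustrative): if each job that
+ * forbids sharing its CPUs adds NODE_CR_ONE_ROW (1) to node_state and a
+ * Shared=EXCLUSIVE job adds NODE_CR_RESERVED (100), then tests such as
+ * "node_state >= NODE_CR_RESERVED" remain valid while several jobs are
+ * allocated to, and later removed from, the same node. */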
 
-/* node_cr_record keeps track of the resources within a node which 
- * have been reserved by already scheduled jobs. 
- *
- * NOTE: The locations of these entries are synchronized with the 
- * job records in slurmctld (entry X in both tables are the same).
- */
-struct node_cr_record {
+/* a partition's per-row CPU allocation data */
+struct part_row_data {
+	bitstr_t *row_bitmap;		/* contains all jobs for this row */
+	uint32_t num_jobs;		/* Number of jobs in this row */
+	struct select_job_res **job_list;/* List of jobs in this row */
+	uint32_t job_list_size;		/* Size of job_list array */
+};
+
+/* partition CPU allocation data */
+struct part_res_record {
+	char *name;			/* name of the partition */
+	uint16_t priority;		/* Partition priority */
+	uint16_t num_rows;		/* Number of row_bitmaps */
+	struct part_row_data *row;	/* array of rows containing jobs */
+	struct part_res_record *next;	/* Ptr to next part_res_record */
+};
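+
+/* Example (illustrative): a partition configured Shared=FORCE:2 would
+ * carry num_rows = 2; each row_bitmap holds a non-overlapping set of
+ * allocated cores, so two jobs may occupy the same core only from
+ * different rows, i.e. different time slices. */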
+
+/* per-node resource data */
+struct node_res_record {
 	struct node_record *node_ptr;	/* ptr to the actual node */
 	uint16_t cpus;			/* count of processors configured */
 	uint16_t sockets;		/* count of sockets configured */
 	uint16_t cores;			/* count of cores configured */
-	uint16_t threads;		/* count of threads configured */
+	uint16_t vpus;			/* count of virtual cpus (hyperthreads)
+					 * configured per core */
 	uint32_t real_memory;		/* MB of real memory configured */
-	enum node_cr_state node_state;	/* see node_cr_state comments */
-	struct part_cr_record *parts;	/* ptr to singly-linked part_cr_record
-					 * list that contains alloc_core info */
-	uint32_t alloc_memory;		/* real memory reserved by already
-					 * scheduled jobs */
 };
-extern struct node_cr_record *select_node_ptr;
-extern uint16_t select_fast_schedule;
 
-/*** NOTE: If any changes are made here, the following data structure has
- ***       persistent state which is maintained by select_cons_res.c:
- ***		select_p_state_save
- ***		select_p_state_restore
- *** 
- *** as well as tracked by version control
- ***		select_cons_res.c:pstate_version
- *** which should be incremented if any changes are made.
- **/
-struct select_cr_job {
-	/* Information preserved across reboots */
-	uint32_t job_id;	/* job ID, default set by SLURM        */
-	enum node_cr_state node_req;    /* see node_cr_state comments */
-	uint32_t nprocs;	/* --nprocs=n,      -n n               */
-	uint32_t nhosts;	/* number of hosts allocated to job    */
-	uint16_t *cpus;		/* number of processors on each host,
-				 * if using Moab scheduler (sched/wiki2)
-				 * then this will be initialized to the
-				 * number of CPUs desired on the node	*/
-	uint16_t *alloc_cpus;	/* number of allocated threads/cpus on
-				 * each host */
-	uint16_t *num_sockets;	/* number of sockets in alloc_cores[node] */
-	uint16_t **alloc_cores;	/* number of allocated cores on each
-				 * host */
-	uint32_t *alloc_memory;	/* number of allocated MB of real
-				 * memory on each host */
-	uint16_t *node_offset;	/* the node_cr_record->alloc_cores row to
-				 * which this job was assigned */
-
-	/* Information re-established after reboot */
-	struct job_record *job_ptr;	/* pointer to slurmctld job record */
-	uint16_t state;		/* job state information               */
-	bitstr_t *node_bitmap;	/* bitmap of nodes allocated to job, 
-				 * NOTE: The node_bitmap in slurmctld's job
-				 * structure clears bits as on completion.
-				 * This bitmap is persistent through lifetime
-				 * of the job. */
+/* per-node resource usage record */
+struct node_use_record {
+	uint16_t node_state;		/* see node_cr_state comments */
+	uint32_t alloc_memory;		/* real memory reserved by already
+					 * scheduled jobs */
 };
 
-struct node_cr_record * find_cr_node_record (const char *name);
 
-/* Find a partition record based upon pointer to slurmctld record */
-struct part_cr_record *get_cr_part_ptr(struct node_cr_record *this_node,
-				      struct part_record *part_ptr);
+extern uint16_t select_fast_schedule;
 
-void get_resources_this_node(uint16_t *cpus, 
-			     uint16_t *sockets, 
-			     uint16_t *cores,
-			     uint16_t *threads, 
-			     struct node_cr_record *this_cr_node,
-			     uint32_t jobid);
+extern struct part_res_record *select_part_record;
+extern struct node_res_record *select_node_record;
+extern struct node_use_record *select_node_usage;
 
-extern struct multi_core_data * create_default_mc(void);
+extern void cr_sort_part_rows(struct part_res_record *p_ptr);
+extern uint32_t cr_get_coremap_offset(uint32_t node_index);
+extern uint32_t cr_get_node_num_cores(uint32_t node_index);
+extern bool cr_priority_selection_enabled(void);
 
 #endif /* !_CONS_RES_H */
diff --git a/src/plugins/select/linear/Makefile.in b/src/plugins/select/linear/Makefile.in
index c31c64baba7a315e7cae301585494b275c00c58d..09ae7ac33ea30a6ae40974c0a28a451f527d6521 100644
--- a/src/plugins/select/linear/Makefile.in
+++ b/src/plugins/select/linear/Makefile.in
@@ -43,14 +43,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -109,6 +113,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
diff --git a/src/plugins/select/linear/select_linear.c b/src/plugins/select/linear/select_linear.c
index 7791bd222c6cb39b12af280a54cc68aa18eab289..2e6bcb7c867b164efb718aa76c42f69580c071f0 100644
--- a/src/plugins/select/linear/select_linear.c
+++ b/src/plugins/select/linear/select_linear.c
@@ -4,13 +4,14 @@
  *  of sets of consecutive nodes using a best-fit algorithm.
  *****************************************************************************
  *  Copyright (C) 2004-2007 The Regents of the University of California.
- *  Copyright (C) 2008 Lawrence Livermore National Security.
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -59,6 +60,7 @@
 #include "src/common/log.h"
 #include "src/common/node_select.h"
 #include "src/common/parse_time.h"
+#include "src/common/select_job_res.h"
 #include "src/common/slurm_protocol_api.h"
 #include "src/common/slurm_resource_info.h"
 #include "src/common/xassert.h"
@@ -74,17 +76,17 @@
 static int  _add_job_to_nodes(struct node_cr_record *node_cr_ptr,
 			      struct job_record *job_ptr, char *pre_err, 
 			      int suspended);
-static int  _add_step(struct step_record *step_ptr);
+static void _build_select_struct(struct job_record *job_ptr, bitstr_t *bitmap);
 static void _cr_job_list_del(void *x);
 static int  _cr_job_list_sort(void *x, void *y);
-static void _del_list_step(void *x);
 static void _dump_node_cr(struct node_cr_record *node_cr_ptr);
 static struct node_cr_record *_dup_node_cr(struct node_cr_record *node_cr_ptr);
 static int  _find_job_mate(struct job_record *job_ptr, bitstr_t *bitmap,
 			   uint32_t min_nodes, uint32_t max_nodes,
 			   uint32_t req_nodes);
-static int  _find_step(struct step_record *step_ptr);
 static void _free_node_cr(struct node_cr_record *node_cr_ptr);
+static uint16_t _get_avail_cpus(struct job_record *job_ptr, int index);
+static uint16_t _get_total_cpus(int index);
 static void _init_node_cr(void);
 static int _job_count_bitmap(struct node_cr_record *node_cr_ptr,
 			     struct job_record *job_ptr, 
@@ -93,7 +95,9 @@ static int _job_count_bitmap(struct node_cr_record *node_cr_ptr,
 static int _job_test(struct job_record *job_ptr, bitstr_t *bitmap,
 		     uint32_t min_nodes, uint32_t max_nodes, 
 		     uint32_t req_nodes);
-static int _remove_step(struct step_record *step_ptr);
+static int _job_test_topo(struct job_record *job_ptr, bitstr_t *bitmap,
+			  uint32_t min_nodes, uint32_t max_nodes, 
+			  uint32_t req_nodes);
 static int _rm_job_from_nodes(struct node_cr_record *node_cr_ptr,
 			      struct job_record *job_ptr, char *pre_err, 
 			      int remove_all);
@@ -137,6 +141,8 @@ static struct node_record *select_node_ptr = NULL;
 static int select_node_cnt = 0;
 static uint16_t select_fast_schedule;
 static uint16_t cr_type;
+static bool cr_priority_test      = false;
+static bool cr_priority_selection = false;
 
 static struct node_cr_record *node_cr_ptr = NULL;
 static pthread_mutex_t cr_mutex = PTHREAD_MUTEX_INITIALIZER;
@@ -204,8 +210,7 @@ static int _init_status_pthread(void)
 
 	slurm_mutex_lock( &thread_flag_mutex );
 	if ( xcpu_thread ) {
-		debug2("XCPU thread already running, not starting "
-			"another");
+		debug2("XCPU thread already running, not starting another");
 		slurm_mutex_unlock( &thread_flag_mutex );
 		return SLURM_ERROR;
 	}
@@ -227,11 +232,11 @@ static int _fini_status_pthread(void)
 	if ( xcpu_thread ) {
 		agent_fini = 1;
 		for (i=0; i<4; i++) {
+			sleep(1);
 			if (pthread_kill(xcpu_thread, 0)) {
 				xcpu_thread = 0;
 				break;
 			}
-			sleep(1);
 		}
 		if ( xcpu_thread ) {
 			error("could not kill XCPU agent thread");
@@ -243,6 +248,19 @@ static int _fini_status_pthread(void)
 }
 #endif
 
+static inline bool _cr_priority_selection_enabled(void)
+{
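+	/* Look up the scheduler type once and cache the result;
+	 * priority-based selection applies when gang scheduling
+	 * (sched/gang) is configured */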
+	if (!cr_priority_test) {
+		char *sched_type = slurm_get_sched_type();
+		if (strcmp(sched_type, "sched/gang") == 0)
+			cr_priority_selection = true;
+		xfree(sched_type);
+		cr_priority_test = true;
+	}
+	return cr_priority_selection;
+}
+
 static bool _enough_nodes(int avail_nodes, int rem_nodes, 
 		uint32_t min_nodes, uint32_t req_nodes)
 {
@@ -417,6 +435,107 @@ static uint16_t _get_avail_cpus(struct job_record *job_ptr, int index)
 	return(avail_cpus);
 }
 
+/*
+ * _get_total_cpus - Get the total number of cpus on a node
+ *	Note that the CPU count is expressed in units of the 
+ *	lowest-level logical processor (LLLP).
+ * IN index - index of node's configuration information in select_node_ptr
+ */
+static uint16_t _get_total_cpus(int index)
+{
+	struct node_record *node_ptr = &(select_node_ptr[index]);
+	if (select_fast_schedule)
+		return node_ptr->config_ptr->cpus;
+	else
+		return node_ptr->cpus;
+}
+
+/* Build the full select_job_res_t structure for a job based upon the nodes
+ *	allocated to it (the bitmap) and the job's memory requirement */
+static void _build_select_struct(struct job_record *job_ptr, bitstr_t *bitmap)
+{
+	int i, j, k;
+	int first_bit, last_bit;
+	uint32_t node_cpus, total_cpus = 0, node_cnt;
+	struct node_record *node_ptr;
+	uint32_t job_memory_cpu = 0, job_memory_node = 0;
+	bool memory_info = false;
+	select_job_res_t select_ptr;
+
+	if (job_ptr->details->job_min_memory && (cr_type == CR_MEMORY)) {
+		memory_info = true;
+		if (job_ptr->details->job_min_memory & MEM_PER_CPU) {
+			job_memory_cpu = job_ptr->details->job_min_memory &
+					 (~MEM_PER_CPU);
+		} else {
+			job_memory_node = job_ptr->details->job_min_memory;
+		}
+	}
+
+	if (job_ptr->select_job) {
+		/* Due to job requeue */
+		free_select_job_res(&job_ptr->select_job);
+	}
+
+	node_cnt = bit_set_count(bitmap);
+	job_ptr->select_job = select_ptr = create_select_job_res();
+	select_ptr->cpu_array_reps = xmalloc(sizeof(uint32_t) * node_cnt);
+	select_ptr->cpu_array_value = xmalloc(sizeof(uint16_t) * node_cnt);
+	select_ptr->cpus = xmalloc(sizeof(uint16_t) * node_cnt);
+	select_ptr->cpus_used = xmalloc(sizeof(uint16_t) * node_cnt);
+	select_ptr->memory_allocated = xmalloc(sizeof(uint32_t) * node_cnt);
+	select_ptr->memory_used = xmalloc(sizeof(uint32_t) * node_cnt);
+	select_ptr->nhosts = node_cnt;
+	select_ptr->node_bitmap = bit_copy(bitmap);
+	if (select_ptr->node_bitmap == NULL)
+		fatal("bit_copy malloc failure");
+	select_ptr->nprocs = job_ptr->total_procs;
+	if (build_select_job_res(select_ptr, (void *)select_node_ptr,
+				 select_fast_schedule))
+		error("_build_select_struct: build_select_job_res: %m");
+
+	first_bit = bit_ffs(bitmap);
+	last_bit  = bit_fls(bitmap);
+	for (i=first_bit, j=0, k=-1; ((i<=last_bit) && (first_bit>=0)); i++) {
+		if (!bit_test(bitmap, i))
+			continue;
+		node_ptr = &(select_node_ptr[i]);
+		if (select_fast_schedule)
+			node_cpus = node_ptr->config_ptr->cpus;
+		else
+			node_cpus = node_ptr->cpus;
+		select_ptr->cpus[j] = node_cpus;
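+		/* cpu_array_value and cpu_array_reps form a run-length
+		 * encoding of the per-node CPU counts */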
+		if ((k == -1) || 
+		    (select_ptr->cpu_array_value[k] != node_cpus)) {
+			select_ptr->cpu_array_cnt++;
+			select_ptr->cpu_array_reps[++k] = 1;
+			select_ptr->cpu_array_value[k] = node_cpus;
+		} else
+			select_ptr->cpu_array_reps[k]++;
+		total_cpus += node_cpus;
+
+		if (memory_info) {
+			if (job_memory_node) {
+				select_ptr->memory_allocated[j] =
+					job_memory_node;
+			} else if (job_memory_cpu) {
+				select_ptr->memory_allocated[j] =
+					job_memory_cpu * node_cpus;
+			}
+		}
+
+		if (set_select_job_res_node(select_ptr, j)) {
+			error("_build_select_struct: set_select_job_res_node: "
+			      "%m");
+		}
+		j++;
+	}
+	if (select_ptr->nprocs != total_cpus) {
+		error("_build_select_struct: nprocs mismatch %u != %u",
+		      select_ptr->nprocs, total_cpus);
+	}
+}
+
 /*
  * select_p_job_test - Given a specification of scheduling requirements, 
  *	identify the nodes which "best" satisfy the request.
@@ -429,7 +548,7 @@ static uint16_t _get_avail_cpus(struct job_record *job_ptr, int index)
  *	satisfy the request are cleared, other left set
  * IN min_nodes - minimum count of nodes
  * IN req_nodes - requested (or desired) count of nodes
- * IN max_nodes - maximum count of nodes (0==don't care)
+ * IN max_nodes - maximum count of nodes
  * IN mode - SELECT_MODE_RUN_NOW: try to schedule job now
  *           SELECT_MODE_TEST_ONLY: test if job can ever run
  *           SELECT_MODE_WILL_RUN: determine when and where job can run
@@ -473,7 +592,7 @@ extern int select_p_job_test(struct job_record *job_ptr, bitstr_t *bitmap,
 	}
 
 	if (mode != SELECT_MODE_TEST_ONLY) {
-		if (job_ptr->details->shared == 1) {
+		if (job_ptr->details->shared) {
 			max_share = job_ptr->part_ptr->max_share & 
 					~SHARED_FORCE;
 		} else	/* ((shared == 0) || (shared == (uint16_t) NO_VAL)) */
@@ -492,6 +611,8 @@ extern int select_p_job_test(struct job_record *job_ptr, bitstr_t *bitmap,
 		job_ptr->details->job_min_memory = 0;
 	}
 
+	debug3("select/linear: job_test: job %u max_share %d avail nodes %u",
+		job_ptr->job_id, max_share, bit_set_count(bitmap));
 	orig_map = bit_copy(bitmap);
 	for (max_run_job=min_share; max_run_job<max_share; max_run_job++) {
 		bool last_iteration = (max_run_job == (max_share -1));
@@ -503,12 +624,14 @@ extern int select_p_job_test(struct job_record *job_ptr, bitstr_t *bitmap,
 					      orig_map, bitmap, 
 					      max_run_job, 
 					      max_run_job + sus_jobs);
+			debug3("select/linear: job_test: found %d nodes for %u",
+				j, job_ptr->job_id);
 			if ((j == prev_cnt) || (j < min_nodes))
 				continue;
 			prev_cnt = j;
 			if ((mode == SELECT_MODE_RUN_NOW) && (max_run_job > 0)) {
-				/* We need to share. 
-				 * Try to find suitable job to share nodes with */
+				/* We need to share. Try to find 
+				 * suitable job to share nodes with */
 				rc = _find_job_mate(job_ptr, bitmap, min_nodes, 
 						    max_nodes, req_nodes);
 				if (rc == SLURM_SUCCESS)
@@ -523,6 +646,8 @@ extern int select_p_job_test(struct job_record *job_ptr, bitstr_t *bitmap,
 	}
 	bit_free(orig_map);
 	slurm_mutex_unlock(&cr_mutex);
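+	/* Record the allocation in the job's select_job_res structure;
+	 * it is used later when adding or removing the job from nodes */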
+	if ((rc == SLURM_SUCCESS) && (mode == SELECT_MODE_RUN_NOW))
+		_build_select_struct(job_ptr, bitmap);
 	if (save_mem)
 		job_ptr->details->job_min_memory = save_mem;
 	return rc;
@@ -612,12 +737,62 @@ static int _job_count_bitmap(struct node_cr_record *node_cr_ptr,
 		}
 
 		if ((run_job_cnt != NO_SHARE_LIMIT) &&
+		    (!_cr_priority_selection_enabled()) &&
 		    (node_cr_ptr[i].exclusive_jobid != 0)) {
 			/* already reserved by some exclusive job */
 			bit_clear(jobmap, i);
 			continue;
 		}
 
+		if (_cr_priority_selection_enabled()) {
+			/* clear this node if any higher-priority
+			 * partitions have existing allocations */
+			total_jobs = 0;
+			part_cr_ptr = node_cr_ptr[i].parts;
+			for( ; part_cr_ptr; part_cr_ptr = part_cr_ptr->next) {
+				if (part_cr_ptr->part_ptr->priority <=
+				    job_ptr->part_ptr->priority)
+					continue;
+				total_jobs += part_cr_ptr->tot_job_cnt;
+			}
+			if ((run_job_cnt != NO_SHARE_LIMIT) &&
+			    (total_jobs > 0)) {
+				bit_clear(jobmap, i);
+				continue;
+			}
+			/* if not sharing, then check with other partitions
+			 * of equal priority. Otherwise, load-balance within
+			 * the local partition */
+			total_jobs = 0;
+			total_run_jobs = 0;
+			part_cr_ptr = node_cr_ptr[i].parts;
+			for( ; part_cr_ptr; part_cr_ptr = part_cr_ptr->next) {
+				if (part_cr_ptr->part_ptr->priority !=
+				    job_ptr->part_ptr->priority)
+					continue;
+				if (!job_ptr->details->shared) {
+					total_run_jobs +=
+						      part_cr_ptr->run_job_cnt;
+					total_jobs += part_cr_ptr->tot_job_cnt;
+					continue;
+				}
+				if (part_cr_ptr->part_ptr == job_ptr->part_ptr){
+					total_run_jobs +=
+						      part_cr_ptr->run_job_cnt;
+					total_jobs += part_cr_ptr->tot_job_cnt;
+					break;
+				}
+			}
+			if ((total_run_jobs <= run_job_cnt) &&
+			    (total_jobs     <= tot_job_cnt)) {
+				bit_set(jobmap, i);
+				count++;
+			} else {
+				bit_clear(jobmap, i);
+			}
+			continue;
+		}
+
 		total_jobs = 0;
 		total_run_jobs = 0;
 		part_cr_ptr = node_cr_ptr[i].parts;
@@ -665,12 +840,15 @@ static int _find_job_mate(struct job_record *job_ptr, bitstr_t *bitmap,
 
 	job_iterator = list_iterator_create(job_list);
 	while ((job_scan_ptr = (struct job_record *) list_next(job_iterator))) {
-		if ((job_scan_ptr->part_ptr  != job_ptr->part_ptr) ||
-		    (job_scan_ptr->job_state != JOB_RUNNING) ||
-		    (job_scan_ptr->node_cnt  != req_nodes) ||
-		    (job_scan_ptr->total_procs < job_ptr->num_procs) ||
+		if ((job_scan_ptr->job_state  != JOB_RUNNING)		||
+		    (job_scan_ptr->node_cnt   != req_nodes)		||
+		    (job_scan_ptr->total_procs < job_ptr->num_procs)	||
 		    (!bit_super_set(job_scan_ptr->node_bitmap, bitmap)))
 			continue;
+		if (job_scan_ptr->details && job_ptr->details &&
+		    (job_scan_ptr->details->contiguous != 
+		     job_ptr->details->contiguous))
+			continue;
 
 		if (job_ptr->details->req_node_bitmap &&
 		    (!bit_super_set(job_ptr->details->req_node_bitmap,
@@ -694,8 +872,8 @@ static int _find_job_mate(struct job_record *job_ptr, bitstr_t *bitmap,
 /* _job_test - does most of the real work for select_p_job_test(), which 
  *	pretty much just handles load-leveling and max_share logic */
 static int _job_test(struct job_record *job_ptr, bitstr_t *bitmap,
-			uint32_t min_nodes, uint32_t max_nodes, 
-			uint32_t req_nodes)
+		     uint32_t min_nodes, uint32_t max_nodes, 
+		     uint32_t req_nodes)
 {
 	int i, index, error_code = EINVAL, sufficient;
 	int *consec_nodes;	/* how many nodes we can add from this 
@@ -710,12 +888,21 @@ static int _job_test(struct job_record *job_ptr, bitstr_t *bitmap,
 	int rem_cpus, rem_nodes;	/* remaining resources desired */
 	int best_fit_nodes, best_fit_cpus, best_fit_req;
 	int best_fit_location = 0, best_fit_sufficient;
-	int avail_cpus, alloc_cpus = 0;
+	int avail_cpus, alloc_cpus = 0, total_cpus = 0;
+
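+	/* Quick test: fail if fewer nodes are available than the
+	 * required minimum */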
+	if (bit_set_count(bitmap) < min_nodes)
+		return error_code;
 
 	if ((job_ptr->details->req_node_bitmap) &&
 	    (!bit_super_set(job_ptr->details->req_node_bitmap, bitmap)))
 		return error_code;
 
+	if (switch_record_cnt && switch_record_table) {
+		/* Perform optimized resource selection based upon topology */
+		return _job_test_topo(job_ptr, bitmap, 
+				      min_nodes, max_nodes, req_nodes);
+	}
+
 	consec_index = 0;
 	consec_size  = 50;	/* start allocation for 50 sets of 
 				 * consecutive nodes */
@@ -749,10 +936,11 @@ static int _job_test(struct job_record *job_ptr, bitstr_t *bitmap,
 					/* first required node in set */
 					consec_req[consec_index] = index;
 				}
-				rem_cpus   -= avail_cpus;
-				alloc_cpus += avail_cpus;
 				rem_nodes--;
 				max_nodes--;
+				rem_cpus   -= avail_cpus;
+				alloc_cpus += avail_cpus;
+				total_cpus += _get_total_cpus(index);
 			} else {	 /* node not required (yet) */
 				bit_clear(bitmap, index); 
 				consec_cpus[consec_index] += avail_cpus;
@@ -786,7 +974,7 @@ static int _job_test(struct job_record *job_ptr, bitstr_t *bitmap,
 		consec_end[consec_index++] = index - 1;
 
 #if SELECT_DEBUG
-	/* don't compile this, slows things down too much */
+	/* don't compile this, it slows things down too much */
 	debug3("rem_cpus=%d, rem_nodes=%d", rem_cpus, rem_nodes);
 	for (i = 0; i < consec_index; i++) {
 		if (consec_req[i] != -1)
@@ -811,10 +999,16 @@ static int _job_test(struct job_record *job_ptr, bitstr_t *bitmap,
 		best_fit_req = -1;	/* first required node, -1 if none */
 		for (i = 0; i < consec_index; i++) {
 			if (consec_nodes[i] == 0)
-				continue;
-			sufficient = (consec_cpus[i] >= rem_cpus)
-			&& _enough_nodes(consec_nodes[i], rem_nodes,
-					 min_nodes, req_nodes);
+				continue;	/* no usable nodes here */
+
+			if (job_ptr->details->contiguous &&
+			    job_ptr->details->req_node_bitmap &&
+			    (consec_req[i] == -1))
+				continue;	/* no required nodes here */
+
+			sufficient = (consec_cpus[i] >= rem_cpus) &&
+				     _enough_nodes(consec_nodes[i], rem_nodes,
+						   min_nodes, req_nodes);
 
 			/* if first possibility OR */
 			/* contains required nodes OR */
@@ -833,6 +1027,23 @@ static int _job_test(struct job_record *job_ptr, bitstr_t *bitmap,
 				best_fit_req = consec_req[i];
 				best_fit_sufficient = sufficient;
 			}
+
+			if (job_ptr->details->contiguous &&
+			    job_ptr->details->req_node_bitmap) {
+				/* Must wait for all required nodes to be 
+				 * in a single consecutive block */
+				int j, other_blocks = 0;
+				for (j = (i+1); j < consec_index; j++) {
+					if (consec_req[j] != -1) {
+						other_blocks = 1;
+						break;
+					}
+				}
+				if (other_blocks) {
+					best_fit_nodes = 0;
+					break;
+				}
+			}
 		}
 		if (best_fit_nodes == 0)
 			break;
@@ -858,6 +1069,7 @@ static int _job_test(struct job_record *job_ptr, bitstr_t *bitmap,
 				avail_cpus = _get_avail_cpus(job_ptr, i);
 				rem_cpus   -= avail_cpus;
 				alloc_cpus += avail_cpus;
+				total_cpus += _get_total_cpus(i);
 			}
 			for (i = (best_fit_req - 1);
 			     i >= consec_start[best_fit_location]; i--) {
@@ -872,6 +1084,7 @@ static int _job_test(struct job_record *job_ptr, bitstr_t *bitmap,
 				avail_cpus = _get_avail_cpus(job_ptr, i);
 				rem_cpus   -= avail_cpus;
 				alloc_cpus += avail_cpus;
+				total_cpus += _get_total_cpus(i);
 			}
 		} else {
 			for (i = consec_start[best_fit_location];
@@ -887,6 +1100,7 @@ static int _job_test(struct job_record *job_ptr, bitstr_t *bitmap,
 				avail_cpus = _get_avail_cpus(job_ptr, i);
 				rem_cpus   -= avail_cpus;
 				alloc_cpus += avail_cpus;
+				total_cpus += _get_total_cpus(i);
 			}
 		}
 		if (job_ptr->details->contiguous || 
@@ -904,7 +1118,7 @@ static int _job_test(struct job_record *job_ptr, bitstr_t *bitmap,
 	}
 	if (error_code == SLURM_SUCCESS) {
 		/* job's total_procs is needed for SELECT_MODE_WILL_RUN */
-		job_ptr->total_procs = alloc_cpus;
+		job_ptr->total_procs = total_cpus;
 	}
 
 	xfree(consec_cpus);
@@ -915,6 +1129,302 @@ static int _job_test(struct job_record *job_ptr, bitstr_t *bitmap,
 	return error_code;
 }
 
+/*
+ * _job_test_topo - A topology-aware version of _job_test()
+ * NOTE: The logic here is almost identical to that of _eval_nodes_topo() in
+ *       select/cons_res/job_test.c. Any bug found here is probably also there.
+ */
+static int _job_test_topo(struct job_record *job_ptr, bitstr_t *bitmap,
+			  uint32_t min_nodes, uint32_t max_nodes, 
+			  uint32_t req_nodes)
+{
+	bitstr_t **switches_bitmap;		/* nodes on this switch */
+	int       *switches_cpu_cnt;		/* total CPUs on switch */
+	int       *switches_node_cnt;		/* total nodes on switch */
+	int       *switches_required;		/* set if has required node */
+
+	bitstr_t  *avail_nodes_bitmap = NULL;	/* nodes on any switch */
+	bitstr_t  *req_nodes_bitmap   = NULL;
+	int rem_cpus, rem_nodes;	/* remaining resources desired */
+	int avail_cpus, alloc_cpus = 0, total_cpus = 0;
+	int i, j, rc = SLURM_SUCCESS;
+	int best_fit_inx, first, last;
+	int best_fit_nodes, best_fit_cpus;
+	int best_fit_location = 0, best_fit_sufficient;
+	bool sufficient;
+
+	rem_cpus = job_ptr->num_procs;
+	if (req_nodes > min_nodes)
+		rem_nodes = req_nodes;
+	else
+		rem_nodes = min_nodes;
+
+	if (job_ptr->details->req_node_bitmap) {
+		req_nodes_bitmap = bit_copy(job_ptr->details->req_node_bitmap);
+		i = bit_set_count(req_nodes_bitmap);
+		if (i > max_nodes) {
+			info("job %u requires more nodes than currently "
+			     "available (%u>%u)",
+			     job_ptr->job_id, i, max_nodes);
+			rc = EINVAL;
+			goto fini;
+		}
+	}
+
+	/* Construct a set of switch array entries, 
+	 * use the same indexes as switch_record_table in slurmctld */
+	switches_bitmap   = xmalloc(sizeof(bitstr_t *) * switch_record_cnt);
+	switches_cpu_cnt  = xmalloc(sizeof(int)        * switch_record_cnt);
+	switches_node_cnt = xmalloc(sizeof(int)        * switch_record_cnt);
+	switches_required = xmalloc(sizeof(int)        * switch_record_cnt);
+	avail_nodes_bitmap = bit_alloc(node_record_count);
+	for (i=0; i<switch_record_cnt; i++) {
+		switches_bitmap[i] = bit_copy(switch_record_table[i].
+					      node_bitmap);
+		bit_and(switches_bitmap[i], bitmap);
+		bit_or(avail_nodes_bitmap, switches_bitmap[i]);
+		switches_node_cnt[i] = bit_set_count(switches_bitmap[i]);
+		if (req_nodes_bitmap &&
+		    bit_overlap(req_nodes_bitmap, switches_bitmap[i])) {
+			switches_required[i] = 1;
+		}
+	}
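+	/* Clear the node bitmap; selected nodes are added back below */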
+	bit_nclear(bitmap, 0, node_record_count - 1);
+
+#if SELECT_DEBUG
+	/* Don't compile this, it slows things down too much */
+	for (i=0; i<switch_record_cnt; i++) {
+		char *node_names = NULL;
+		if (switches_node_cnt[i])
+			node_names = bitmap2node_name(switches_bitmap[i]);
+		debug("switch=%s nodes=%u:%s required:%u speed=%u",
+		      switch_record_table[i].name,
+		      switches_node_cnt[i], node_names,
+		      switches_required[i],
+		      switch_record_table[i].link_speed);
+		xfree(node_names);
+	}
+#endif
+
+	if (req_nodes_bitmap &&
+	    (!bit_super_set(req_nodes_bitmap, avail_nodes_bitmap))) {
+		info("job %u requires nodes not available on any switch",
+		     job_ptr->job_id);
+		rc = EINVAL;
+		goto fini;
+	}
+
+	if (req_nodes_bitmap) {
+		/* Accumulate specific required resources, if any */
+		first = bit_ffs(req_nodes_bitmap);
+		last  = bit_fls(req_nodes_bitmap);
+		for (i=first; ((i<=last) && (first>=0)); i++) {
+			if (!bit_test(req_nodes_bitmap, i))
+				continue;
+			if (max_nodes <= 0) {
+				info("job %u requires nodes than allowed",
+				     job_ptr->job_id);
+				rc = EINVAL;
+				goto fini;
+			}
+			bit_set(bitmap, i);
+			bit_clear(avail_nodes_bitmap, i);
+			rem_nodes--;
+			max_nodes--;
+			avail_cpus = _get_avail_cpus(job_ptr, i);
+			rem_cpus   -= avail_cpus;
+			alloc_cpus += avail_cpus;
+			total_cpus += _get_total_cpus(i);
+			for (j=0; j<switch_record_cnt; j++) {
+				if (!bit_test(switches_bitmap[j], i))
+					continue;
+				bit_clear(switches_bitmap[j], i);
+				switches_node_cnt[j]--;
+			}
+		}
+		if ((rem_nodes <= 0) && (rem_cpus <= 0))
+			goto fini;
+
+		/* Accumulate additional resources from leaf switches
+		 * that contain required nodes */
+		for (j=0; j<switch_record_cnt; j++) {
+			if ((switch_record_table[j].level != 0) ||
+			    (switches_node_cnt[j] == 0) ||
+			    (switches_required[j] == 0)) {
+				continue;
+			}
+			while ((max_nodes > 0) &&
+			       ((rem_nodes > 0) || (rem_cpus > 0))) {
+				i = bit_ffs(switches_bitmap[j]);
+				if (i == -1)
+					break;
+				bit_clear(switches_bitmap[j], i);
+				switches_node_cnt[j]--;
+				if (bit_test(bitmap, i)) {
+					/* node on multiple leaf switches
+					 * and already selected */
+					continue;
+				}
+				bit_set(bitmap, i);
+				bit_clear(avail_nodes_bitmap, i);
+				rem_nodes--;
+				max_nodes--;
+				avail_cpus = _get_avail_cpus(job_ptr, i);
+				rem_cpus   -= avail_cpus;
+				alloc_cpus += avail_cpus;
+				total_cpus += _get_total_cpus(i);
+			}
+		}
+		if ((rem_nodes <= 0) && (rem_cpus <= 0))
+			goto fini;
+
+		/* Update bitmaps and node counts for higher-level switches */
+		for (j=0; j<switch_record_cnt; j++) {
+			if (switches_node_cnt[j] == 0)
+				continue;
+			first = bit_ffs(switches_bitmap[j]);
+			if (first < 0)
+				continue;
+			last  = bit_fls(switches_bitmap[j]);
+			for (i=first; i<=last; i++) {
+				if (!bit_test(switches_bitmap[j], i))
+					continue;
+				if (!bit_test(avail_nodes_bitmap, i)) {
+					/* cleared from lower level */
+					bit_clear(switches_bitmap[j], i);
+					switches_node_cnt[j]--;
+				} else {
+					switches_cpu_cnt[j] += 
+						_get_avail_cpus(job_ptr, i);
+				}
+			}
+		}
+	} else {
+		/* No specific required nodes, calculate CPU counts */
+		for (j=0; j<switch_record_cnt; j++) {
+			first = bit_ffs(switches_bitmap[j]);
+			if (first < 0)
+				continue;
+			last  = bit_fls(switches_bitmap[j]);
+			for (i=first; i<=last; i++) {
+				if (!bit_test(switches_bitmap[j], i))
+					continue;
+				switches_cpu_cnt[j] += 
+					_get_avail_cpus(job_ptr, i);
+			}
+		}
+	}
+
+	/* Determine lowest level switch satisfying request with best fit */
+	best_fit_inx = -1;
+	for (j=0; j<switch_record_cnt; j++) {
+		if ((switches_cpu_cnt[j]  < rem_cpus) ||
+		    (!_enough_nodes(switches_node_cnt[j], rem_nodes,
+				    min_nodes, req_nodes)))
+			continue;
+		if ((best_fit_inx == -1) ||
+		    (switch_record_table[j].level <
+		     switch_record_table[best_fit_inx].level) ||
+		    ((switch_record_table[j].level ==
+		      switch_record_table[best_fit_inx].level) &&
+		     (switches_node_cnt[j] < switches_node_cnt[best_fit_inx])))
+			best_fit_inx = j;
+	}
+	if (best_fit_inx == -1) {
+		error("job %u: best_fit topology failure", job_ptr->job_id);
+		rc = EINVAL;
+		goto fini;
+	}
+	bit_and(avail_nodes_bitmap, switches_bitmap[best_fit_inx]);
+
+	/* Identify usable leaf switches (within the best-fit
+	 * higher-level switch) */
+	for (j=0; j<switch_record_cnt; j++) {
+		if ((switch_record_table[j].level != 0) ||
+		    (!bit_super_set(switches_bitmap[j], 
+				    switches_bitmap[best_fit_inx]))) {
+			switches_node_cnt[j] = 0;
+		}
+	}
+
+	/* Select resources from these leafs on a best-fit basis */
+	while ((max_nodes > 0) && ((rem_nodes > 0) || (rem_cpus > 0))) {
+		best_fit_cpus = best_fit_nodes = best_fit_sufficient = 0;
+		for (j=0; j<switch_record_cnt; j++) {
+			if (switches_node_cnt[j] == 0)
+				continue;
+			sufficient = (switches_cpu_cnt[j] >= rem_cpus) &&
+				     _enough_nodes(switches_node_cnt[j], 
+						   rem_nodes, min_nodes, 
+						   req_nodes);
+			/* If first possibility OR */
+			/* first set large enough for request OR */
+			/* tightest fit (less resource waste) OR */
+			/* nothing yet large enough, but this is biggest */
+			if ((best_fit_nodes == 0) ||	
+			    (sufficient && (best_fit_sufficient == 0)) ||
+			    (sufficient && 
+			     (switches_cpu_cnt[j] < best_fit_cpus)) ||
+			    ((sufficient == 0) && 
+			     (switches_cpu_cnt[j] > best_fit_cpus))) {
+				best_fit_cpus =  switches_cpu_cnt[j];
+				best_fit_nodes = switches_node_cnt[j];
+				best_fit_location = j;
+				best_fit_sufficient = sufficient;
+			}
+		}
+		if (best_fit_nodes == 0)
+			break;
+		/* Select nodes from this leaf */
+		first = bit_ffs(switches_bitmap[best_fit_location]);
+		last  = bit_fls(switches_bitmap[best_fit_location]);
+		for (i=first; ((i<=last) && (first>=0)); i++) {
+			if (!bit_test(switches_bitmap[best_fit_location], i))
+				continue;
+
+			bit_clear(switches_bitmap[best_fit_location], i);
+			switches_node_cnt[best_fit_location]--;
+			avail_cpus = _get_avail_cpus(job_ptr, i);
+			switches_cpu_cnt[best_fit_location] -= avail_cpus;
+
+			if (bit_test(bitmap, i)) {
+				/* node on multiple leaf switches
+				 * and already selected */
+				continue;
+			}
+
+			bit_set(bitmap, i);
+			rem_nodes--;
+			max_nodes--;
+			rem_cpus   -= avail_cpus;
+			alloc_cpus += avail_cpus;
+			total_cpus += _get_total_cpus(i);
+			if ((max_nodes <= 0) || 
+			    ((rem_nodes <= 0) && (rem_cpus <= 0)))
+				break;
+		}
+		switches_node_cnt[best_fit_location] = 0;
+	}
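+	/* Success requires both the CPU and node counts to be satisfied */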
+	if ((rem_cpus <= 0) && 
+	    _enough_nodes(0, rem_nodes, min_nodes, req_nodes)) {
+		rc = SLURM_SUCCESS;
+	} else
+		rc = EINVAL;
+
+ fini:	if (rc == SLURM_SUCCESS) {
+		/* Job's total_procs is needed for SELECT_MODE_WILL_RUN */
+		job_ptr->total_procs = total_cpus;
+	}
+	FREE_NULL_BITMAP(avail_nodes_bitmap);
+	FREE_NULL_BITMAP(req_nodes_bitmap);
+	for (i=0; i<switch_record_cnt; i++)
+		bit_free(switches_bitmap[i]);
+	xfree(switches_bitmap);
+	xfree(switches_cpu_cnt);
+	xfree(switches_node_cnt);
+	xfree(switches_required);
+
+	return rc;
+}
+
 extern int select_p_job_begin(struct job_record *job_ptr)
 {
 	int rc = SLURM_SUCCESS;
@@ -998,11 +1508,6 @@ extern int select_p_job_resume(struct job_record *job_ptr)
 	return SLURM_SUCCESS;
 }
 
-extern int select_p_get_job_cores(uint32_t job_id, int alloc_index, int s)
-{
-	return 0;
-}
-
 extern int select_p_job_ready(struct job_record *job_ptr)
 {
 	if (job_ptr->job_state != JOB_RUNNING)
@@ -1026,11 +1531,6 @@ extern int select_p_get_select_nodeinfo (struct node_record *node_ptr,
 
 extern int select_p_update_nodeinfo (struct job_record *job_ptr)
 {
-	int i, node_inx;
-	ListIterator step_iterator;
-	struct step_record *step_ptr;
-	uint32_t step_mem;
-
 	xassert(job_ptr);
 
 	slurm_mutex_lock(&cr_mutex);
@@ -1038,39 +1538,6 @@ extern int select_p_update_nodeinfo (struct job_record *job_ptr)
 		_init_node_cr();
 	slurm_mutex_unlock(&cr_mutex);
 
-	if ((job_ptr->job_state != JOB_RUNNING)
-	&&  (job_ptr->job_state != JOB_SUSPENDED))
-		return SLURM_SUCCESS;
-	if ((cr_type != CR_MEMORY) || (job_ptr->details == NULL) || 
-	    (job_ptr->details->shared == 0) || job_ptr->details->job_min_memory)
-		return SLURM_SUCCESS;
-
-	slurm_mutex_lock(&cr_mutex);
-	step_iterator = list_iterator_create (job_ptr->step_list);
-	while ((step_ptr = (struct step_record *) list_next (step_iterator))) {
-		if ((step_ptr->step_node_bitmap == NULL) ||
-		    (step_ptr->step_layout == NULL) ||
-		    (step_ptr->mem_per_task == 0) ||
-		    (_find_step(step_ptr)))	/* already added */
-			continue;
-#if SELECT_DEBUG
-		info("select_p_update_nodeinfo: %u.%u mem:%u", 
-		     step_ptr->job_ptr->job_id, step_ptr->step_id, 
-		     step_ptr->mem_per_task);
-#endif
-		node_inx = -1;
-		for (i = 0; i < select_node_cnt; i++) {
-			if (bit_test(step_ptr->step_node_bitmap, i) == 0)
-				continue;
-			node_inx++;
-			step_mem = step_ptr->step_layout->tasks[node_inx] * 
-				   step_ptr->mem_per_task;
-			node_cr_ptr[i].alloc_memory += step_mem;
-		}
-		_add_step(step_ptr);
-	}
-	list_iterator_destroy (step_iterator);
-	slurm_mutex_unlock(&cr_mutex);
 	return SLURM_SUCCESS;
 }
 
@@ -1083,44 +1550,9 @@ extern int select_p_update_sub_node (update_part_msg_t *part_desc_ptr)
 {
 	return SLURM_SUCCESS;
 }
-extern int select_p_get_extra_jobinfo (struct node_record *node_ptr, 
-                                       struct job_record *job_ptr, 
-                                       enum select_data_info info,
-                                       void *data)
-{
-	int rc = SLURM_SUCCESS;
-	uint16_t *tmp_16;
-
-	xassert(job_ptr);
-	xassert(job_ptr->magic == JOB_MAGIC);
-
-	switch (info) {
-	case SELECT_AVAIL_CPUS:
-		tmp_16 = (uint16_t *) data;
-
-		if (job_ptr->details &&
-		    ((job_ptr->details->cpus_per_task > 1) ||
-		     (job_ptr->details->mc_ptr))) {
-			int index = (node_ptr - node_record_table_ptr);
-			*tmp_16 = _get_avail_cpus(job_ptr, index);
-		} else {
-			if (slurmctld_conf.fast_schedule) {
-				*tmp_16 = node_ptr->config_ptr->cpus;
-			} else {
-				*tmp_16 = node_ptr->cpus;
-			}
-		}
-		break;
-	default:
-		error("select_g_get_extra_jobinfo info %d invalid", info);
-		rc = SLURM_ERROR;
-		break;
-	}
-	
-	return rc;
-}
 
 extern int select_p_get_info_from_plugin (enum select_data_info info,
+					  struct job_record *job_ptr,
 					  void *data)
 {
 	return SLURM_SUCCESS;
@@ -1150,6 +1582,11 @@ extern int select_p_reconfigure(void)
 	return SLURM_SUCCESS;
 }
 
+extern List select_p_get_config(void)
+{
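+	/* select/linear maintains no exportable configuration list */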
+	return NULL;
+}
+
 /*
  * deallocate resources that were assigned to this job 
  *
@@ -1160,8 +1597,9 @@ static int _rm_job_from_nodes(struct node_cr_record *node_cr_ptr,
 			      struct job_record *job_ptr, char *pre_err, 
 			      int remove_all)
 {
-	int i, rc = SLURM_SUCCESS;
+	int i, i_first, i_last, rc = SLURM_SUCCESS;
 	struct part_cr_record *part_cr_ptr;
+	select_job_res_t select_ptr;
 	uint32_t job_memory, job_memory_cpu = 0, job_memory_node = 0;
 
 	if (node_cr_ptr == NULL) {
@@ -1178,8 +1616,20 @@ static int _rm_job_from_nodes(struct node_cr_record *node_cr_ptr,
 			job_memory_node = job_ptr->details->job_min_memory;
 	}
 
-	for (i = 0; i < select_node_cnt; i++) {
-		if (bit_test(job_ptr->node_bitmap, i) == 0)
+	if ((select_ptr = job_ptr->select_job) == NULL) {
+		error("job %u lacks a select_job_res struct",
+		      job_ptr->job_id);
+		return SLURM_ERROR;
+	}
+	i_first = bit_ffs(select_ptr->node_bitmap);
+	i_last  = bit_fls(select_ptr->node_bitmap);
+	if (i_first < 0) {
+		error("job %u allocated nodes which have been removed "
+		      "from slurm.conf", job_ptr->job_id);
+		return SLURM_ERROR;
+	}
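+	/* Release this job's resources on each node of its allocation */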
+	for (i = i_first; i <= i_last; i++) {
+		if (bit_test(select_ptr->node_bitmap, i) == 0)
 			continue;
 		if (job_memory_cpu == 0)
 			job_memory = job_memory_node;
@@ -1231,9 +1681,17 @@ static int _rm_job_from_nodes(struct node_cr_record *node_cr_ptr,
 			break;
 		}
 		if (part_cr_ptr == NULL) {
-			error("%s: could not find partition %s for node %s",
-				pre_err, job_ptr->part_ptr->name,
-				node_record_table_ptr[i].name);
+			if (job_ptr->part_ptr)
+				error("%s: could not find partition "
+				      "%s for node %s",
+				      pre_err, job_ptr->part_ptr->name,
+				      node_record_table_ptr[i].name);
+			else
+				error("%s: no partition ptr given for "
+				      "job %u and node %s",
+				      pre_err, job_ptr->job_id,
+				      node_record_table_ptr[i].name);
 			rc = SLURM_ERROR;
 		}
 	}
@@ -1251,8 +1709,9 @@ static int _add_job_to_nodes(struct node_cr_record *node_cr_ptr,
 			     struct job_record *job_ptr, char *pre_err, 
 			     int alloc_all)
 {
-	int i, rc = SLURM_SUCCESS, exclusive = 0;
+	int i, i_first, i_last, rc = SLURM_SUCCESS, exclusive = 0;
 	struct part_cr_record *part_cr_ptr;
+	select_job_res_t select_ptr;
 	uint32_t job_memory_cpu = 0, job_memory_node = 0;
 
 	if (node_cr_ptr == NULL) {
@@ -1272,8 +1731,15 @@ static int _add_job_to_nodes(struct node_cr_record *node_cr_ptr,
 	if (job_ptr->details->shared == 0)
 		exclusive = 1;
 
-	for (i = 0; i < select_node_cnt; i++) {
-		if (bit_test(job_ptr->node_bitmap, i) == 0)
+	if ((select_ptr = job_ptr->select_job) == NULL) {
+		error("job %u lacks a select_job_res struct",
+		      job_ptr->job_id);
+		return SLURM_ERROR;
+	}
+	i_first = bit_ffs(select_ptr->node_bitmap);
+	i_last  = bit_fls(select_ptr->node_bitmap);
+	for (i=i_first; ((i<=i_last) && (i_first>=0)); i++) {
+		if (bit_test(select_ptr->node_bitmap, i) == 0)
 			continue;
 		if (job_memory_cpu == 0)
 			node_cr_ptr[i].alloc_memory += job_memory_node;
@@ -1400,13 +1866,12 @@ static void _init_node_cr(void)
 {
 	struct part_record *part_ptr;
 	struct part_cr_record *part_cr_ptr;
+	select_job_res_t select_ptr;
 	ListIterator part_iterator;
 	struct job_record *job_ptr;
 	ListIterator job_iterator;
-	uint32_t job_memory_cpu, job_memory_node, step_mem = 0;
-	int exclusive, i, node_inx;
-	ListIterator step_iterator;
-	struct step_record *step_ptr;
+	uint32_t job_memory_cpu, job_memory_node;
+	int exclusive, i, i_first, i_last;
 
 	if (node_cr_ptr)
 		return;
@@ -1436,16 +1901,23 @@ static void _init_node_cr(void)
 		if ((job_ptr->job_state != JOB_RUNNING) &&
 		    (job_ptr->job_state != JOB_SUSPENDED))
 			continue;
+		if ((select_ptr = job_ptr->select_job) == NULL) {
+			error("job %u lacks a select_job_res struct",
+			      job_ptr->job_id);
+			continue;
+		}
 
 		job_memory_cpu  = 0;
 		job_memory_node = 0;
 		if (job_ptr->details && 
 		    job_ptr->details->job_min_memory && (cr_type == CR_MEMORY)) {
 			if (job_ptr->details->job_min_memory & MEM_PER_CPU) {
-				job_memory_cpu = job_ptr->details->job_min_memory &
+				job_memory_cpu = job_ptr->details->
+						 job_min_memory &
 						 (~MEM_PER_CPU);
 			} else {
-				job_memory_node = job_ptr->details->job_min_memory;
+				job_memory_node = job_ptr->details->
+						  job_min_memory;
 			}
 		}
 		if (job_ptr->details->shared == 0)
@@ -1453,10 +1925,14 @@ static void _init_node_cr(void)
 		else
 			exclusive = 0;
 
-		for (i = 0; i < select_node_cnt; i++) {
-			if (job_ptr->node_bitmap == NULL)
-				break;
-			if (!bit_test(job_ptr->node_bitmap, i))
+		/* Use select_ptr->node_bitmap rather than job_ptr->node_bitmap
+		 * which can have DOWN nodes cleared from the bitmap */
+		if (select_ptr->node_bitmap == NULL)
+			continue;
+		i_first = bit_ffs(select_ptr->node_bitmap);
+		i_last  = bit_fls(select_ptr->node_bitmap);
+		for (i=i_first; ((i<=i_last) && (i_first>=0)); i++) {
+			if (!bit_test(select_ptr->node_bitmap, i))
 				continue;
 			if (exclusive) {
 				if (node_cr_ptr[i].exclusive_jobid) {
@@ -1498,40 +1974,6 @@ static void _init_node_cr(void)
 					node_record_table_ptr[i].name);
 			}
 		}
-
-		if (job_ptr->details->job_min_memory || 
-		    (job_ptr->details->shared == 0) || (cr_type != CR_MEMORY))
-			continue;
-
-		step_iterator = list_iterator_create (job_ptr->step_list);
-		while ((step_ptr = (struct step_record *) list_next (step_iterator))) {
-			if ((step_ptr->step_node_bitmap == NULL) ||
-			    (step_ptr->step_layout == NULL))
-				continue;
-
-			if (_find_step(step_ptr)) {
-				slurm_mutex_unlock(&cr_mutex);
-				error("_init_node_cr: duplicate for step %u.%u",
-				      job_ptr->job_id, step_ptr->step_id);
-				continue;
-			}
-
-			node_inx = -1;
-			for (i = 0; i < select_node_cnt; i++) {
-				if (bit_test(step_ptr->step_node_bitmap, i) == 0)
-					continue;
-				node_inx++;
-				step_mem = step_ptr->step_layout->tasks[node_inx] * 
-					   step_ptr->mem_per_task;
-				node_cr_ptr[i].alloc_memory += step_mem;
-			}
-#if SELECT_DEBUG
-			info("_init_node_cr: added %u.%u mem:%u", 
-			     job_ptr->job_id, step_ptr->step_id, step_mem);
-#endif
-			_add_step(step_ptr);
-		}
-		list_iterator_destroy (step_iterator);
 	}
 	list_iterator_destroy(job_iterator);
 	_dump_node_cr(node_cr_ptr);
@@ -1628,195 +2070,10 @@ static void _cr_job_list_del(void *x)
 {
 	xfree(x);
 }
+
 static int  _cr_job_list_sort(void *x, void *y)
 {
 	struct job_record **job1_pptr = (struct job_record **) x;
 	struct job_record **job2_pptr = (struct job_record **) y;
 	return (int) difftime(job1_pptr[0]->end_time, job2_pptr[0]->end_time);
 }
-
-extern int select_p_step_begin(struct step_record *step_ptr)
-{
-	slurm_step_layout_t *step_layout = step_ptr->step_layout;
-	int i, node_inx = -1;
-	uint32_t avail_mem, step_mem;
-
-	xassert(step_ptr->job_ptr);
-	xassert(step_ptr->job_ptr->details);
-	xassert(step_ptr->step_node_bitmap);
-
-#if SELECT_DEBUG
-	info("select_p_step_begin: mem:%u", step_ptr->mem_per_task);
-#endif
-	if (step_layout == NULL)
-		return SLURM_SUCCESS;	/* batch script */
-	/* Don't track step memory use if job has reserved memory OR
-	 * job has whole node OR we don't track memory usage */
-	if (step_ptr->job_ptr->details->job_min_memory || 
-	    (step_ptr->job_ptr->details->shared == 0) ||
-	    (cr_type != CR_MEMORY))
-		return SLURM_SUCCESS;
-
-	/* test if there is sufficient memory */
-	slurm_mutex_lock(&cr_mutex);
-	if (node_cr_ptr == NULL)
-		_init_node_cr();
-	if (_find_step(step_ptr)) {
-		slurm_mutex_unlock(&cr_mutex);
-		error("select_p_step_begin: duplicate for step %u.%u",
-		      step_ptr->job_ptr->job_id, step_ptr->step_id);
-		return SLURM_SUCCESS;
-	}
-	for (i = 0; i < select_node_cnt; i++) {
-		if (bit_test(step_ptr->step_node_bitmap, i) == 0)
-			continue;
-		node_inx++;
-		step_mem = step_layout->tasks[node_inx] * step_ptr->mem_per_task;
-		if (select_fast_schedule)
-			avail_mem = node_record_table_ptr[i].
-				    config_ptr->real_memory;
-		else
-			avail_mem = node_record_table_ptr[i].real_memory;
-#if SELECT_DEBUG
-		info("alloc %u need %u avail %u", 
-		     node_cr_ptr[i].alloc_memory, step_mem, avail_mem);
-#endif
-		if ((node_cr_ptr[i].alloc_memory + step_mem) > avail_mem) {
-			slurm_mutex_unlock(&cr_mutex);
-			return SLURM_ERROR;	/* no room */
-		}
-	}
-
-	/* reserve the memory */
-	node_inx = -1;
-	for (i = 0; i < select_node_cnt; i++) {
-		if (bit_test(step_ptr->step_node_bitmap, i) == 0)
-			continue;
-		node_inx++;
-		step_mem = step_layout->tasks[node_inx] * step_ptr->mem_per_task;
-		node_cr_ptr[i].alloc_memory += step_mem;
-	}
-	_add_step(step_ptr);
-	slurm_mutex_unlock(&cr_mutex);
-	return SLURM_SUCCESS;
-}
-
-extern int select_p_step_fini(struct step_record *step_ptr)
-{
-	slurm_step_layout_t *step_layout = step_ptr->step_layout;
-	int i, node_inx = -1;
-	uint32_t step_mem;
-
-	xassert(step_ptr->job_ptr);
-	xassert(step_ptr->job_ptr->details);
-	xassert(step_ptr->step_node_bitmap);
-
-#if SELECT_DEBUG
-	info("select_p_step_fini: mem:%u", step_ptr->mem_per_task);
-#endif
-	if (step_layout == NULL)
-		return SLURM_SUCCESS;	/* batch script */
-	/* Don't track step memory use if job has reserved memory OR
-	 * job has whole node OR we don't track memory usage */
-	if (step_ptr->job_ptr->details->job_min_memory || 
-	    (step_ptr->job_ptr->details->shared == 0) ||
-	    (cr_type != CR_MEMORY))
-		return SLURM_SUCCESS;
-
-	/* release the memory */
-	slurm_mutex_lock(&cr_mutex);
-	if (node_cr_ptr == NULL)
-		_init_node_cr();
-	if (!_find_step(step_ptr)) {
-		slurm_mutex_unlock(&cr_mutex);
-		error("select_p_step_fini: could not find step %u.%u",
-		      step_ptr->job_ptr->job_id, step_ptr->step_id);
-		return SLURM_ERROR;
-	}
-	for (i = 0; i < select_node_cnt; i++) {
-		if (bit_test(step_ptr->step_node_bitmap, i) == 0)
-			continue;
-		node_inx++;
-		step_mem = step_layout->tasks[node_inx] * step_ptr->mem_per_task;
-		if (node_cr_ptr[i].alloc_memory >= step_mem)
-			node_cr_ptr[i].alloc_memory -= step_mem;
-		else {
-			node_cr_ptr[i].alloc_memory = 0;
-			error("select_p_step_fini: alloc_memory underflow on %s",
-				node_record_table_ptr[i].name);
-		}
-	}
-	_remove_step(step_ptr);
-	slurm_mutex_unlock(&cr_mutex);
-	return SLURM_SUCCESS;
-}
-
-/* return 1 if found, 0 otherwise */
-static int _find_step(struct step_record *step_ptr)
-{
-	ListIterator step_iterator;
-	struct step_cr_record *step;
-	int found = 0;
-
-	if (!step_cr_list)
-		return found;
-	step_iterator = list_iterator_create(step_cr_list);
-	if (step_iterator == NULL) {
-		fatal("list_iterator_create: memory allocation failure");
-		return found;
-	}
-	while ((step = list_next(step_iterator))) {
-		if ((step->job_id  == step_ptr->job_ptr->job_id) &&
-		    (step->step_id == step_ptr->step_id)) {
-			found = 1;
-			break;
-		}
-	}
-	list_iterator_destroy(step_iterator);
-	return found;
-}
-static int _add_step(struct step_record *step_ptr)
-{
-	struct step_cr_record *step = xmalloc(sizeof(struct step_cr_record));
-
-	step->job_id  = step_ptr->job_ptr->job_id;
-	step->step_id = step_ptr->step_id;
-	if (!step_cr_list) {
-		step_cr_list = list_create(_del_list_step);
-		if (!step_cr_list)
-			fatal("list_create: memory allocation failure");
-	}
-	if (list_append(step_cr_list, step) == NULL) {
-		fatal("list_append: memory allocation failure");
-		return SLURM_ERROR;
-	}
-	return SLURM_SUCCESS;
-}
-static int _remove_step(struct step_record *step_ptr)
-{
-	ListIterator step_iterator;
-	struct step_cr_record *step;
-	int found = 0;
-
-	if (!step_cr_list)
-		return found;
-	step_iterator = list_iterator_create(step_cr_list);
-	if (step_iterator == NULL) {
-		fatal("list_iterator_create: memory allocation failure");
-		return found;
-	}
-	while ((step = list_next(step_iterator))) {
-		if ((step->job_id  == step_ptr->job_ptr->job_id) &&
-		    (step->step_id == step_ptr->step_id)) {
-			found = 1;
-			list_delete_item(step_iterator);
-			break;
-		}
-	}
-	list_iterator_destroy(step_iterator);
-	return found;
-}
-static void _del_list_step(void *x)
-{
-	xfree(x);
-}
diff --git a/src/plugins/select/linear/select_linear.h b/src/plugins/select/linear/select_linear.h
index cd40a7fd96cb6104063820b6f6a65e686f463db0..bcb0b010bff7a0b7cd88a6241e80589bb89fb694 100644
--- a/src/plugins/select/linear/select_linear.h
+++ b/src/plugins/select/linear/select_linear.h
@@ -1,12 +1,13 @@
 /*****************************************************************************\
  *  select_linear.h 
  *****************************************************************************
- *  Copyright (C) 2006 Hewlett-Packard Development Company, L.P.
+ *  Copyright (C) 2006-2007 Hewlett-Packard Development Company, L.P.
  *  Written by Susanne M. Balle, <susanne.balle@hp.com>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -69,13 +70,4 @@ struct node_cr_record {
 					 * otherwise value is zero */
 };
 
-/*
- * step_cr_record keeps track of the steps which have been allocated memory
- * independently of the job (ie. the job itself has no reserved memory
- */
-struct step_cr_record {
-	uint32_t job_id;
-	uint32_t step_id;
-};
-
 #endif /* !_SELECT_LINEAR_H */
diff --git a/src/plugins/switch/Makefile.in b/src/plugins/switch/Makefile.in
index b9c9f4a361a743a2784b0ba8253eefa3dabc69d3..468c3427360d44d2590bcdeca5534018b63ff750 100644
--- a/src/plugins/switch/Makefile.in
+++ b/src/plugins/switch/Makefile.in
@@ -42,14 +42,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -91,6 +95,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
diff --git a/src/plugins/switch/elan/Makefile.in b/src/plugins/switch/elan/Makefile.in
index f918beffcac64a203074e1d95c5b61d79c299cfc..cd326967776cb61a491f7803069be72e6d1b93eb 100644
--- a/src/plugins/switch/elan/Makefile.in
+++ b/src/plugins/switch/elan/Makefile.in
@@ -43,14 +43,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -115,6 +119,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
diff --git a/src/plugins/switch/elan/qsw.c b/src/plugins/switch/elan/qsw.c
index 98d317af6501ee3be77ef440ccecd48fbb3e37dd..a3548a7312fabd753685eff150d4b8776e9f3773 100644
--- a/src/plugins/switch/elan/qsw.c
+++ b/src/plugins/switch/elan/qsw.c
@@ -1,14 +1,15 @@
 /*****************************************************************************\
  *  qsw.c - Library routines for initiating jobs on QsNet. 
- *  $Id: qsw.c 13672 2008-03-19 23:10:58Z jette $
+ *  $Id: qsw.c 16616 2009-02-20 17:00:27Z jette $
  *****************************************************************************
  *  Copyright (C) 2002 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Jim Garlick <garlick@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/plugins/switch/elan/qsw.h b/src/plugins/switch/elan/qsw.h
index d82345791ba9e77c37519ce68058a48b4f81e69c..d82bd6391099a7ae4dee050fd8cf1f51c5ac89ab 100644
--- a/src/plugins/switch/elan/qsw.h
+++ b/src/plugins/switch/elan/qsw.h
@@ -4,10 +4,11 @@
  *  Copyright (C) 2002 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Jim Garlick <garlick@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/plugins/switch/elan/switch_elan.c b/src/plugins/switch/elan/switch_elan.c
index 2e89fec6058ea51b0c55a5a925e5943d9b3993c3..62d19629b719431b9f31f4d8a05bd3afa1046ea9 100644
--- a/src/plugins/switch/elan/switch_elan.c
+++ b/src/plugins/switch/elan/switch_elan.c
@@ -1,14 +1,15 @@
 /*****************************************************************************\
  *  switch_elan.c - Library routines for initiating jobs on QsNet. 
- *  $Id: switch_elan.c 13672 2008-03-19 23:10:58Z jette $
  *****************************************************************************
- *  Copyright (C) 2003-2006 The Regents of the University of California.
+ *  Copyright (C) 2003-2007 The Regents of the University of California.
+ *  Copyright (C) 2008 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Kevin Tew <tew1@llnl.gov>, et. al.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -309,12 +310,6 @@ int switch_p_libstate_clear ( void )
 	return qsw_clear();
 }
 
-
-bool switch_p_no_frag ( void )
-{
-	return true;
-}
-
 /*
  * switch functions for job step specific credential
  */
diff --git a/src/plugins/switch/federation/Makefile.in b/src/plugins/switch/federation/Makefile.in
index af6555c5f85ecae087dcdc668f4ac13ed6ee4b8e..4d4bd36da6361ac73aed5f2c707e3d1ff86e02fe 100644
--- a/src/plugins/switch/federation/Makefile.in
+++ b/src/plugins/switch/federation/Makefile.in
@@ -43,14 +43,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -118,6 +122,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
diff --git a/src/plugins/switch/federation/federation.c b/src/plugins/switch/federation/federation.c
index 6e20a93f1eb15c52728ca477986b6f67bffa26e9..0f6c62977461a9b613d3e8a9ce9f413d6b26f0f0 100644
--- a/src/plugins/switch/federation/federation.c
+++ b/src/plugins/switch/federation/federation.c
@@ -1,14 +1,15 @@
 /*****************************************************************************\
- **  federation.c - Library routines for initiating jobs on IBM Federation
- **  $Id: federation.c 14365 2008-06-26 19:17:00Z jette $
+ *  federation.c - Library routines for initiating jobs on IBM Federation
  *****************************************************************************
- *  Copyright (C) 2004 The Regents of the University of California.
+ *  Copyright (C) 2004-2007 The Regents of the University of California.
+ *  Copyright (C) 2008 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Jason King <jking@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/plugins/switch/federation/federation.h b/src/plugins/switch/federation/federation.h
index 7b321609c5699e3f787b85ca27d6a73e16946fbe..86ca383a9f2c05b868350fa4b45411adf2648e8a 100644
--- a/src/plugins/switch/federation/federation.h
+++ b/src/plugins/switch/federation/federation.h
@@ -1,14 +1,15 @@
 /*****************************************************************************\
  **  federation.h - Library routines for initiating jobs on IBM Federation
- **  $Id: federation.h 13672 2008-03-19 23:10:58Z jette $
+ **  $Id: federation.h 16616 2009-02-20 17:00:27Z jette $
  *****************************************************************************
  *  Copyright (C) 2004 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Jason King <jking@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/plugins/switch/federation/federation_keys.h b/src/plugins/switch/federation/federation_keys.h
index 9f785dcda735e2592a0e121cb3ac648329f81119..3154adc2fe78dcf119522fb8c3c299b7b87cedd9 100644
--- a/src/plugins/switch/federation/federation_keys.h
+++ b/src/plugins/switch/federation/federation_keys.h
@@ -1,14 +1,15 @@
 /*****************************************************************************\
  **  federation_keys.h - Key definitions used by the get_jobinfo functions
- **  $Id: federation_keys.h 13672 2008-03-19 23:10:58Z jette $
+ **  $Id: federation_keys.h 16616 2009-02-20 17:00:27Z jette $
  *****************************************************************************
  *  Copyright (C) 2004 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Jason King <jking@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/plugins/switch/federation/switch_federation.c b/src/plugins/switch/federation/switch_federation.c
index 1e8bd41c2500c019e5d69b8beb672063f5c71e54..92892208431cc38323daee020edcb843a3e4b533 100644
--- a/src/plugins/switch/federation/switch_federation.c
+++ b/src/plugins/switch/federation/switch_federation.c
@@ -1,15 +1,16 @@
 /***************************************************************************** \
- **  switch_federation.c - Library routines for initiating jobs on IBM 
- **	Federation
- **  $Id: switch_federation.c 13672 2008-03-19 23:10:58Z jette $
+ *  switch_federation.c - Library routines for initiating jobs on IBM 
+ *	Federation
  *****************************************************************************
- *  Copyright (C) 2004-2006 The Regents of the University of California.
+ *  Copyright (C) 2004-2007 The Regents of the University of California.
+ *  Copyright (C) 2008 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Jason King <jking@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -607,10 +608,6 @@ int switch_p_job_attach(switch_jobinfo_t jobinfo, char ***env,
 /*
  * switch functions for other purposes
  */
-bool switch_p_no_frag(void)
-{
-	return false;
-}
 
 /* 
  * Linear search through table of errno values and strings,
diff --git a/src/plugins/switch/none/Makefile.in b/src/plugins/switch/none/Makefile.in
index cedf45a319807b6c123993424f7fe45970b893db..96d5e4a4d98dbb35321719d94e94804a95acfcef 100644
--- a/src/plugins/switch/none/Makefile.in
+++ b/src/plugins/switch/none/Makefile.in
@@ -43,14 +43,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -109,6 +113,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
diff --git a/src/plugins/switch/none/switch_none.c b/src/plugins/switch/none/switch_none.c
index 7d0b614f531bd01f5e53931971a557761e7009b8..d22f1a83cad5d576d105b2ea4023844663ce0621 100644
--- a/src/plugins/switch/none/switch_none.c
+++ b/src/plugins/switch/none/switch_none.c
@@ -1,13 +1,15 @@
 /*****************************************************************************\
  *  switch_none.c - Library for managing a switch with no special handling.
  *****************************************************************************
- *  Copyright (C) 2002-2006 The Regents of the University of California.
+ *  Copyright (C) 2002-2007 The Regents of the University of California.
+ *  Copyright (C) 2008 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -221,11 +223,6 @@ extern int switch_p_get_jobinfo(switch_jobinfo_t switch_job,
 /*
  * switch functions for other purposes
  */
-bool switch_p_no_frag ( void )
-{
-	return false;
-}
-
 extern int switch_p_get_errno(void)
 {
 	return SLURM_SUCCESS;
diff --git a/src/plugins/task/Makefile.in b/src/plugins/task/Makefile.in
index ec2e57c5be3268880d12a09749fe51933c33d0c8..72b37a588befeb55d8f50a7ca0e4dec9e58ef236 100644
--- a/src/plugins/task/Makefile.in
+++ b/src/plugins/task/Makefile.in
@@ -42,14 +42,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -91,6 +95,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
diff --git a/src/plugins/task/affinity/Makefile.in b/src/plugins/task/affinity/Makefile.in
index 21cb51f772e3b96048d110b7c68727728b8feba5..5beb293e86269bd5777935ce8f4d911347483c65 100644
--- a/src/plugins/task/affinity/Makefile.in
+++ b/src/plugins/task/affinity/Makefile.in
@@ -43,14 +43,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -121,6 +125,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
diff --git a/src/plugins/task/affinity/affinity.c b/src/plugins/task/affinity/affinity.c
index f6fb3414453de2f57370b7fc381f34ff6c26cc09..e330f5abc7b0bb15f1dd63d9b5d3c4c8f5539b7f 100644
--- a/src/plugins/task/affinity/affinity.c
+++ b/src/plugins/task/affinity/affinity.c
@@ -5,7 +5,8 @@
  *  Copyright (C) 2005-2006 Hewlett-Packard Development Company, L.P.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -67,6 +68,8 @@ void slurm_chkaffinity(cpu_set_t *mask, slurmd_job_t *job, int statval)
 			units = "_cores";
 		else if (job->cpu_bind_type & CPU_BIND_TO_SOCKETS)
 			units = "_sockets";
+		else if (job->cpu_bind_type & CPU_BIND_TO_LDOMS)
+			units = "_ldoms";
 		else
 			units = "";
 		if (job->cpu_bind_type & CPU_BIND_RANK) {
@@ -75,6 +78,12 @@ void slurm_chkaffinity(cpu_set_t *mask, slurmd_job_t *job, int statval)
 			bind_type = "MAP ";
 		} else if (job->cpu_bind_type & CPU_BIND_MASK) {
 			bind_type = "MASK";
+		} else if (job->cpu_bind_type & CPU_BIND_LDRANK) {
+			bind_type = "LDRANK";
+		} else if (job->cpu_bind_type & CPU_BIND_LDMAP) {
+			bind_type = "LDMAP ";
+		} else if (job->cpu_bind_type & CPU_BIND_LDMASK) {
+			bind_type = "LDMASK";
 		} else if (job->cpu_bind_type & (~CPU_BIND_VERBOSE)) {
 			bind_type = "UNK ";
 		} else {
@@ -95,12 +104,42 @@ void slurm_chkaffinity(cpu_set_t *mask, slurmd_job_t *job, int statval)
 			status);
 }
 
+/* If HAVE_NUMA, create mask for given ldom.
+ * Otherwise create mask for given socket
+ */
+static int _bind_ldom(uint32_t ldom, cpu_set_t *mask)
+{
+#ifdef HAVE_NUMA
+	int c, maxcpus, nnid = 0;
+	int nmax = numa_max_node();
+	if (nmax > 0)
+		nnid = ldom % (nmax+1);
+	debug3("task/affinity: binding to NUMA node %d", nnid);
+	maxcpus = conf->sockets * conf->cores * conf->threads;
+	for (c = 0; c < maxcpus; c++) {
+		if (slurm_get_numa_node(c) == nnid)
+			CPU_SET(c, mask);
+	}
+	return true;
+#else
+	uint16_t s, sid  = ldom % conf->sockets;
+	uint16_t i, cpus = conf->cores * conf->threads;
+	if (!conf->block_map)
+		return false;
+	for (s = sid * cpus; s < (sid+1) * cpus; s++) {
+		i = s % conf->block_map_size;
+		CPU_SET(conf->block_map[i], mask);
+	}
+	return true;
+#endif
+}
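+/* Worked example for the non-NUMA fallback above (hypothetical counts):
+ * with conf->sockets=2, conf->cores=4 and conf->threads=2 each socket
+ * spans cpus=8 map slots, so ldom 1 gives sid=1 and sets
+ * block_map[8..15], i.e. every CPU of the second socket. */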
+
 int get_cpuset(cpu_set_t *mask, slurmd_job_t *job)
 {
-	int nummasks, maskid, i;
+	int nummasks, maskid, i, threads;
 	char *curstr, *selstr;
 	char mstr[1 + CPU_SETSIZE / 4];
-	int local_id = job->envtp->localid;
+	uint32_t local_id = job->envtp->localid;
 	char buftype[1024];
 
 	slurm_sprint_cpu_bind_type(buftype, job->cpu_bind_type);
@@ -113,9 +152,17 @@ int get_cpuset(cpu_set_t *mask, slurmd_job_t *job)
 	}
 
 	if (job->cpu_bind_type & CPU_BIND_RANK) {
-		CPU_SET(job->envtp->localid % job->cpus, mask);
+		threads = MAX(conf->threads, 1);
+		CPU_SET(job->envtp->localid % (job->cpus*threads), mask);
 		return true;
 	}
+	
+	if (job->cpu_bind_type & CPU_BIND_LDRANK) {
+		/* if HAVE_NUMA then bind this task ID to its corresponding
+		 * locality domain ID. Otherwise, bind this task ID to its
+		 * corresponding socket ID */
+		return _bind_ldom(local_id, mask);
+	}
 
 	if (!job->cpu_bind)
 		return false;
@@ -180,6 +227,50 @@ int get_cpuset(cpu_set_t *mask, slurmd_job_t *job)
 		CPU_SET(mycpu, mask);
 		return true;
 	}
+	
+	if (job->cpu_bind_type & CPU_BIND_LDMASK) {
+		/* if HAVE_NUMA bind this task to the locality domains
+		 * identified in mstr. Otherwise bind this task to the
+		 * sockets identified in mstr */
+		int len = strlen(mstr);
+		char *ptr = mstr + len - 1;
+		uint32_t base = 0;
+
+		curstr = mstr;
+		/* skip 0x, it's all hex anyway */
+		if (len > 1 && !memcmp(mstr, "0x", 2L))
+			curstr += 2;
+		while (ptr >= curstr) {
+			char val = char_to_val(*ptr);
+			if (val == (char) -1)
+				return false;
+			if (val & 1)
+				_bind_ldom(base, mask);
+			if (val & 2)
+				_bind_ldom(base + 1, mask);
+			if (val & 4)
+				_bind_ldom(base + 2, mask);
+			if (val & 8)
+				_bind_ldom(base + 3, mask);
+			len--;
+			ptr--;
+			base += 4;
+		}
+		return true;
+	}
+	
+	if (job->cpu_bind_type & CPU_BIND_LDMAP) {
+		/* if HAVE_NUMA bind this task to the given locality
+		 * domain. Otherwise bind this task to the given
+		 * socket */
+		uint32_t myldom = 0;
+		if (strncmp(mstr, "0x", 2) == 0) {
+			myldom = strtoul (&(mstr[2]), NULL, 16);
+		} else {
+			myldom = strtoul (mstr, NULL, 10);
+		}
+		return _bind_ldom(myldom, mask);
+	}
 
 	return false;
 }
@@ -198,9 +289,10 @@ int slurm_setaffinity(pid_t pid, size_t size, const cpu_set_t *mask)
 	rval = sched_setaffinity(pid, mask);
 #  endif
 #endif
-	if (rval)
-		verbose("sched_setaffinity(%d,%d,0x%s) failed with status %d",
-				pid, size, cpuset_to_str(mask, mstr), rval);
+	if (rval) {
+		verbose("sched_setaffinity(%d,%d,0x%s) failed: %m",
+			pid, size, cpuset_to_str(mask, mstr));
+	}
 	return (rval);
 }
 
@@ -219,10 +311,12 @@ int slurm_getaffinity(pid_t pid, size_t size, cpu_set_t *mask)
 	rval = sched_getaffinity(pid, mask);
 #  endif
 #endif
-	if (rval)
+	if (rval) {
 		verbose("sched_getaffinity(%d,%d,0x%s) failed with status %d",
 				pid, size, cpuset_to_str(mask, mstr), rval);
-
-	debug3("sched_getaffinity(%d) = 0x%s", pid, cpuset_to_str(mask, mstr));
+	} else {
+		debug3("sched_getaffinity(%d) = 0x%s",
+		       pid, cpuset_to_str(mask, mstr));
+	}
 	return (rval);
 }
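The CPU_BIND_LDMASK branch added above walks the user-supplied hex mask
string from its least-significant nibble, calling _bind_ldom() once per set
bit. A minimal standalone sketch of that nibble walk (char_to_val() is
reimplemented here for illustration; the mask value and output are
hypothetical, not part of this patch):

    #include <stdio.h>
    #include <string.h>

    /* hex digit -> value, -1 on error (stand-in for the plugin helper) */
    static int char_to_val(char c)
    {
            if (c >= '0' && c <= '9') return c - '0';
            if (c >= 'a' && c <= 'f') return c - 'a' + 10;
            if (c >= 'A' && c <= 'F') return c - 'A' + 10;
            return -1;
    }

    int main(void)
    {
            const char *mstr = "0x29";  /* nibbles 9,2 -> ldoms 0, 3, 5 */
            const char *curstr = mstr;
            const char *ptr = mstr + strlen(mstr) - 1;
            unsigned int base = 0;
            int b, val;

            if (strlen(mstr) > 1 && !strncmp(curstr, "0x", 2))
                    curstr += 2;        /* skip 0x, it's all hex anyway */
            while (ptr >= curstr) {     /* least-significant nibble first */
                    val = char_to_val(*ptr);
                    if (val < 0)
                            return 1;
                    for (b = 0; b < 4; b++) {
                            if (val & (1 << b))
                                    printf("bind ldom %u\n", base + b);
                    }
                    ptr--;
                    base += 4;
            }
            return 0;
    }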
diff --git a/src/plugins/task/affinity/affinity.h b/src/plugins/task/affinity/affinity.h
index 4ee3d27db120dc7da45913c9b40476759d6227b1..29081489ed2aaefaaf1545ba77898e5570c29a1c 100644
--- a/src/plugins/task/affinity/affinity.h
+++ b/src/plugins/task/affinity/affinity.h
@@ -5,7 +5,8 @@
  *  Copyright (C) 2005 Hewlett-Packard Development Company, L.P.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -92,7 +93,9 @@
 #include "src/common/util-net.h"
 #include "src/common/slurm_resource_info.h"
 
+#ifndef CPUSET_DIR
 #define CPUSET_DIR "/dev/cpuset"
+#endif
 
 /*** from affinity.c ***/
 void	slurm_chkaffinity(cpu_set_t *mask, slurmd_job_t *job, int statval);
@@ -112,8 +115,9 @@ int	slurm_set_cpuset(char *base, char *path, pid_t pid, size_t size,
 
 /*** from numa.c ***/
 #ifdef HAVE_NUMA
-int	get_memset(nodemask_t *mask, slurmd_job_t *job);
-void	slurm_chk_memset(nodemask_t *mask, slurmd_job_t *job);
+int	 get_memset(nodemask_t *mask, slurmd_job_t *job);
+void	 slurm_chk_memset(nodemask_t *mask, slurmd_job_t *job);
+uint16_t slurm_get_numa_node(uint16_t cpuid);
 #endif
 
 /*** from schedutils.c ***/
diff --git a/src/plugins/task/affinity/cpuset.c b/src/plugins/task/affinity/cpuset.c
index dfd0a632fb5cc75ee1ad6282c83d4f19c4c320cb..4c1e67466316bc35a44d595b5c7de0ffb573e488 100644
--- a/src/plugins/task/affinity/cpuset.c
+++ b/src/plugins/task/affinity/cpuset.c
@@ -5,10 +5,11 @@
  *  Copyright (C) 2007 The Regents of the University of California.
  *  Written by Don Albert <Don.Albert@Bull.com> and 
  *             Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -99,21 +100,25 @@ int	slurm_build_cpuset(char *base, char *path, uid_t uid, gid_t gid)
 	fd = open(file_path, O_RDONLY);
 	if (fd < 0) {
 		error("open(%s): %m", file_path);
-	} else {
-		rc = read(fd, mstr, sizeof(mstr));
-		close(fd);
-		if (rc < 1)
-			error("read(%s): %m", file_path);
-		snprintf(file_path, sizeof(file_path), "%s/mems", path);
-		fd = open(file_path, O_CREAT | O_WRONLY, 0700);
-		if (fd < 0) {
-			error("open(%s): %m", file_path);
-			return -1;
-		}
-		rc = write(fd, mstr, rc);
-		close(fd);
-		if (rc < 1)
-			error("write(%s): %m", file_path);
+		return -1;
+	}
+	rc = read(fd, mstr, sizeof(mstr));
+	close(fd);
+	if (rc < 1) {
+		error("read(%s): %m", file_path);
+		return -1;
+	}
+	snprintf(file_path, sizeof(file_path), "%s/mems", path);
+	fd = open(file_path, O_CREAT | O_WRONLY, 0700);
+	if (fd < 0) {
+		error("open(%s): %m", file_path);
+		return -1;
+	}
+	rc = write(fd, mstr, rc);
+	close(fd);
+	if (rc < 1) {
+		error("write(%s): %m", file_path);
+		return -1;
 	}
 
 	/* Delete cpuset once its tasks complete.
@@ -126,6 +131,10 @@ int	slurm_build_cpuset(char *base, char *path, uid_t uid, gid_t gid)
 	}
 	rc = write(fd, "1", 2);
 	close(fd);
+	if (rc < 1) {
+		error("write(%s): %m", file_path);
+		return -1;
+	}
 
 	/* Only now can we add tasks.
 	 * We can't add self, so add tasks after exec. */
@@ -134,7 +143,7 @@ int	slurm_build_cpuset(char *base, char *path, uid_t uid, gid_t gid)
 }
 
 int	slurm_set_cpuset(char *base, char *path, pid_t pid, size_t size, 
-		const cpu_set_t *mask)
+			 const cpu_set_t *mask)
 {
 	int fd, rc;
 	char file_path[PATH_MAX];
@@ -169,8 +178,10 @@ int	slurm_set_cpuset(char *base, char *path, pid_t pid, size_t size,
 	} else {
 		rc = read(fd, mstr, sizeof(mstr));
 		close(fd);
-		if (rc < 1)
+		if (rc < 1) {
 			error("read(%s): %m", file_path);
+			return -1;
+		}
 		snprintf(file_path, sizeof(file_path), "%s/mems", path);
 		fd = open(file_path, O_CREAT | O_WRONLY, 0700);
 		if (fd < 0) {
@@ -179,8 +190,10 @@ int	slurm_set_cpuset(char *base, char *path, pid_t pid, size_t size,
 		}
 		rc = write(fd, mstr, rc);
 		close(fd);
-		if (rc < 1)
+		if (rc < 1) {
 			error("write(%s): %m", file_path);
+			return -1;
+		}
 	}
 
 	/* Delete cpuset once its tasks complete.
@@ -267,8 +280,8 @@ int	slurm_set_memset(char *path, nodemask_t *new_mask)
 	int fd, i, max_node;
 	ssize_t rc;
 
-	snprintf(file_path, sizeof(file_path), "%s/mems", CPUSET_DIR);
-	fd = open(file_path, O_CREAT | O_RDONLY, 0700);
+	snprintf(file_path, sizeof(file_path), "%s/mems", path);
+	fd = open(file_path, O_CREAT | O_RDWR, 0700);
 	if (fd < 0) {
 		error("open(%s): %m", file_path);
 		return -1;
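The cpuset.c rewrite above flattens the nested error handling into
fail-fast early returns. A minimal standalone sketch of the same
read-then-propagate pattern for a cpuset "mems" file (the paths and helper
name are hypothetical; a real run needs root and a cpuset filesystem
mounted, e.g. on /dev/cpuset):

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    /* copy the parent cpuset's "mems" into a child cpuset, -1 on error */
    static int copy_mems(const char *parent, const char *child)
    {
            char buf[256];
            ssize_t rc;
            int fd = open(parent, O_RDONLY);

            if (fd < 0) {
                    perror(parent);
                    return -1;
            }
            rc = read(fd, buf, sizeof(buf));
            close(fd);
            if (rc < 1) {
                    perror(parent);
                    return -1;
            }
            fd = open(child, O_CREAT | O_WRONLY, 0700);
            if (fd < 0) {
                    perror(child);
                    return -1;
            }
            if (write(fd, buf, rc) < rc) {
                    perror(child);
                    close(fd);
                    return -1;
            }
            close(fd);
            return 0;
    }

    int main(void)
    {
            return copy_mems("/dev/cpuset/mems",
                             "/dev/cpuset/slurm_demo/mems");
    }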
diff --git a/src/plugins/task/affinity/dist_tasks.c b/src/plugins/task/affinity/dist_tasks.c
index 357fbc5e09a281e83f4957953b125614b09a95f0..c0c48c2841b529cbacd0609d8d1f16670356d283 100644
--- a/src/plugins/task/affinity/dist_tasks.c
+++ b/src/plugins/task/affinity/dist_tasks.c
@@ -1,10 +1,12 @@
 /*****************************************************************************\
- *  Copyright (C) 2006 Hewlett-Packard Development Company, L.P.
+ *  Copyright (C) 2006-2009 Hewlett-Packard Development Company, L.P.
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
  *  Written by Susanne M. Balle, <susanne.balle@hp.com>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -32,83 +34,218 @@
  *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
 \*****************************************************************************/
 
-#include <limits.h>       /* INT_MAX */
-#include "src/plugins/task/affinity/dist_tasks.h"
-
-static slurm_lllp_ctx_t *lllp_ctx = NULL;	/* binding context */
-static struct node_gids *lllp_tasks = NULL;	/* Keep track of the task count
-						 * for logical processors
-						 * socket/core/thread. */
-static uint32_t lllp_reserved_size = 0;		/* lllp reserved array size */
-static uint32_t *lllp_reserved = NULL;   	/* count of Reserved lllps 
-						 * (socket, core, threads) */
-
-
-static void _task_layout_display_masks(launch_tasks_request_msg_t *req,
-				       const uint32_t *gtid,
-				       const uint32_t maxtasks,
-				       bitstr_t **masks);
-static int _init_lllp(void);
-static int _cleanup_lllp(void);
-static void _print_tasks_per_lllp(void);
+#include "affinity.h"
+#include "dist_tasks.h"
+#include "src/common/bitstring.h"
+#include "src/common/log.h"
+#include "src/common/slurm_cred.h"
+#include "src/common/slurm_protocol_api.h"
+#include "src/common/slurm_resource_info.h"
+#include "src/common/xmalloc.h"
+#include "src/slurmd/slurmd/slurmd.h"
+
+#ifdef HAVE_NUMA
+#include <numa.h>
+#endif
+
+static char *_alloc_mask(launch_tasks_request_msg_t *req,
+			 int *whole_node_cnt, int *whole_socket_cnt, 
+			 int *whole_core_cnt, int *whole_thread_cnt,
+			 int *part_socket_cnt, int *part_core_cnt);
+static bitstr_t *_get_avail_map(launch_tasks_request_msg_t *req,
+				uint16_t *hw_sockets, uint16_t *hw_cores,
+				uint16_t *hw_threads);
+static int _get_local_node_info(slurm_cred_arg_t *arg, uint32_t job_node_id,
+				uint16_t *sockets, uint16_t *cores);
+
 static int _task_layout_lllp_block(launch_tasks_request_msg_t *req,
-				   const uint32_t *gtid,
-				   const uint32_t maxtasks,
-				   bitstr_t ***masks_p);
+				   uint32_t node_id, bitstr_t ***masks_p);
 static int _task_layout_lllp_cyclic(launch_tasks_request_msg_t *req,
-				    const uint32_t *gtid,
-				    const uint32_t maxtasks,
-				    bitstr_t ***masks_p);
-static int _task_layout_lllp_plane(launch_tasks_request_msg_t *req,
-				   const uint32_t *gtid,
-				   const uint32_t maxtasks,
-				   bitstr_t ***masks_p);
-static void _lllp_enlarge_masks(launch_tasks_request_msg_t *req,
-				const uint32_t maxtasks,
-				bitstr_t **masks);
-static void _lllp_use_available(launch_tasks_request_msg_t *req,
-				const uint32_t maxtasks,
-				bitstr_t **masks);
-static bitstr_t *_lllp_map_abstract_mask (bitstr_t *bitmask);
+				    uint32_t node_id, bitstr_t ***masks_p);
+static int _task_layout_lllp_multi(launch_tasks_request_msg_t *req, 
+				    uint32_t node_id, bitstr_t ***masks_p);
+
 static void _lllp_map_abstract_masks(const uint32_t maxtasks,
 				     bitstr_t **masks);
 static void _lllp_generate_cpu_bind(launch_tasks_request_msg_t *req,
 				    const uint32_t maxtasks,
 				    bitstr_t **masks);
-static void _lllp_free_masks(launch_tasks_request_msg_t *req,
-			     const uint32_t maxtasks,
-			     bitstr_t **masks);
-static void _single_mask(const uint16_t nsockets, 
-			 const uint16_t ncores, 
-			 const uint16_t nthreads, 
-			 const uint16_t socket_id,
-			 const uint16_t core_id, 
-			 const uint16_t thread_id,
-			 const bool bind_to_exact_socket,
-			 const bool bind_to_exact_core,
-			 const bool bind_to_exact_thread,
-			 bitstr_t ** single_mask);
-static void _get_resources_this_node(uint16_t *cpus,
-				     uint16_t *sockets,
-				     uint16_t *cores,
-				     uint16_t *threads,
-				     uint16_t *alloc_cores,
-				     uint32_t jobid);
-static void _cr_update_reservation(int reserve, uint32_t *reserved, 
-				   bitstr_t *mask);
-
-/* Convenience macros: 
- *     SCT_TO_LLLP   sockets cores threads to abstract block LLLP index
- */
-#define SCT_TO_LLLP(s,c,t,ncores,nthreads)			\
-	(s)*((ncores)*(nthreads)) + (c)*(nthreads) + (t)
+
 /*     BLOCK_MAP     physical machine LLLP index to abstract block LLLP index
  *     BLOCK_MAP_INV physical abstract block LLLP index to machine LLLP index
  */
 #define BLOCK_MAP(index)	_block_map(index, conf->block_map)
 #define BLOCK_MAP_INV(index)	_block_map(index, conf->block_map_inv)
 
-static uint16_t _block_map(uint16_t index, uint16_t *map);
+
+/* _block_map
+ *
+ * safely returns a mapped index using a provided block map
+ *
+ * IN - index to map
+ * IN - map to use
+ */
+static uint16_t _block_map(uint16_t index, uint16_t *map)
+{
+	if (map == NULL) {
+	    	return index;
+	}
+	/* make sure bit falls in map */
+	if (index >= conf->block_map_size) {
+		debug3("wrapping index %u into block_map_size of %u",
+		       index, conf->block_map_size);
+		index = index % conf->block_map_size;
+	}
+	index = map[index];
+	return(index);
+}
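+/* e.g. (hypothetical map): with conf->block_map_size = 4 and
+ * map = {0,2,1,3}, _block_map(5, map) wraps 5 -> 1 and returns
+ * map[1] = 2 */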
+
+static void _task_layout_display_masks(launch_tasks_request_msg_t *req, 
+					const uint32_t *gtid,
+					const uint32_t maxtasks,
+					bitstr_t **masks)
+{
+	int i;
+	char *str = NULL;
+	for(i = 0; i < maxtasks; i++) {
+		str = (char *)bit_fmt_hexmask(masks[i]);
+		debug3("_task_layout_display_masks jobid [%u:%d] %s",
+		       req->job_id, gtid[i], str);
+		xfree(str);
+	}
+}
+
+static void _lllp_free_masks(const uint32_t maxtasks, bitstr_t **masks)
+{
+    	int i;
+	bitstr_t *bitmask;
+	for (i = 0; i < maxtasks; i++) { 
+		bitmask = masks[i];
+	    	if (bitmask) {
+			bit_free(bitmask);
+		}
+	}
+	xfree(masks);
+}
+
+#ifdef HAVE_NUMA
+/* _match_mask_to_ldom
+ *
+ * expand each mask to encompass the whole locality domain
+ * within which it currently exists
+ * NOTE: this assumes that the masks are already in logical
+ * (and not abstract) CPU order.
+ */
+static void _match_masks_to_ldom(const uint32_t maxtasks, bitstr_t **masks)
+{
+	uint32_t i, b, size;
+
+	if (!masks || !masks[0])
+		return;
+	size = bit_size(masks[0]);
+	for(i = 0; i < maxtasks; i++) {
+		for (b = 0; b < size; b++) {
+			if (bit_test(masks[i], b)) {
+				/* get the NUMA node for this CPU, and then
+				 * set all CPUs in the mask that exist in
+				 * the same NUMA node */
+				int c;
+				uint16_t nnid = slurm_get_numa_node(b);
+				for (c = 0; c < size; c++) {
+					if (slurm_get_numa_node(c) == nnid)
+						bit_set(masks[i], c);
+				}
+			}
+		}
+	}
+}
+#endif
+
+/* 
+ * batch_bind - Set the batch request message so as to bind the shell to the 
+ *	proper resources
+ */
+void batch_bind(batch_job_launch_msg_t *req)
+{
+	bitstr_t *req_map, *hw_map;
+	slurm_cred_arg_t arg;
+	uint16_t sockets=0, cores=0, num_procs;
+	int hw_size, start, p, t, task_cnt=0;
+	char *str;
+
+	if (slurm_cred_get_args(req->cred, &arg) != SLURM_SUCCESS) {
+		error("task/affinity: job lacks a credential");
+		return;
+	}
+	start = _get_local_node_info(&arg, 0, &sockets, &cores);
+	if (start != 0) {
+		error("task/affinity: missing node 0 in job credential");
+		slurm_cred_free_args(&arg);
+		return;
+	}
+
+	hw_size    = conf->sockets * conf->cores * conf->threads;
+	num_procs  = MIN((sockets * cores),
+			 (conf->sockets * conf->cores));
+	req_map = (bitstr_t *) bit_alloc(num_procs);
+	hw_map  = (bitstr_t *) bit_alloc(hw_size);
+	if (!req_map || !hw_map) {
+		error("task/affinity: malloc error");
+		bit_free(req_map);
+		bit_free(hw_map);
+		slurm_cred_free_args(&arg);
+		return;
+	}
+
+	/* Transfer core_bitmap data to local req_map.
+	 * The MOD function handles the case where fewer processes
+	 * physically exist than are configured (slurmd is out of 
+	 * sync with the slurmctld daemon). */
+	for (p = 0; p < (sockets * cores); p++) {
+		if (bit_test(arg.core_bitmap, p))
+			bit_set(req_map, (p % num_procs));
+	}
+	str = (char *)bit_fmt_hexmask(req_map);
+	debug3("task/affinity: job %u CPU mask from slurmctld: %s",
+		req->job_id, str);
+	xfree(str);
+
+	for (p = 0; p < num_procs; p++) {
+		if (bit_test(req_map, p) == 0)
+			continue;
+		/* core_bitmap does not include threads, so we
+		 * add them here but limit them to what the job
+		 * requested */
+		for (t = 0; t < conf->threads; t++) {
+			uint16_t bit = p * conf->threads + t;
+			bit_set(hw_map, bit);
+			task_cnt++;
+		}
+	}
+	if (task_cnt) {
+		req->cpu_bind_type = CPU_BIND_MASK;
+		if (conf->task_plugin_param & CPU_BIND_VERBOSE)
+			req->cpu_bind_type |= CPU_BIND_VERBOSE;
+		req->cpu_bind = (char *)bit_fmt_hexmask(hw_map);
+		info("task/affinity: job %u CPU input mask for node: %s",
+		     req->job_id, req->cpu_bind);
+		/* translate abstract masks to actual hardware layout */
+		_lllp_map_abstract_masks(1, &hw_map);
+#ifdef HAVE_NUMA
+		if (req->cpu_bind_type & CPU_BIND_TO_LDOMS) {
+			_match_masks_to_ldom(1, &hw_map);
+		}
+#endif
+		xfree(req->cpu_bind);
+		req->cpu_bind = (char *)bit_fmt_hexmask(hw_map);
+		info("task/affinity: job %u CPU final HW mask for node: %s",
+		     req->job_id, req->cpu_bind);
+	} else {
+		error("task/affinity: job %u allocated no CPUs", 
+		      req->job_id);
+	}
+	bit_free(hw_map);
+	bit_free(req_map);
+	slurm_cred_free_args(&arg);
+}
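+/* Example of the core->thread expansion above (hypothetical counts):
+ * with conf->threads = 2, core bit p in req_map becomes CPU bits 2p and
+ * 2p+1 in hw_map, so cores {0,2} yield the input mask 0x33. */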
 
 /* 
  * lllp_distribution
@@ -118,9 +255,10 @@ static uint16_t _block_map(uint16_t index, uint16_t *map);
  * When automatic binding is enabled:
  *      - no binding flags set >= CPU_BIND_NONE, and
  *      - a auto binding level selected CPU_BIND_TO_{SOCKETS,CORES,THREADS}
+ * Otherwise limit job step to the allocated CPUs
  *
  * generate the appropriate cpu_bind type and string which results in
- * the sepcified lllp distribution.
+ * the specified lllp distribution.
  *
  * IN/OUT- job launch request (cpu_bind_type and cpu_bind updated)
  * IN- global task id array
@@ -131,576 +269,537 @@ void lllp_distribution(launch_tasks_request_msg_t *req, uint32_t node_id)
 	bitstr_t **masks = NULL;
 	char buf_type[100];
 	int maxtasks = req->tasks_to_launch[(int)node_id];
+	int whole_nodes, whole_sockets, whole_cores, whole_threads;
+	int part_sockets, part_cores;
         const uint32_t *gtid = req->global_task_ids[(int)node_id];
-	
-	slurm_sprint_cpu_bind_type(buf_type, req->cpu_bind_type);
-	if(req->cpu_bind_type >= CPU_BIND_NONE) {
+	static uint16_t bind_entity = CPU_BIND_TO_THREADS | CPU_BIND_TO_CORES |
+				      CPU_BIND_TO_SOCKETS | CPU_BIND_TO_LDOMS;
+	static uint16_t bind_mode = CPU_BIND_NONE   | CPU_BIND_MASK   |
+				    CPU_BIND_RANK   | CPU_BIND_MAP    |
+				    CPU_BIND_LDMASK | CPU_BIND_LDRANK | 
+				    CPU_BIND_LDMAP;
+
+	if (req->cpu_bind_type & bind_mode) {
+		/* Explicit step binding specified by user */
+		char *avail_mask = _alloc_mask(req,
+					       &whole_nodes,  &whole_sockets, 
+					       &whole_cores,  &whole_threads,
+					       &part_sockets, &part_cores);
+		if ((whole_nodes == 0) && avail_mask) {
+			/* Step does NOT have access to whole node, 
+			 * bind to full mask of available processors */
+			xfree(req->cpu_bind);
+			req->cpu_bind = avail_mask;
+			req->cpu_bind_type &= (~bind_mode);
+			req->cpu_bind_type |= CPU_BIND_MASK;
+		} else {
+			/* Step does have access to whole node, 
+			 * bind to whatever step wants */
+			xfree(avail_mask);
+		}
+		slurm_sprint_cpu_bind_type(buf_type, req->cpu_bind_type);
 		info("lllp_distribution jobid [%u] manual binding: %s",
 		     req->job_id, buf_type);
 		return;
 	}
-	if (!((req->cpu_bind_type & CPU_BIND_TO_THREADS) ||
-	      (req->cpu_bind_type & CPU_BIND_TO_CORES) ||
-	      (req->cpu_bind_type & CPU_BIND_TO_SOCKETS))) {
+
+	if (!(req->cpu_bind_type & bind_entity)) {
+		/* No bind unit (sockets, cores) specified by user,
+		 * pick something reasonable */
+		int max_tasks = req->tasks_to_launch[(int)node_id];
+		char *avail_mask = _alloc_mask(req,
+					       &whole_nodes,  &whole_sockets, 
+					       &whole_cores,  &whole_threads,
+					       &part_sockets, &part_cores);
+		debug("binding tasks:%d to "
+		      "nodes:%d sockets:%d:%d cores:%d:%d threads:%d",
+		      max_tasks, whole_nodes, whole_sockets, part_sockets,
+		      whole_cores, part_cores, whole_threads);
+		if ((max_tasks == whole_sockets) && (part_sockets == 0)) {
+			req->cpu_bind_type |= CPU_BIND_TO_SOCKETS;
+			goto make_auto;
+		}
+		if ((max_tasks == whole_cores) && (part_cores == 0)) {
+			req->cpu_bind_type |= CPU_BIND_TO_CORES;
+			goto make_auto;
+		}
+		if (max_tasks == whole_threads) {
+			req->cpu_bind_type |= CPU_BIND_TO_THREADS;
+			goto make_auto;
+		}
+		if (avail_mask) {
+			xfree(req->cpu_bind);
+			req->cpu_bind = avail_mask;
+			req->cpu_bind_type |= CPU_BIND_MASK;
+		}
+		slurm_sprint_cpu_bind_type(buf_type, req->cpu_bind_type);
 		info("lllp_distribution jobid [%u] auto binding off: %s",
 		     req->job_id, buf_type);
 		return;
-	}
 
-	/* We are still thinking about this. Does this make sense?
-	if (req->task_dist == SLURM_DIST_ARBITRARY) {
-		req->cpu_bind_type >= CPU_BIND_NONE;
-		info("lllp_distribution jobid [%u] -m hostfile - auto binding off ",
-		     req->job_id);
-		return;
+  make_auto:	xfree(avail_mask);
+		slurm_sprint_cpu_bind_type(buf_type, req->cpu_bind_type);
+		info("lllp_distribution jobid [%u] implicit auto binding: "
+		     "%s, dist %d", req->job_id, buf_type, req->task_dist);
+	} else {
+		/* Explicit bind unit (sockets, cores) specified by user */
+		slurm_sprint_cpu_bind_type(buf_type, req->cpu_bind_type);
+		info("lllp_distribution jobid [%u] binding: %s, dist %d",
+		     req->job_id, buf_type, req->task_dist);
 	}
-	*/
-
-	info("lllp_distribution jobid [%u] auto binding: %s, dist %d",
-	     req->job_id, buf_type, req->task_dist);
 
 	switch (req->task_dist) {
 	case SLURM_DIST_BLOCK_BLOCK:
 	case SLURM_DIST_CYCLIC_BLOCK:
-		rc = _task_layout_lllp_block(req, gtid, maxtasks, &masks);
+	case SLURM_DIST_PLANE:
+		/* tasks are distributed in blocks within a plane */
+		rc = _task_layout_lllp_block(req, node_id, &masks);
 		break;
 	case SLURM_DIST_CYCLIC:
 	case SLURM_DIST_BLOCK:
 	case SLURM_DIST_CYCLIC_CYCLIC:
 	case SLURM_DIST_BLOCK_CYCLIC:
-		rc = _task_layout_lllp_cyclic(req, gtid, maxtasks, &masks); 
-		break;
-	case SLURM_DIST_PLANE:
-		rc = _task_layout_lllp_plane(req, gtid, maxtasks, &masks); 
+		rc = _task_layout_lllp_cyclic(req, node_id, &masks); 
 		break;
 	default:
-		rc = _task_layout_lllp_cyclic(req, gtid, maxtasks, &masks); 
+		if (req->cpus_per_task > 1)
+			rc = _task_layout_lllp_multi(req, node_id, &masks);
+		else
+			rc = _task_layout_lllp_cyclic(req, node_id, &masks);
 		req->task_dist = SLURM_DIST_BLOCK_CYCLIC;
 		break;
 	}
 
+	/* FIXME: I'm worried about core_bitmap with CPU_BIND_TO_SOCKETS &
+	 * max_cores - does select/cons_res plugin allocate whole
+	 * socket??? Maybe not. Check srun man page.
+	 */
+
 	if (rc == SLURM_SUCCESS) {
 		_task_layout_display_masks(req, gtid, maxtasks, masks); 
-		if (req->cpus_per_task > 1) {
-			_lllp_enlarge_masks(req, maxtasks, masks);
-		}
-		_task_layout_display_masks(req, gtid, maxtasks, masks); 
-	    	_lllp_use_available(req, maxtasks, masks);
-		_task_layout_display_masks(req, gtid, maxtasks, masks); 
-	    	_lllp_map_abstract_masks(maxtasks, masks);
+		/* translate abstract masks to actual hardware layout */
+		_lllp_map_abstract_masks(maxtasks, masks);
 		_task_layout_display_masks(req, gtid, maxtasks, masks); 
-	    	_lllp_generate_cpu_bind(req, maxtasks, masks);
+#ifdef HAVE_NUMA
+		if (req->cpu_bind_type & CPU_BIND_TO_LDOMS) {
+			_match_masks_to_ldom(maxtasks, masks);
+			_task_layout_display_masks(req, gtid, maxtasks, masks);
+		}
+#endif
+		/* convert masks into cpu_bind mask string */
+		_lllp_generate_cpu_bind(req, maxtasks, masks);
+	} else {
+		char *avail_mask = _alloc_mask(req,
+					       &whole_nodes,  &whole_sockets,
+					       &whole_cores,  &whole_threads,
+					       &part_sockets, &part_cores);
+		if (avail_mask) {
+			xfree(req->cpu_bind);
+			req->cpu_bind = avail_mask;
+			req->cpu_bind_type &= (~bind_mode);
+			req->cpu_bind_type |= CPU_BIND_MASK;
+		}
+		slurm_sprint_cpu_bind_type(buf_type, req->cpu_bind_type);
+		error("lllp_distribution jobid [%u] overriding binding: %s",
+		      req->job_id, buf_type);
+		error("Verify socket/core/thread counts in configuration");
 	}
-	_lllp_free_masks(req, maxtasks, masks);
+	_lllp_free_masks(maxtasks, masks);
 }
 
-static
-void _task_layout_display_masks(launch_tasks_request_msg_t *req, 
-				const uint32_t *gtid,
-				const uint32_t maxtasks,
-				bitstr_t **masks)
-{
-	int i;
-	for(i=0; i<maxtasks;i++) {
-		char *str = bit_fmt_hexmask(masks[i]);
-		debug3("_task_layout_display_masks jobid [%u:%d] %s",
-		       req->job_id, gtid[i], str);
-		xfree(str);
-	}
-}
 
 /*
- * _compute_min_overlap
- *
- * Given a mask and a set of current reservations, return the
- * minimum overlap between the mask and the reservations and the
- * rotation required to obtain it
- *
- * IN-  bitmask - bitmask to rotate
- * IN-  resv - current reservations
- * IN-  rotmask_size - size of mask to use during rotation
- * IN-  rotval - starting rotation value
- * IN-  rot_incr - rotation increment
- * OUT- p_min_overlap- minimum overlap
- * OUT- p_min_rotval- rotation to obtain minimum overlap
+ * _get_local_node_info - get job allocation details for this node
+ * IN: req         - launch request structure
+ * IN: job_node_id - index of the local node in the job allocation
+ * IN/OUT: sockets - pointer to socket count variable
+ * IN/OUT: cores   - pointer to cores_per_socket count variable
+ * OUT:  returns the core_bitmap index of the first core for this node
  */
-static void
-_compute_min_overlap(bitstr_t *bitmask, uint32_t *resv,
-			int rotmask_size, int rotval, int rot_incr, 
-			int *p_min_overlap, int *p_min_rotval)
+static int _get_local_node_info(slurm_cred_arg_t *arg, uint32_t job_node_id,
+				uint16_t *sockets, uint16_t *cores)
 {
-	int min_overlap = INT_MAX;
-	int min_rotval  = 0;
-	int rot_cnt;
-	int j;
-	if (rot_incr <= 0) {
-		rot_incr = 1;
-	}
-	rot_cnt = rotmask_size / rot_incr;
-	debug3("  rotval:%d rot_incr:%d rot_cnt:%d",
-					rotval, rot_incr, rot_cnt);
-	for (j = 0; j < rot_cnt; j++) {
-		int overlap;		       
-		bitstr_t *newmask = bit_rotate_copy(bitmask, rotval,
-						    rotmask_size);
-		bitstr_t *physmask = _lllp_map_abstract_mask(newmask);
-		overlap = int_and_set_count((int *)resv,
-					    lllp_reserved_size,
-					    physmask);
-		bit_free(newmask);
-		bit_free(physmask);
-		debug3("  rotation #%d %d => overlap:%d", j, rotval, overlap);
-		if (overlap < min_overlap) {
-			min_overlap = overlap;
-			min_rotval  = rotval;
+	int bit_start = 0, bit_finish = 0;
+	int i, index = -1, cur_node_id = -1;
+
+	do {
+		index++;
+		for (i = 0; i < arg->sock_core_rep_count[index] &&
+			    cur_node_id < job_node_id; i++) {
+			bit_start = bit_finish;
+			bit_finish += arg->sockets_per_node[index] *
+					arg->cores_per_socket[index];
+			cur_node_id++;
 		}
-		if (overlap == 0) {	/* no overlap, stop rotating */
-			debug3("  --- found zero overlap, stopping search");
-			break;
-		}
-		rotval += rot_incr;
-	}
-	debug3("  min_overlap:%d min_rotval:%d",
-					min_overlap, min_rotval);
-	*p_min_overlap = min_overlap;
-	*p_min_rotval  = min_rotval;
+		
+	} while (cur_node_id < job_node_id);
+
+	*sockets = arg->sockets_per_node[index];
+	*cores   = arg->cores_per_socket[index];
+	return bit_start;
 }
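+/* Example of the credential walk above (hypothetical allocation): with
+ * sockets_per_node = {2}, cores_per_socket = {4} and
+ * sock_core_rep_count = {3}, every node spans 2*4 = 8 core_bitmap bits,
+ * so job_node_id 2 returns bit_start 16 with sockets=2, cores=4. */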
 
-/*
- * _lllp_enlarge_masks
- *
- * Given an array of masks, update the masks to honor the number
- * of cpus requested per task in req->cpus_per_task.  Note: not
- * concerned with mask overlap between tasks as _lllp_use_available
- * will take care of that.
- *
- * IN- job launch request
- * IN- maximum number of tasks
- * IN/OUT- array of masks
- */
-static void _lllp_enlarge_masks (launch_tasks_request_msg_t *req,
-				const uint32_t maxtasks,
-				bitstr_t **masks)
+/* enforce max_sockets, max_cores */
+static void _enforce_limits(launch_tasks_request_msg_t *req, bitstr_t *mask,
+			    uint16_t hw_sockets, uint16_t hw_cores,
+			    uint16_t hw_threads)
 {
-	int i, j, k, l;
-	int cpus_per_task = req->cpus_per_task;
-
-	debug3("_lllp_enlarge_masks");
+	uint16_t i, j, size, count = 0;
+	int prev = -1;
 
-	/* enlarge each mask */
-	for (i = 0; i < maxtasks; i++) {
-		bitstr_t *bitmask = masks[i];
-		bitstr_t *addmask;
-		int bitmask_size = bit_size(bitmask);
-		int num_added = 0;
-
-		/* get current number of set bits in bitmask */
-		int num_set = bit_set_count(bitmask);
-		if (num_set >= cpus_per_task) {
+	size = bit_size(mask);
+	/* enforce max_sockets */
+	for (i = 0; i < size; i++) {
+		if (bit_test(mask, i) == 0)
 			continue;
+		/* j = first bit in socket; i = last bit in socket */
+		j = i/(hw_cores * hw_threads) * (hw_cores * hw_threads);
+		i = j+(hw_cores * hw_threads)-1;
+		if (++count > req->max_sockets) {
+			bit_nclear(mask, j, i);
+			count--;
 		}
+	}
 
-		/* add bits by selecting disjoint cores first, then threads */
-		for (j = conf->threads; j > 0; j--) {
-			/* rotate current bitmask to find new candidate bits */
-		        for (k = 1; k < bitmask_size / j; k++) {
-				addmask = bit_rotate_copy(bitmask, k*j,
-								bitmask_size);
-
-			    	/* check candidate bits to add into to bitmask */
-				for (l = 0; l < bitmask_size; l++) {
-					if (bit_test(addmask,l) &&
-					    !bit_test(bitmask,l)) {
-						bit_set(bitmask,l);
-						num_set++;
-						num_added++;
-					}
-					if (num_set >= cpus_per_task) {
-						break;
-					}
-				}
+	/* enforce max_cores */
+	for (i = 0; i < size; i++) {
+		if (bit_test(mask, i) == 0)
+			continue;
+		/* j = first bit in socket */
+		j = i/(hw_cores * hw_threads) * (hw_cores * hw_threads);
+		if (j != prev) {
+			/* we're in a new socket, so reset the count */
+			count = 0;
+			prev = j;
+		}
+		/* j = first bit in core; i = last bit in core */
+		j = i/hw_threads * hw_threads;
+		i = j+hw_threads-1;
+		if (++count > req->max_cores) {
+			bit_nclear(mask, j, i);
+			count--;
+		}
+	}
+}
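+/* Example of the limit enforcement above (hypothetical counts): with
+ * hw_cores=2 and hw_threads=2 a socket spans 4 bits; under
+ * max_sockets=1, the first set bit found in socket 1 (bits 4..7)
+ * triggers bit_nclear(mask, 4, 7), dropping that whole socket. */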
 
-				/* done with candidate mask */
-				bit_free(addmask);
-				if (num_set >= cpus_per_task) {
-					break;
-				}
+/* Determine which CPUs a job step can use. 
+ * OUT whole_<entity>_count - returns count of whole <entities> in this 
+ *                            allocation for this node
+ * OUT part_<entity>_count  - returns count of partial <entities> in this 
+ *                            allocation for this node
+ * RET - a string representation of the available mask or NULL on error
+ * NOTE: Caller must xfree() the return value. */
+static char *_alloc_mask(launch_tasks_request_msg_t *req,
+			 int *whole_node_cnt,  int *whole_socket_cnt, 
+			 int *whole_core_cnt,  int *whole_thread_cnt,
+			 int *part_socket_cnt, int *part_core_cnt)
+{
+	uint16_t sockets, cores, threads;
+	int c, s, t, i, mask;
+	int c_miss, s_miss, t_miss, c_hit, t_hit;
+	bitstr_t *alloc_bitmap;
+	char *str_mask;
+
+	*whole_node_cnt   = 0;
+	*whole_socket_cnt = 0;
+	*whole_core_cnt   = 0;
+	*whole_thread_cnt = 0;
+	*part_socket_cnt  = 0;
+	*part_core_cnt    = 0;
+
+	alloc_bitmap = _get_avail_map(req, &sockets, &cores, &threads);
+	if (!alloc_bitmap)
+		return NULL;
+
+	i = mask = 0;
+	for (s=0, s_miss=false; s<sockets; s++) {
+		for (c=0, c_hit=c_miss=false; c<cores; c++) {
+			for (t=0, t_hit=t_miss=false; t<threads; t++) {
+				if (bit_test(alloc_bitmap, i)) {
+					mask |= (1 << i);
+					(*whole_thread_cnt)++;
+					t_hit = true;
+					c_hit = true;
+				} else
+					t_miss = true;
+				i++;
 			}
-			if (num_set >= cpus_per_task) {
-				break;
+			if (!t_miss)
+				(*whole_core_cnt)++;
+			else {
+				if (t_hit)
+					(*part_core_cnt)++;
+				c_miss = true;
 			}
 		}
-		debug3("  mask %d => added %d bits", i, num_added);
+		if (!c_miss)
+			(*whole_socket_cnt)++;
+		else {
+			if (c_hit)
+				(*part_socket_cnt)++;
+			s_miss = true;
+		}
 	}
+	if (!s_miss)
+		(*whole_node_cnt)++;
+	bit_free(alloc_bitmap);
+
+	str_mask = xmalloc(16);
+	snprintf(str_mask, 16, "%x", mask);
+	return str_mask;
 }
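+/* Bit layout assumed above (hypothetical counts): index i runs
+ * thread-fastest, i.e. i = (s*cores + c)*threads + t, so with
+ * sockets=2, cores=2, threads=2, bit 5 is socket 1, core 0, thread 1. */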
 
 /*
- * _lllp_use_available
- *
- * Given an array of masks, update the masks to make best use of
- * available resources based on the current state of reservations
- * recorded in conf->lllp_reserved.
- *
- * IN- job launch request
- * IN- maximum number of tasks
- * IN/OUT- array of masks
+ * Given a job step request, return an equivalent local bitmap for this node
+ * IN req          - The job step launch request
+ * OUT hw_sockets  - number of actual sockets on this node
+ * OUT hw_cores    - number of actual cores per socket on this node
+ * OUT hw_threads  - number of actual threads per core on this node
+ * RET: bitmap of processors available to this job step on this node
+ *      OR NULL on error
  */
-static void _lllp_use_available (launch_tasks_request_msg_t *req,
-				const uint32_t maxtasks,
-				bitstr_t **masks)
+static bitstr_t *_get_avail_map(launch_tasks_request_msg_t *req,
+				uint16_t *hw_sockets, uint16_t *hw_cores,
+				uint16_t *hw_threads)
 {
-	int resv_incr, i;
-	uint32_t *resv;
-	int rotval, prev_rotval;
-
-	/* select the unit of reservation rotation increment based on CR type */
-	if ((conf->cr_type == CR_SOCKET) 
-	    || (conf->cr_type == CR_SOCKET_MEMORY)) {
-		resv_incr = conf->cores * conf->threads; /* socket contents */
-	} else if ((conf->cr_type == CR_CORE) 
-		   || (conf->cr_type == CR_CORE_MEMORY)) {
-		resv_incr = conf->threads;		 /* core contents */
-	} else {
-		resv_incr = conf->threads;		 /* core contents */
-	}
-	if (resv_incr < 1) {		/* make sure increment is non-zero */ 
-		debug3("_lllp_use_available changed resv_incr %d to 1", resv_incr);
-		resv_incr = 1;
+	bitstr_t *req_map, *hw_map;
+	slurm_cred_arg_t arg;
+	uint16_t p, t, num_procs, num_threads, sockets, cores, hw_size;
+	uint32_t job_node_id;
+	int start;
+	char *str;
+
+	*hw_sockets = conf->sockets;
+	*hw_cores   = conf->cores;
+	*hw_threads = conf->threads;
+	hw_size    = (*hw_sockets) * (*hw_cores) * (*hw_threads);
+
+	if (slurm_cred_get_args(req->cred, &arg) != SLURM_SUCCESS) {
+		error("task/affinity: job lacks a credential");
+		return NULL;
 	}
 
-	debug3("_lllp_use_available resv_incr = %d", resv_incr);
+	/* we need this node's ID in relation to the whole
+	 * job allocation, not just this jobstep */
+	job_node_id = nodelist_find(arg.job_hostlist, conf->node_name);
+	start = _get_local_node_info(&arg, job_node_id, &sockets, &cores);
+	if (start < 0) {
+		error("task/affinity: missing node %u in job credential",
+		      job_node_id);
+		slurm_cred_free_args(&arg);
+		return NULL;
+	}
+	debug3("task/affinity: slurmctld s %u c %u; hw s %u c %u t %u",
+		sockets, cores, *hw_sockets, *hw_cores, *hw_threads);
+
+	num_procs   = MIN((sockets * cores),
+			  ((*hw_sockets)*(*hw_cores)));
+	req_map = (bitstr_t *) bit_alloc(num_procs);
+	hw_map  = (bitstr_t *) bit_alloc(hw_size);
+	if (!req_map || !hw_map) {
+		error("task/affinity: malloc error");
+		bit_free(req_map);
+		bit_free(hw_map);
+		slurm_cred_free_args(&arg);
+		return NULL;
+	}
+	/* Transfer core_bitmap data to local req_map.
+	 * The MOD function handles the case where fewer processes
+	 * physically exist than are configured (slurmd is out of 
+	 * sync with the slurmctld daemon). */
+	for (p = 0; p < (sockets * cores); p++) {
+		if (bit_test(arg.core_bitmap, start+p))
+			bit_set(req_map, (p % num_procs));
+	}
 
-	/* get a copy of the current reservations */
-	resv = xmalloc(lllp_reserved_size * sizeof(uint32_t));
-        memcpy(resv, lllp_reserved, lllp_reserved_size * sizeof(uint32_t));
+	str = (char *)bit_fmt_hexmask(req_map);
+	debug3("task/affinity: job %u.%u CPU mask from slurmctld: %s",
+		req->job_id, req->job_step_id, str);
+	xfree(str);
 
-	/* check each mask against current reservations */
-	rotval      = 0;
-	prev_rotval = 0;
-	for (i = 0; i < maxtasks; i++) {
-		bitstr_t *bitmask = masks[i];
-		bitstr_t *physmask = NULL;
-		int min_overlap, min_rotval;
+	if (req->max_threads == 0) {
+		error("task/affinity: job %u.%u has max_threads=0",
+		      req->job_id, req->job_step_id);
+		req->max_threads = 1;
+	}
+	if (req->max_cores == 0) {
+		error("task/affinity: job %u.%u has max_coress=0",
+		      req->job_id, req->job_step_id);
+		req->max_cores = 1;
+	}
+	if (req->max_sockets == 0) {
+		error("task/affinity: job %u.%u has max_sockets=0",
+		      req->job_id, req->job_step_id);
+		req->max_sockets = 1;
+	}
+	num_threads = MIN(req->max_threads, (*hw_threads));
+	for (p = 0; p < num_procs; p++) {
+		if (bit_test(req_map, p) == 0)
+			continue;
+		/* core_bitmap does not include threads, so we
+		 * add them here but limit them to what the job
+		 * requested */
+		for (t = 0; t < num_threads; t++) {
+			uint16_t bit = p * (*hw_threads) + t;
+			bit_set(hw_map, bit);
+		}
+	}
 
-		/* create masks that are at least as large as the reservation */
-		int bitmask_size = bit_size(bitmask);
-		int rotmask_size = MAX(bitmask_size, lllp_reserved_size);
+	/* enforce max_sockets and max_cores limits */
+	_enforce_limits(req, hw_map, *hw_sockets, *hw_cores, *hw_threads);
+	
+	str = (char *)bit_fmt_hexmask(hw_map);
+	debug3("task/affinity: job %u.%u CPU final mask for local node: %s",
+		req->job_id, req->job_step_id, str);
+	xfree(str);
 
-		/* get maximum number of contiguous bits in bitmask */
-		int contig_bits = bit_nset_max_count(bitmask);
+	bit_free(req_map);
+	slurm_cred_free_args(&arg);
+	return hw_map;
+}
 
-		/* make sure the reservation increment is larger than the number
-		 * of contiguous bits in the mask to maintain any properties
-		 * present in the mask (e.g. use both cores on one socket)
-		 */
-		int this_resv_incr = resv_incr;
-		while (this_resv_incr < contig_bits) {
-			this_resv_incr += resv_incr;
-		}
+/* helper function for _expand_masks() */
+static void _blot_mask(bitstr_t *mask, uint16_t blot)
+{
+	uint16_t i, size = 0;
+	int prev = -1;
 
-		/* rotate mask to find the minimum reservation overlap starting
-		 * with the previous rotation value
-		 */
-		rotval  = prev_rotval;
-		debug3("mask %d compute_min_overlap contig:%d", i, contig_bits);
-		_compute_min_overlap(bitmask, resv,
-					rotmask_size, rotval, this_resv_incr,
-					&min_overlap, &min_rotval);
-
-		/* if we didn't find a zero overlap, recheck at a thread
-		 * granularity
-		 */
-		if (min_overlap != 0) {
-		        int prev_resv_incr = this_resv_incr;
-			this_resv_incr = 1;
-			if (this_resv_incr != prev_resv_incr) {
-				int this_min_overlap, this_min_rotval;
-				_compute_min_overlap(bitmask, resv,
-					rotmask_size, rotval, this_resv_incr,
-					&this_min_overlap, &this_min_rotval);
-				if (this_min_overlap < min_overlap) {
-					min_overlap = this_min_overlap;
-					min_rotval  = this_min_rotval;
-				}
+	if (!mask)
+		return;
+	size = bit_size(mask);
+	for (i = 0; i < size; i++) {
+		if (bit_test(mask, i)) {
+			/* fill in this blot */
+			uint16_t start = (i / blot) * blot;
+			if (start != prev) {
+				bit_nset(mask, start, start+blot-1);
+				prev = start;
 			}
 		}
-
-		rotval = min_rotval;	/* readjust for the minimum overlap */
-		if (rotval != 0) {
-			bitstr_t *newmask = bit_rotate_copy(bitmask, rotval,
-							    rotmask_size);
-			bit_free(masks[i]);
-			masks[i] = newmask;
-		}
-
-		debug3("  mask %d => rotval %d", i, rotval);
-		/* accepted current mask, add to copy of the reservations */
-		physmask = _lllp_map_abstract_mask(masks[i]);
-		_cr_update_reservation(1, resv, physmask);
-		bit_free(physmask);
-		prev_rotval = rotval;
 	}
-	xfree(resv);
 }
 
-/*
- * _lllp_map_abstract_mask
- *
- * Map one abstract block mask to a physical machine mask
- *
- * IN - mask to map
- * OUT - mapped mask (storage allocated in this routine)
- */
-static bitstr_t *_lllp_map_abstract_mask (bitstr_t *bitmask)
+/* foreach mask, expand the mask around the set bits to include the
+ * complete resource to which the set bits are to be bound */
+static void _expand_masks(uint16_t cpu_bind_type, const uint32_t maxtasks,
+			  bitstr_t **masks, uint16_t hw_sockets,
+			  uint16_t hw_cores, uint16_t hw_threads)
 {
-    	int i, bit;
-	int num_bits = bit_size(bitmask);
-	bitstr_t *newmask = bit_alloc(num_bits);
+	uint32_t i;
 
-	/* remap to physical machine */
-	for (i = 0; i < num_bits; i++) {
-		if (bit_test(bitmask,i)) {
-			bit = BLOCK_MAP(i);
-			bit_set(newmask, bit);
+	if (cpu_bind_type & CPU_BIND_TO_THREADS)
+		return;
+	if (cpu_bind_type & CPU_BIND_TO_CORES) {
+		if (hw_threads < 2)
+			return;
+		for (i = 0; i < maxtasks; i++) {
+			_blot_mask(masks[i], hw_threads);
+		}
+		return;
+	}
+	if (cpu_bind_type & CPU_BIND_TO_SOCKETS) {
+		if (hw_threads*hw_cores < 2)
+			return;
+		for (i = 0; i < maxtasks; i++) {
+			_blot_mask(masks[i], hw_threads*hw_cores);
 		}
+		return;
 	}
-	return newmask;
 }
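+/* Worked example for _blot_mask()/_expand_masks() (hypothetical counts):
+ * when binding to cores with hw_threads=2, a mask with only bit 5 set
+ * is blotted out to bits 4-5, the two hardware threads of that core. */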
 
-/*
- * _lllp_map_abstract_masks
+/* 
+ * _task_layout_lllp_multi
  *
- * Map an array of abstract block masks to physical machine masks
+ * A variant of _task_layout_lllp_cyclic for use with allocations having
+ * more than one CPU per task: pack the tasks as close as possible (fill
+ * a core rather than moving to the next socket for the extra CPUs)
  *
- * IN- maximum number of tasks
- * IN/OUT- array of masks
  */
-static void _lllp_map_abstract_masks(const uint32_t maxtasks,
-				     bitstr_t **masks)
+static int _task_layout_lllp_multi(launch_tasks_request_msg_t *req, 
+				    uint32_t node_id, bitstr_t ***masks_p)
 {
-    	int i;
-	debug3("_lllp_map_abstract_masks");
+	int last_taskcount = -1, taskcount = 0;
+	uint16_t c, i, s, t, hw_sockets = 0, hw_cores = 0, hw_threads = 0;
+	uint16_t num_threads, num_cores, num_sockets;
+	int size, max_tasks = req->tasks_to_launch[(int)node_id];
+	int max_cpus = max_tasks * req->cpus_per_task;
+	bitstr_t *avail_map;
+	bitstr_t **masks = NULL;
 	
-	for (i = 0; i < maxtasks; i++) { 
-		bitstr_t *bitmask = masks[i];
-	    	if (bitmask) {
-			bitstr_t *newmask = _lllp_map_abstract_mask(bitmask);
-			bit_free(bitmask);
-			masks[i] = newmask;
+	info ("_task_layout_lllp_multi ");
+
+	avail_map = _get_avail_map(req, &hw_sockets, &hw_cores, &hw_threads);
+	if (!avail_map)
+		return SLURM_ERROR;
+	
+	*masks_p = xmalloc(max_tasks * sizeof(bitstr_t*));
+	masks = *masks_p;
+	
+	size = bit_set_count(avail_map);
+	if (size < max_tasks) {
+		error("task/affinity: only %d bits in avail_map for %d tasks!",
+		      size, max_tasks);
+		bit_free(avail_map);
+		return SLURM_ERROR;
+	}
+	if (size < max_cpus) {
+		/* Possible result of overcommit */
+		i = size / max_tasks;
+		info("task/affinity: reset cpus_per_task from %d to %d",
+		     req->cpus_per_task, i);
+		req->cpus_per_task = i;
+	}
+	
+	size = bit_size(avail_map);
+	num_sockets = MIN(req->max_sockets, hw_sockets);
+	num_cores   = MIN(req->max_cores, hw_cores);
+	num_threads = MIN(req->max_threads, hw_threads);
+	i = 0;
+	while (taskcount < max_tasks) {
+		if (taskcount == last_taskcount)
+			fatal("_task_layout_lllp_multi failure");
+		last_taskcount = taskcount; 
+		for (s = 0; s < hw_sockets; s++) {
+			for (c = 0; c < hw_cores; c++) {
+				for (t = 0; t < num_threads; t++) {
+					uint16_t bit = s*(hw_cores*hw_threads) +
+							c*(hw_threads) + t;
+					if (bit_test(avail_map, bit) == 0)
+						continue;
+					if (masks[taskcount] == NULL)
+						masks[taskcount] =
+						    (bitstr_t *)bit_alloc(size);
+					bit_set(masks[taskcount], bit);
+					if (++i < req->cpus_per_task)
+						continue;
+					i = 0;
+					if (++taskcount >= max_tasks)
+						break;
+				}
+				if (taskcount >= max_tasks)
+					break;
+			}
+			if (taskcount >= max_tasks)
+				break;
 		}
 	}
+	bit_free(avail_map);
+	
+	/* last step: expand the masks to bind each task
+	 * to the requested resource */
+	_expand_masks(req->cpu_bind_type, max_tasks, masks,
+			hw_sockets, hw_cores, hw_threads);
+
+	return SLURM_SUCCESS;
 }
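+/* Packing example for the multi layout above (hypothetical counts):
+ * with cpus_per_task=2 and num_threads=2, task 0 gets both threads of
+ * socket 0 core 0, task 1 both threads of core 1, and so on, filling
+ * each core before moving to the next. */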
 
 /* 
- * _lllp_generate_cpu_bind
+ * _task_layout_lllp_cyclic
  *
- * Generate the cpu_bind type and string given an array of bitstr_t masks
- *
- * IN/OUT- job launch request (cpu_bind_type and cpu_bind updated)
- * IN- maximum number of tasks
- * IN- array of masks
- */
-static void _lllp_generate_cpu_bind(launch_tasks_request_msg_t *req,
-				    const uint32_t maxtasks,
-				    bitstr_t **masks)
-{
-    	int i, num_bits=0, masks_len;
-	bitstr_t *bitmask;
-	bitoff_t charsize;
-	char *masks_str = NULL;
-	char buf_type[100];
-
-	for (i = 0; i < maxtasks; i++) { 
-		bitmask = masks[i];
-	    	if (bitmask) {
-			num_bits = bit_size(bitmask);
-			break;
-		}
-	}
-	charsize = (num_bits + 3) / 4;		/* ASCII hex digits */
-	charsize += 3;				/* "0x" and trailing "," */
-	masks_len = maxtasks * charsize + 1;	/* number of masks + null */
-
-	debug3("_lllp_generate_cpu_bind %d %d %d", maxtasks, charsize, masks_len);
-
-	masks_str = xmalloc(masks_len);
-	masks_len = 0;
-	for (i = 0; i < maxtasks; i++) {
-	    	char *str;
-		int curlen;
-		bitmask = masks[i];
-	    	if (bitmask == NULL) {
-			continue;
-		}
-		str = bit_fmt_hexmask(bitmask);
-		curlen = strlen(str) + 1;
-
-		if (masks_len > 0) masks_str[masks_len-1]=',';
-		strncpy(&masks_str[masks_len], str, curlen);
-		masks_len += curlen;
-		xassert(masks_str[masks_len] == '\0');
-		xfree(str);
-	}
-
-	if (req->cpu_bind) {
-	    	xfree(req->cpu_bind);
-	}
-	if (masks_str[0] != '\0') {
-		req->cpu_bind = masks_str;
-		req->cpu_bind_type |= CPU_BIND_MASK; 
-	} else {
-		req->cpu_bind = NULL;
-		req->cpu_bind_type &= ~CPU_BIND_VERBOSE;
-	}
-
-	/* clear mask generation bits */
-	req->cpu_bind_type &= ~CPU_BIND_TO_THREADS;
-	req->cpu_bind_type &= ~CPU_BIND_TO_CORES;
-	req->cpu_bind_type &= ~CPU_BIND_TO_SOCKETS;
-
-	slurm_sprint_cpu_bind_type(buf_type, req->cpu_bind_type);
-	info("_lllp_generate_cpu_bind jobid [%u]: %s, %s",
-	     req->job_id, buf_type, masks_str);
-}
-
-
-static void _lllp_free_masks (launch_tasks_request_msg_t *req,
-			      const uint32_t maxtasks,
-			      bitstr_t **masks)
-{
-    	int i;
-	bitstr_t *bitmask;
-	for (i = 0; i < maxtasks; i++) { 
-		bitmask = masks[i];
-	    	if (bitmask) {
-			bit_free(bitmask);
-		}
-	}
-}
-
-/* 
- * _task_layout_lllp_init performs common initialization required by:
- *	_task_layout_lllp_cyclic
- *	_task_layout_lllp_block
- *	_task_layout_lllp_plane
- */
-static int _task_layout_lllp_init(launch_tasks_request_msg_t *req, 
-				  const uint32_t maxtasks,
-				  bitstr_t ***masks_p,
-				  bool *bind_to_exact_socket,
-				  bool *bind_to_exact_core,
-				  bool *bind_to_exact_thread,
-				  uint16_t *usable_cpus,
-				  uint16_t *usable_sockets, 
-				  uint16_t *usable_cores,
-				  uint16_t *usable_threads,
-				  uint16_t *hw_sockets, 
-				  uint16_t *hw_cores,
-				  uint16_t *hw_threads,
-				  uint16_t *avail_cpus)
-{
-	int min_sockets = 1, min_cores = 1;
-	uint16_t alloc_cores[conf->sockets];
-
-	if (req->cpu_bind_type & CPU_BIND_TO_THREADS) {
-		/* Default: in here in case we decide to change the
-		 * default */
-		info ("task_layout cpu_bind_type CPU_BIND_TO_THREADS ");
-	} else if (req->cpu_bind_type & CPU_BIND_TO_CORES) {
-		*bind_to_exact_thread = false;
-		info ("task_layout cpu_bind_type CPU_BIND_TO_CORES ");
-	} else if (req->cpu_bind_type & CPU_BIND_TO_SOCKETS) {
-		*bind_to_exact_thread = false;
-		*bind_to_exact_core   = false;
-		info ("task_layout cpu_bind_type CPU_BIND_TO_SOCKETS");
-	}
-
-	_get_resources_this_node(usable_cpus, usable_sockets, usable_cores,
-				 usable_threads, alloc_cores, req->job_id);
-
-	*hw_sockets = *usable_sockets;
-	*hw_cores   = *usable_cores;
-	*hw_threads = *usable_threads;
-
-	*avail_cpus = slurm_get_avail_procs(req->max_sockets, 
-					    req->max_cores, 
-					    req->max_threads, 
-					    min_sockets,
-					    min_cores,
-					    req->cpus_per_task,
-					    req->ntasks_per_node,
-					    req->ntasks_per_socket,
-					    req->ntasks_per_core,
-					    usable_cpus, usable_sockets,
-					    usable_cores, usable_threads,
-					    alloc_cores, conf->cr_type,
-					    req->job_id, conf->hostname);
-	/* Allocate masks array */
-	*masks_p = xmalloc(maxtasks * sizeof(bitstr_t*));
-	return SLURM_SUCCESS;
-}
-
-/* _get_resources_this_node determines counts for already allocated
- * resources (currently sockets and lps) for this node.  
- *
- * Only used when cons_res (Consumable Resources) is enabled with
- * CR_Socket, CR_Cores, or CR_CPU.
- *
- * OUT- Number of allocated sockets on this node
- * OUT- Number of allocated logical processors on this node 
- * 
- */
-static void _get_resources_this_node(uint16_t *cpus,
-				     uint16_t *sockets,
-				     uint16_t *cores,
-				     uint16_t *threads,
-				     uint16_t *alloc_cores,
-	                             uint32_t jobid)
-{
-	int bit_index = 0;
-	int i, j, k;
-
-	/* FIX for heterogeneous socket/core/thread count per system
-	 * in future releases */
-	*cpus    = conf->cpus;
-	*sockets = conf->sockets;
-	*cores   = conf->cores;
-	*threads = conf->threads;
-
-	for(i = 0; i < *sockets; i++)
-		alloc_cores[i] = 0;
-
-	for(i = 0; i < *sockets; i++) {
-		for(j = 0; j < *cores; j++) {
-			for(k = 0; k < *threads; k++) {
-				info("jobid %u lllp_reserved[%d]=%d", jobid, 
-				     bit_index, lllp_reserved[bit_index]);
-				if(lllp_reserved[bit_index] > 0) {
-					if (k == 0) {
-						alloc_cores[i]++;
-					}
-				}
-				bit_index++;
-			}
-		}
-	}
-		
-	xassert(bit_index == (*sockets * *cores * *threads));
-
-#if(0)
-	for (i = 0; i < *sockets; i++)
-		info("_get_resources jobid:%u hostname:%s socket id:%d cores:%u", 
-		     jobid, conf->hostname, i, alloc_cores[i]);
-#endif
-}
-	
-/* 
- * _task_layout_lllp_cyclic
- *
- * task_layout_lllp_cyclic creates a cyclic distribution at the
- * lowest level of logical processor which is either socket, core or
- * thread depending on the system architecture. The Cyclic algorithm
- * is the same as the the Cyclic distribution performed in srun.
+ * task_layout_lllp_cyclic creates a cyclic distribution at the
+ * lowest level of logical processor which is either socket, core or
+ * thread depending on the system architecture. The Cyclic algorithm
+ * is the same as the Cyclic distribution performed in srun.
  *
  *  Distribution at the lllp: 
  *  -m hostfile|plane|block|cyclic:block|cyclic 
@@ -717,70 +816,81 @@ static void _get_resources_this_node(uint16_t *cpus,
  *
  */
 static int _task_layout_lllp_cyclic(launch_tasks_request_msg_t *req, 
-				    const uint32_t *gtid,
-				    const uint32_t maxtasks,
-				    bitstr_t ***masks_p)
+				    uint32_t node_id, bitstr_t ***masks_p)
 {
-	int retval, i, last_taskcount = -1, taskcount = 0, taskid = 0;
-	uint16_t socket_index = 0, core_index = 0, thread_index = 0;
-	uint16_t hw_sockets = 0, hw_cores = 0, hw_threads = 0;
-	uint16_t usable_cpus = 0, avail_cpus = 0;
-	uint16_t usable_sockets = 0, usable_cores = 0, usable_threads = 0;
-	
+	int last_taskcount = -1, taskcount = 0;
+	uint16_t c, i, s, t, hw_sockets = 0, hw_cores = 0, hw_threads = 0;
+	uint16_t num_threads, num_cores, num_sockets;
+	int size, max_tasks = req->tasks_to_launch[(int)node_id];
+	int max_cpus = max_tasks * req->cpus_per_task;
+	bitstr_t *avail_map;
 	bitstr_t **masks = NULL;
-	bool bind_to_exact_socket = true;
-	bool bind_to_exact_core   = true;
-	bool bind_to_exact_thread = true;
 	
 	info ("_task_layout_lllp_cyclic ");
 
-	retval = _task_layout_lllp_init(req, maxtasks, masks_p,
-					&bind_to_exact_socket, 
-					&bind_to_exact_core, 
-					&bind_to_exact_thread,
-					&usable_cpus, 
-					&usable_sockets, 
-					&usable_cores, 
-					&usable_threads,
-					&hw_sockets, 
-					&hw_cores, 
-					&hw_threads, 
-					&avail_cpus);
-	if (retval != SLURM_SUCCESS)
-		return retval;
+	avail_map = _get_avail_map(req, &hw_sockets, &hw_cores, &hw_threads);
+	if (!avail_map)
+		return SLURM_ERROR;
+	
+	*masks_p = xmalloc(max_tasks * sizeof(bitstr_t*));
 	masks = *masks_p;
+	
+	size = bit_set_count(avail_map);
+	if (size < max_tasks) {
+		error("task/affinity: only %d bits in avail_map for %d tasks!",
+		      size, max_tasks);
+		bit_free(avail_map);
+		return SLURM_ERROR;
+	}
+	if (size < max_cpus) {
+		/* Possible result of overcommit */
+		i = size / max_tasks;
+		info("task/affinity: reset cpus_per_task from %d to %d",
+		     req->cpus_per_task, i);
+		req->cpus_per_task = i;
+	}
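+	/* Example (hypothetical numbers): 4 tasks each requesting 3 CPUs
+	 * on a node with only 8 usable bits in avail_map gives
+	 * max_cpus = 12 > size = 8, so cpus_per_task is reduced to
+	 * 8/4 = 2 before the masks are laid out. */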
 
-	for (i=0; taskcount<maxtasks; i++) {
-		if (taskcount == last_taskcount) {
-			error("_task_layout_lllp_cyclic failure");
-			return SLURM_ERROR;
-		}
+	size = bit_size(avail_map);
+	num_sockets = MIN(req->max_sockets, hw_sockets);
+	num_cores   = MIN(req->max_cores, hw_cores);
+	num_threads = MIN(req->max_threads, hw_threads);
+	i = 0;
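+	/* The abstract bit for (socket s, core c, thread t) is
+	 * s*(hw_cores*hw_threads) + c*hw_threads + t.  Worked example
+	 * (hypothetical 2x2x2 node, four one-CPU tasks): the inner
+	 * socket loop yields task0->bit 0 (s0,c0,t0), task1->bit 4
+	 * (s1,c0,t0), task2->bit 2 (s0,c1,t0), task3->bit 6 (s1,c1,t0),
+	 * i.e. tasks alternate between sockets before reusing a core. */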
+	while (taskcount < max_tasks) {
+		if (taskcount == last_taskcount)
+			fatal("_task_layout_lllp_cyclic failure");
 		last_taskcount = taskcount; 
-		for (thread_index=0; thread_index<usable_threads; thread_index++) {
-			for (core_index=0; core_index<usable_cores; core_index++) {
-				for (socket_index=0; socket_index<usable_sockets; 
-						     socket_index++) {
-					bitstr_t *bitmask = NULL;
-					taskid = gtid[taskcount];
-					_single_mask(hw_sockets, 
-						     hw_cores, 
-						     hw_threads,
-						     socket_index, 
-						     core_index, 
-						     thread_index, 
-						     bind_to_exact_socket, 
-						     bind_to_exact_core,
-						     bind_to_exact_thread, 
-						     &bitmask);
-					xassert(masks[taskcount] == NULL);
-					masks[taskcount] = bitmask;
-					if (++taskcount >= maxtasks)
-						goto fini;
+		for (t = 0; t < num_threads; t++) {
+			for (c = 0; c < hw_cores; c++) {
+				for (s = 0; s < hw_sockets; s++) {
+					uint16_t bit = s*(hw_cores*hw_threads) +
+							c*(hw_threads) + t;
+					if (bit_test(avail_map, bit) == 0)
+						continue;
+					if (masks[taskcount] == NULL)
+						masks[taskcount] =
+						    (bitstr_t *)bit_alloc(size);
+					bit_set(masks[taskcount], bit);
+					if (++i < req->cpus_per_task)
+						continue;
+					i = 0;
+					if (++taskcount >= max_tasks)
+						break;
 				}
+				if (taskcount >= max_tasks)
+					break;
 			}
+			if (taskcount >= max_tasks)
+				break;
 		}
 	}
- fini:	return SLURM_SUCCESS;
+	bit_free(avail_map);
+	
+	/* last step: expand the masks to bind each task
+	 * to the requested resource */
+	_expand_masks(req->cpu_bind_type, max_tasks, masks,
+			hw_sockets, hw_cores, hw_threads);
+
+	return SLURM_SUCCESS;
 }
 
 /* 
@@ -806,845 +916,249 @@ static int _task_layout_lllp_cyclic(launch_tasks_request_msg_t *req,
  *
  */
 static int _task_layout_lllp_block(launch_tasks_request_msg_t *req, 
-				   const uint32_t *gtid,
-				   const uint32_t maxtasks,
-				   bitstr_t ***masks_p)
+				   uint32_t node_id, bitstr_t ***masks_p)
 {
-	int retval, j, k, l, m, last_taskcount = -1, taskcount = 0, taskid = 0;
-	int over_subscribe  = 0, space_remaining = 0;
-	uint16_t core_index = 0, thread_index = 0;
+	int c, i, j, t, size, last_taskcount = -1, taskcount = 0;
 	uint16_t hw_sockets = 0, hw_cores = 0, hw_threads = 0;
-	uint16_t usable_cpus = 0, avail_cpus = 0;
-	uint16_t usable_sockets = 0, usable_cores = 0, usable_threads = 0;
-
+	uint16_t num_sockets, num_cores, num_threads;
+	int max_tasks = req->tasks_to_launch[(int)node_id];
+	int max_cpus = max_tasks * req->cpus_per_task;
+	int *task_array;
+	bitstr_t *avail_map;
 	bitstr_t **masks = NULL;
-	bool bind_to_exact_socket = true;
-	bool bind_to_exact_core   = true;
-	bool bind_to_exact_thread = true;
 
 	info("_task_layout_lllp_block ");
 
-	retval = _task_layout_lllp_init(req, maxtasks, masks_p,
-					&bind_to_exact_socket, 
-					&bind_to_exact_core, 
-					&bind_to_exact_thread,
-					&usable_cpus, 
-					&usable_sockets, 
-					&usable_cores, 
-					&usable_threads,
-					&hw_sockets, 
-					&hw_cores, 
-					&hw_threads, 
-					&avail_cpus);
-	if (retval != SLURM_SUCCESS) {
-		return retval;
+	avail_map = _get_avail_map(req, &hw_sockets, &hw_cores, &hw_threads);
+	if (!avail_map) {
+		return SLURM_ERROR;
 	}
+
+	size = bit_set_count(avail_map);
+	if (size < max_tasks) {
+		error("task/affinity: only %d bits in avail_map for %d tasks!",
+		      size, max_tasks);
+		bit_free(avail_map);
+		return SLURM_ERROR;
+	}
+	if (size < max_cpus) {
+		/* Possible result of overcommit */
+		i = size / max_tasks;
+		info("task/affinity: reset cpus_per_task from %d to %d",
+		     req->cpus_per_task, i);
+		req->cpus_per_task = i;
+	}
+	size = bit_size(avail_map);
+
+	*masks_p = xmalloc(max_tasks * sizeof(bitstr_t*));
 	masks = *masks_p;
 
-	if (_init_lllp() != SLURM_SUCCESS) {
-		error("In lllp_block: _init_lllp() != SLURM_SUCCESS");
+	task_array = xmalloc(size * sizeof(int));
+	if (!task_array) {
+		error("In lllp_block: task_array memory error");
+		bit_free(avail_map);
 		return SLURM_ERROR;
 	}
 	
-	while(taskcount < maxtasks) {
+	/* block distribution with oversubscription */
+	num_sockets = MIN(req->max_sockets, hw_sockets);
+	num_cores   = MIN(req->max_cores, hw_cores);
+	num_threads = MIN(req->max_threads, hw_threads);
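+	/* Example (hypothetical 2x2x2 node, all CPUs available,
+	 * cpus_per_task=1): walking the abstract map in index order
+	 * places task0 on bit 0 (s0,c0,t0), task1 on bit 1 (s0,c0,t1)
+	 * and task2 on bit 2 (s0,c1,t0); socket 1 is only used once
+	 * socket 0 is full.  Bits with i%hw_threads >= num_threads are
+	 * skipped when fewer threads per core were requested. */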
+	c = 0;
+	while(taskcount < max_tasks) {
 		if (taskcount == last_taskcount) {
-			error("_task_layout_lllp_block failure");
-			return SLURM_ERROR;
+			fatal("_task_layout_lllp_block infinite loop");
 		}
 		last_taskcount = taskcount;
-		for (j=0; j<usable_sockets; j++) {
-			for(core_index=0; core_index < usable_cores; core_index++) {
-				if((core_index < usable_cores) || (over_subscribe)) {
-					for(thread_index=0; thread_index<usable_threads; thread_index++) {
-						if((thread_index < usable_threads) || (over_subscribe)) {
-							lllp_tasks->sockets[j].cores[core_index]
-								.threads[thread_index].tasks++;
-							taskcount++;
-							if((thread_index+1) < usable_threads)
-								space_remaining = 1;
-							if(maxtasks <= taskcount) break;
-						}
-						if(maxtasks <= taskcount) break;
-						if (!space_remaining) {
-							over_subscribe = 1;
-						} else {
-							space_remaining = 0;
-						}
-					}
-				}
-				if(maxtasks <= taskcount) break;
-				if((core_index+1) < usable_cores)
-					space_remaining = 1;
-				if (!space_remaining) {
-					over_subscribe = 1;
-				} else {
-					space_remaining = 0;
-				}
-			}
-			if(maxtasks <= taskcount) break;
-			if (!space_remaining) {
-				over_subscribe = 1;
-			} else {
-				space_remaining = 0;
-			}
+		/* the abstract map is already laid out in block order,
+		 * so just iterate over it
+		 */
+		for (i = 0; i < size; i++) {
+			/* skip unrequested threads */
+			if (i%hw_threads >= num_threads)
+				continue;
+			/* skip unavailable resources */
+			if (bit_test(avail_map, i) == 0)
+				continue;
+			/* if multiple CPUs per task, only
+			 * count the task on the first CPU */
+			if (c == 0)
+				task_array[i] += 1;
+			if (++c < req->cpus_per_task)
+				continue;
+			c = 0;
+			if (++taskcount >= max_tasks)
+				break;
 		}
 	}
-	
-	/* Distribute the tasks and create masks for the task
-	 * affinity plug-in */
-	taskid = 0;
+	/* Distribute the tasks and create per-task masks that only
+	 * contain the first CPU. Note that unused resources
+	 * (task_array[i] == 0) will get skipped */
 	taskcount = 0;
-	for (j=0; j<usable_sockets; j++) {
-		for (k=0; k<usable_cores; k++) {
-			for (m=0; m<usable_threads; m++) {
-				for (l=0; l<lllp_tasks->sockets[j]
-					     .cores[k].threads[m].tasks; l++) {
-					bitstr_t *bitmask = NULL;
-					taskid = gtid[taskcount];
-					_single_mask(hw_sockets, 
-						     hw_cores, 
-						     hw_threads,
-						     j, k, m, 
-						     bind_to_exact_socket, 
-						     bind_to_exact_core, 
-						     bind_to_exact_thread,
-						     &bitmask);
-					xassert(masks[taskcount] == NULL);
-					xassert(taskcount < maxtasks);
-					masks[taskcount] = bitmask;
-					taskcount++;
-				}
-			}
+	for (i = 0; i < size; i++) {
+		for (t = 0; t < task_array[i]; t++) {
+			if (masks[taskcount] == NULL)
+				masks[taskcount] = (bitstr_t *)bit_alloc(size);
+			bit_set(masks[taskcount++], i);
 		}
 	}
-	
-	_print_tasks_per_lllp ();
-	_cleanup_lllp();
-
-	return SLURM_SUCCESS;
-}
-
-/* 
- * _task_layout_lllp_plane
- *
- * task_layout_lllp_plane will create a block cyclic distribution at
- * the lowest level of logical processor which is either socket, core or
- * thread depending on the system architecture. The Block algorithm is
- * different from the Block distribution performed at the node level
- * in that this algorithm does not load-balance the tasks across the
- * resources but uses the block size (i.e. plane size) specified by
- * the user.
- *
- *  Distribution at the lllp: 
- *  -m hostfile|plane|block|cyclic:block|cyclic 
- * 
- * The first distribution "hostfile|plane|block|cyclic" is computed
- * in srun. The second distribution "plane|block|cyclic" is computed
- * locally by each slurmd.
- *  
- * The input to the lllp distribution algorithms is the gids
- * (tasksids) generated for the local node.
- *  
- * The output is a mapping of the gids onto logical processors
- * (thread/core/socket)  with is expressed in cpu_bind masks.
- *
- */
-static int _task_layout_lllp_plane(launch_tasks_request_msg_t *req, 
-				   const uint32_t *gtid,
-				   const uint32_t maxtasks,
-				   bitstr_t ***masks_p)
-{
-	int retval, j, k, l, m, taskid = 0, last_taskcount = -1, next = 0;
-	uint16_t core_index = 0, thread_index = 0;
-	uint16_t hw_sockets = 0, hw_cores = 0, hw_threads = 0;
-	uint16_t usable_cpus = 0, avail_cpus = 0;
-	uint16_t usable_sockets = 0, usable_cores = 0, usable_threads = 0;
-	uint16_t plane_size = req->plane_size;
-	int max_plane_size = 0;
-
-	bitstr_t **masks = NULL; 
-	bool bind_to_exact_socket = true;
-	bool bind_to_exact_core   = true;
-	bool bind_to_exact_thread = true;
-
-	info("_task_layout_lllp_plane %d ", req->plane_size);
-
-	retval = _task_layout_lllp_init(req, maxtasks, masks_p,
-					&bind_to_exact_socket, 
-					&bind_to_exact_core, 
-					&bind_to_exact_thread,
-					&usable_cpus, 
-					&usable_sockets, 
-					&usable_cores, 
-					&usable_threads,
-					&hw_sockets, 
-					&hw_cores, 
-					&hw_threads, 
-					&avail_cpus);
-	if (retval != SLURM_SUCCESS) {
-		return retval;
-	}
-	masks = *masks_p;
-
-	max_plane_size = (plane_size > usable_cores) ? plane_size : usable_cores;
-	next = 0;
-
-	for (j=0; next<maxtasks; j++) {
-		if (next == last_taskcount) {
-			error("_task_layout_lllp_plan failure");
-			return SLURM_ERROR;
-		}
-		last_taskcount = next;
-		for (k=0; k<usable_sockets; k++) {
-			max_plane_size = (plane_size > usable_cores) ? plane_size : usable_cores;
-			for (m=0; m<max_plane_size; m++) {
-				if(next>=maxtasks)
-					break;
-				core_index = m%usable_cores;				
-				if(m<usable_cores) {
-					for(l=0; l<usable_threads;l++) {
-						if(next>=maxtasks)
-							break;
-						thread_index = l%usable_threads;
-						
-						if(thread_index<usable_threads) {
-							bitstr_t *bitmask = NULL;
-							taskid = gtid[next];
-							_single_mask(hw_sockets,
-								     hw_cores, 
-								     hw_threads,
-								     k, 
-								     core_index, 
-								     thread_index, 
-								     bind_to_exact_socket, 
-								     bind_to_exact_core,
-								     bind_to_exact_thread, 
-								     &bitmask);
-							xassert(masks[next] == NULL);
-							xassert(next < maxtasks);
-							masks[next] = bitmask;
-							next++;
-						}
-					}
+	/* now set additional CPUs for cpus_per_task > 1 */
+	for (t=0; t<max_tasks && req->cpus_per_task>1; t++) {
+		if (!masks[t])
+			continue;
+		for (i = 0; i < size; i++) {
+			if (bit_test(masks[t], i) == 0)
+				continue;
+			for (j=i+1,c=1; j<size && c<req->cpus_per_task;j++) {
+				if (bit_test(avail_map, j) == 0)
+					continue;
+				bit_set(masks[t], j);
+				c++;
+			}
+			if (c < req->cpus_per_task) {
+				/* we haven't found all of the CPUs for this
+				 * task, so we'll wrap the search to cover the
+				 * whole node */
+				for (j=0; j<i && c<req->cpus_per_task; j++) {
+					if (bit_test(avail_map, j) == 0)
+						continue;
+					bit_set(masks[t], j);
+					c++;
 				}
 			}
 		}
 	}
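+	/* Example (hypothetical): with cpus_per_task=2 and avail_map bits
+	 * {1,2,7} on an 8-bit map, a task whose first CPU is bit 7 finds
+	 * no available bit above it, so the wrapped pass assigns bit 1. */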
-	
-	return SLURM_SUCCESS;
-}
-
-/*
- * slurm job state information
- * tracks jobids for which all future credentials have been revoked
- *  
- */
-typedef struct {
-	uint32_t jobid;
-	uint32_t jobstepid;
-	uint32_t numtasks;
-	cpu_bind_type_t cpu_bind_type;
-	char *cpu_bind;
-} lllp_job_state_t;
-
-static lllp_job_state_t *
-_lllp_job_state_create(uint32_t job_id, uint32_t job_step_id,
-		       cpu_bind_type_t cpu_bind_type, char *cpu_bind,
-		       uint32_t numtasks)
-{
-	lllp_job_state_t *j;
-	debug3("creating job [%u.%u] lllp state", job_id, job_step_id);
-
-	j = xmalloc(sizeof(lllp_job_state_t));
-
-	j->jobid	 = job_id;
-	j->jobstepid	 = job_step_id;
-	j->numtasks	 = numtasks;
-	j->cpu_bind_type = cpu_bind_type;
-	j->cpu_bind	 = NULL;
-	if (cpu_bind) {
-		j->cpu_bind = xmalloc(strlen(cpu_bind) + 1);
-		strcpy(j->cpu_bind, cpu_bind);
-	}
-	return j;
-}
-
-static void
-_lllp_job_state_destroy(lllp_job_state_t *j)
-{
-	debug3("destroying job [%u.%u] lllp state", j->jobid, j->jobstepid);
-        if (j) {
-		if (j->cpu_bind)
-			xfree(j->cpu_bind);
-	    	xfree(j);
-	}
-}
-
-#if 0
-/* Note: now inline in cr_release_lllp to support multiple job steps */
-static lllp_job_state_t *
-_find_lllp_job_state(uint32_t jobid)
-{
-        ListIterator  i = NULL;
-        lllp_job_state_t  *j = NULL;
-
-        i = list_iterator_create(lllp_ctx->job_list);
-        while ((j = list_next(i)) && (j->jobid != jobid)) {;}
-        list_iterator_destroy(i);
-        return j;
-}
-
-static void
-_remove_lllp_job_state(uint32_t jobid)
-{
-        ListIterator  i = NULL;
-        lllp_job_state_t  *j = NULL;
-
-        i = list_iterator_create(lllp_ctx->job_list);
-        while ((j = list_next(i)) && (j->jobid != jobid)) {;}
-	if (j) {
-	    	list_delete_item(i);
-	}
-        list_iterator_destroy(i);
-}
-#endif
-
-void
-_append_lllp_job_state(lllp_job_state_t *j)
-{
-        list_append(lllp_ctx->job_list, j);
-}
-
-void
-lllp_ctx_destroy(void)
-{
-	xfree(lllp_reserved);
 
-    	if (lllp_ctx == NULL)
-		return;
-
-        xassert(lllp_ctx->magic == LLLP_CTX_MAGIC);
-
-        slurm_mutex_lock(&lllp_ctx->mutex);
-	list_destroy(lllp_ctx->job_list);
-
-        xassert(lllp_ctx->magic = ~LLLP_CTX_MAGIC);
-
-        slurm_mutex_unlock(&lllp_ctx->mutex);
-        slurm_mutex_destroy(&lllp_ctx->mutex);
-
-    	xfree(lllp_ctx);
-}
-
-void
-lllp_ctx_alloc(void)
-{
-	uint32_t num_lllp;
+	xfree(task_array);
+	bit_free(avail_map);
 
-	debug3("alloc LLLP");
+	/* last step: expand the masks to bind each task
+	 * to the requested resource */
+	_expand_masks(req->cpu_bind_type, max_tasks, masks,
+			hw_sockets, hw_cores, hw_threads);
 
-	xfree(lllp_reserved);
-	num_lllp = conf->sockets * conf->cores * conf->threads;
-	if (conf->cpus > num_lllp) {
-	    	num_lllp = conf->cpus;
-	}
-	lllp_reserved_size = num_lllp;
-	lllp_reserved = xmalloc(num_lllp * sizeof(uint32_t));
-
-	if (lllp_ctx) {
-		lllp_ctx_destroy();
-	}
-
-        lllp_ctx = xmalloc(sizeof(*lllp_ctx));
-
-        slurm_mutex_init(&lllp_ctx->mutex);
-        slurm_mutex_lock(&lllp_ctx->mutex);
-        
-        lllp_ctx->job_list = NULL;
-	lllp_ctx->job_list = list_create((ListDelF) _lllp_job_state_destroy);
-
-        xassert(lllp_ctx->magic = LLLP_CTX_MAGIC);
-        
-        slurm_mutex_unlock(&lllp_ctx->mutex);
-}
-
-static int _init_lllp(void)
-{
-	int j = 0, k = 0;
-	int usable_sockets, usable_threads, usable_cores;
-
-	debug3("init LLLP");
-
-  	/* FIX for heterogeneous socket/core/thread count per system
-	 * in future releases */
-	usable_sockets = conf->sockets;
-	usable_threads = conf->threads;
-	usable_cores   = conf->cores;
-
-        lllp_tasks = xmalloc(sizeof(struct node_gids));
-	lllp_tasks->sockets =  xmalloc(sizeof(struct socket_gids) * usable_sockets);
-	for (j=0; j<usable_sockets; j++) {
-		lllp_tasks->sockets[j].cores =  xmalloc(sizeof(struct core_gids) * usable_cores);
-		for (k=0; k<usable_cores; k++) {
-			lllp_tasks->sockets[j].cores[k].threads = 
-					xmalloc(sizeof(struct thread_gids) * usable_threads);
-		}
-	}
 	return SLURM_SUCCESS;
 }
 
-int _cleanup_lllp(void)
-{
-	int i=0, j=0;
-  	/* FIX for heterogeneous socket/core/thread count per system in future releases */
-	int usable_sockets = conf->sockets;
-	int usable_cores   = conf->cores;
-
-	for (i=0; i<usable_sockets; i++) { 
-		for (j=0; j<usable_cores; j++) {
-			xfree(lllp_tasks->sockets[i].cores[j].threads);
-		}
-		xfree(lllp_tasks->sockets[i].cores);
-	}
-	xfree(lllp_tasks->sockets);
-	xfree(lllp_tasks);
-	return SLURM_SUCCESS;
-}
-
-void _print_tasks_per_lllp (void)
-{
-	int j=0, k=0, l=0;
-  	/* FIX for heterogeneous socket/core/thread count per system
-	 * in future releases */
-	int usable_sockets = conf->sockets;
-	int usable_cores   = conf->cores;  
-	int usable_threads = conf->threads;
-  
-	info("_print_tasks_per_lllp ");
-  
-	for(j=0; j < usable_sockets; j++) {
-		for(k=0; k < usable_cores; k++) {
-			for(l=0; l < usable_threads; l++) {
-				info("socket %d core %d thread %d tasks %d ", j, k, l, 
-				     lllp_tasks->sockets[j].cores[k].threads[l].tasks);
-			}
-		}
-	}
-}
-
-/* _block_map
+/*
+ * _lllp_map_abstract_mask
  *
- * safely returns a mapped index using a provided block map
+ * Map one abstract block mask to a physical machine mask
  *
- * IN - index to map
- * IN - map to use
+ * IN - mask to map
+ * OUT - mapped mask (storage allocated in this routine)
  */
-static uint16_t _block_map(uint16_t index, uint16_t *map)
+static bitstr_t *_lllp_map_abstract_mask(bitstr_t *bitmask)
 {
-	if (map == NULL) {
-	    	return index;
-	}
-	/* make sure bit falls in map */
-	if (index >= conf->block_map_size) {
-		debug3("wrapping index %u into block_map_size of %u",
-		       index, conf->block_map_size);
-		index = index % conf->block_map_size;
-	}
-	index = map[index];
-	return(index);
-}
-
+    	int i, bit;
+	int num_bits = bit_size(bitmask);
+	bitstr_t *newmask = (bitstr_t *) bit_alloc(num_bits);
 
-/*
- * _single_mask
- *
- * This function allocates and returns a abstract (unmapped) bitmask given the
- * machine architecture, the index for the task, and the desired binding type
- */
-static void _single_mask(const uint16_t nsockets, 
-			 const uint16_t ncores, 
-			 const uint16_t nthreads, 
-			 const uint16_t socket_id,
-			 const uint16_t core_id, 
-			 const uint16_t thread_id,
-			 const bool bind_to_exact_socket,
-			 const bool bind_to_exact_core,
-			 const bool bind_to_exact_thread,
-			 bitstr_t **single_mask ) 
-{
-	int socket, core, thread;
-	int nsockets_left, ncores_left, nthreads_left;
-	bitoff_t bit;
-	bitoff_t num_bits = nsockets * ncores * nthreads;
-	bitstr_t * bitmask = bit_alloc(num_bits);
-
-	if (bind_to_exact_socket) {
-		nsockets_left = 1;
-		socket = socket_id;
-	} else {
-		nsockets_left = nsockets;
-		socket = 0;
-	}	
-	while (nsockets_left-- > 0) {
-		if (bind_to_exact_core) {
-			ncores_left = 1;
-			core = core_id;
-		} else {
-			ncores_left = ncores;
-			core = 0;
-		}
-		while (ncores_left-- > 0) {
-			if (bind_to_exact_thread) { 
-				nthreads_left = 1; 
-				thread = thread_id;
-			} else { 
-				nthreads_left = nthreads; 
-				thread = 0; 
-			}
-			while (nthreads_left-- > 0) {
-				bit = SCT_TO_LLLP(socket, core, thread,
-						  ncores, nthreads);
-				if (bit < num_bits)
-					bit_set(bitmask, bit);
-				else
-					info("Invalid job cpu_bind mask");
-				thread++;
-			}
-			core++;
+	/* remap to physical machine */
+	for (i = 0; i < num_bits; i++) {
+		if (bit_test(bitmask,i)) {
+			bit = BLOCK_MAP(i);
+			bit_set(newmask, bit);
 		}
-		socket++;
 	}
-	
-	*single_mask = bitmask;
-
-#if(0)
-	char *str = bit_fmt_hexmask(bitmask);
-	info("_single_mask(Real: %d.%d.%d\t Use:%d.%d.%d\t = %s )",
-	     nsockets, ncores, nthreads,
-	     socket_id, core_id, thread_id, *str);
-	xfree(str);
-#endif
+	return newmask;
 }
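+/* Example (hypothetical block map {0,2,1,3}, i.e. abstract CPU 1 is
+ * physical CPU 2 and vice versa): the abstract mask 0x2 (bit 1) maps to
+ * the physical mask 0x4 (bit 2), while 0x6 (bits 1,2) maps to 0x6. */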
 
-
 /*
- * cr_reserve_unit
+ * _lllp_map_abstract_masks
+ *
+ * Map an array of abstract block masks to physical machine masks
  *
- * Given a bitstr_t, expand any set bits to cover the:
- * - entire socket if cr_type == CR_SOCKET or CR_SOCKET_MEMORY to 
- *   create a reservation for the entire socket.
- *     or
- * - entire core if cr_type == CR_CORE or CR_CORE_MEMORY to 
- *   create a reservation for the entire core.
+ * IN- maximum number of tasks
+ * IN/OUT- array of masks
  */
-static void _cr_reserve_unit(bitstr_t *bitmask, int cr_type)
+static void _lllp_map_abstract_masks(const uint32_t maxtasks, bitstr_t **masks)
 {
-	uint32_t nsockets = conf->sockets;
-	uint32_t ncores   = conf->cores;
-	uint32_t nthreads = conf->threads;
-	bitoff_t bit;
-	int socket, core, thread;
-	int nsockets_left, ncores_left, nthreads_left;
-	int num_bits;
-	bool reserve_this_socket = false;
-	bool reserve_this_core   = false;
-
-	if (!bitmask) {
-	    	return;
-	}
-	if ((cr_type != CR_SOCKET) &&
-	    (cr_type != CR_SOCKET_MEMORY) &&
-	    (cr_type != CR_CORE) &&
-	    (cr_type != CR_CORE_MEMORY)) {
-		return;
-	}
-
-	num_bits = bit_size(bitmask);
-	nsockets_left = nsockets;
-	socket = 0;
-	while (nsockets_left-- > 0) {
-		reserve_this_socket = false;
-		ncores_left = ncores;
-		core = 0;
-		while (ncores_left-- > 0) { /* check socket for set bits */
-			reserve_this_core = false;
-			nthreads_left = nthreads; 
-			thread = 0; 
-			while (nthreads_left-- > 0) {
-				bit = SCT_TO_LLLP(socket, core, thread,
-						  ncores, nthreads);
-				/* map abstract to machine */
-				bit = BLOCK_MAP(bit);
-				if (bit < num_bits) {
-					if (bit_test(bitmask,bit)) {
-						reserve_this_socket = true;
-						reserve_this_core   = true;
-						nthreads_left = 0;
-					}
-				} else
-					info("Invalid job cpu_bind mask");
-				thread++;
-			}
-			/* mark entire core */
-			if (((cr_type == CR_CORE) ||
-			     (cr_type == CR_CORE_MEMORY)) &&
-			    reserve_this_core) {
-				nthreads_left = nthreads; 
-				thread = 0; 
-				while (nthreads_left-- > 0) {
-					bit = SCT_TO_LLLP(socket, core, thread,
-							  ncores, nthreads);
-					/* map abstract to machine */
-					bit = BLOCK_MAP(bit);
-					if (bit < num_bits)
-						bit_set(bitmask, bit);
-					else
-						info("Invalid job cpu_bind mask");
-					thread++;
-				}
-			}
-			core++;
-		}
-		/* mark entire socket */
-		if (((cr_type == CR_SOCKET) ||
-		     (cr_type == CR_SOCKET_MEMORY)) &&
-		    reserve_this_socket) {
-			ncores_left = ncores;
-			core = 0;
-			while (ncores_left-- > 0) {
-				nthreads_left = nthreads; 
-				thread = 0; 
-				while (nthreads_left-- > 0) {
-					bit = SCT_TO_LLLP(socket, core, thread,
-							  ncores, nthreads);
-					/* map abstract to machine */
-					bit = BLOCK_MAP(bit);
-					if (bit < num_bits)
-						bit_set(bitmask, bit);
-					else
-						info("Invalid job cpu_bind mask");
-					thread++;
-				}
-				core++;
-			}
+    	int i;
+	debug3("_lllp_map_abstract_masks");
+	
+	for (i = 0; i < maxtasks; i++) { 
+		bitstr_t *bitmask = masks[i];
+	    	if (bitmask) {
+			bitstr_t *newmask = _lllp_map_abstract_mask(bitmask);
+			bit_free(bitmask);
+			masks[i] = newmask;
 		}
-		socket++;
 	}
-	
 }
 
-
-static int _get_bitmap_from_cpu_bind(bitstr_t *bitmap_test,
-				     cpu_bind_type_t cpu_bind_type, 
-				     char *cpu_bind, uint32_t numtasks)
+/* 
+ * _lllp_generate_cpu_bind
+ *
+ * Generate the cpu_bind type and string given an array of bitstr_t masks
+ *
+ * IN/OUT- job launch request (cpu_bind_type and cpu_bind updated)
+ * IN- maximum number of tasks
+ * IN- array of masks
+ */
+static void _lllp_generate_cpu_bind(launch_tasks_request_msg_t *req,
+				    const uint32_t maxtasks, bitstr_t **masks)
 {
-	char opt_dist[10];
-	char *dist_str = NULL;
-	char *dist_str_next = NULL;
-	int bitmap_size = bit_size(bitmap_test);
-	int rc = SLURM_SUCCESS;
-	unsigned int i;
-	dist_str = cpu_bind;
-	
-	if (cpu_bind_type & CPU_BIND_RANK) {
-		for (i=0; i<numtasks; i++) {
-			if (i < bitmap_size)
-				bit_set(bitmap_test, i);
-			else {
-				info("Invalid job cpu_bind mask");
-				return SLURM_ERROR;
-			}
-		}
-		return rc;
-	}
-
-	i = 0;
-	while (dist_str != NULL) {
-		if (i >= numtasks) {	/* no more tasks need masks */
-		    	break;
-		}
-		if (*dist_str == ',') {	/* get next mask from cpu_bind */
-			dist_str++;
-		}
-		dist_str_next = strchr(dist_str, ',');
-
-		if (dist_str_next != NULL) {
-			strncpy(opt_dist, dist_str, dist_str_next-dist_str);
-			opt_dist[dist_str_next-dist_str] = '\0';
-		} else {
-			strcpy(opt_dist, dist_str);
-		}
+    	int i, num_bits=0, masks_len;
+	bitstr_t *bitmask;
+	bitoff_t charsize;
+	char *masks_str = NULL;
+	char buf_type[100];
 
-		/* add opt_dist to bitmap_test */
-		if (cpu_bind_type & CPU_BIND_MASK) {
-			bit_unfmt_hexmask(bitmap_test, opt_dist);
-		} else if (cpu_bind_type & CPU_BIND_MAP) {
-			unsigned int mycpu = 0;
-			if (strncmp(opt_dist, "0x", 2) == 0) {
-				mycpu = strtoul(&(opt_dist[2]), NULL, 16);
-			} else {
-				mycpu = strtoul(opt_dist, NULL, 10);
-			}
-			if (mycpu < bitmap_size)
-				bit_set(bitmap_test, mycpu);
-			else {
-				info("Invalid job cpu_bind mask");
-				rc = SLURM_ERROR;
-				/* continue and try to map remaining tasks */
-			}
+	for (i = 0; i < maxtasks; i++) { 
+		bitmask = masks[i];
+	    	if (bitmask) {
+			num_bits = bit_size(bitmask);
+			break;
 		}
-
-		dist_str = dist_str_next;
-		dist_str_next = NULL;
-	    	i++;
 	}
-	return rc;
-}
+	charsize = (num_bits + 3) / 4;		/* ASCII hex digits */
+	charsize += 3;				/* "0x" and trailing "," */
+	masks_len = maxtasks * charsize + 1;	/* number of masks + null */
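+	/* e.g. num_bits = 8 gives charsize = 2 hex digits + 3 for the
+	 * leading "0x" and the trailing "," = 5 chars per task mask */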
 
+	debug3("_lllp_generate_cpu_bind %d %d %d", maxtasks, charsize,
+		masks_len);
 
-static void _cr_update_reservation(int reserve, uint32_t *reserved, 
-				   bitstr_t *mask)
-{
-	int i;
-	int num_bits = bit_size(mask);
-
-	for(i=0; i < num_bits; i++) {
-		if (bit_test(mask,i)) {
-			if (reserve) {
-				/* reserve LLLP */
-				reserved[i]++;
-			} else {
-				/* release LLLP only if non-zero */
-				if (reserved[i] > 0) {
-					reserved[i]--;
-				}
-			}
+	masks_str = xmalloc(masks_len);
+	masks_len = 0;
+	for (i = 0; i < maxtasks; i++) {
+	    	char *str;
+		int curlen;
+		bitmask = masks[i];
+	    	if (bitmask == NULL) {
+			continue;
 		}
-	}
-}
-
-static void _cr_update_lllp(int reserve, uint32_t job_id, uint32_t job_step_id,
-			    cpu_bind_type_t cpu_bind_type, char *cpu_bind,
-			    uint32_t numtasks)
-{
-	int buf_len = 1024;
-	char buffer[buf_len], buftmp[128], buf_action[20];	/* for info */
-
-	if (lllp_reserved == NULL) {
-	    	/* fixme: lllp_reserved not allocated */
-	    	return;
-	}
+		str = (char *)bit_fmt_hexmask(bitmask);
+		curlen = strlen(str) + 1;
 
-	if ((cpu_bind_type & CPU_BIND_RANK) ||
-	    (cpu_bind_type & CPU_BIND_MASK) ||
-	    (cpu_bind_type & CPU_BIND_MAP)) {
-		int i = 0;
-		bitoff_t num_bits = 
-			conf->sockets * conf->cores * conf->threads;
-		bitstr_t * bitmap_test = bit_alloc(num_bits);
-		_get_bitmap_from_cpu_bind(bitmap_test,
-					  cpu_bind_type, cpu_bind, numtasks);
-
-		_cr_reserve_unit(bitmap_test, conf->cr_type);
-
-		_cr_update_reservation(reserve, lllp_reserved, bitmap_test);
-
-		bit_free(bitmap_test);	/* not currently stored with job_id */
-
-		/*** display the updated lllp_reserved counts ***/
-		buffer[0] = '\0';
-		for (i=num_bits-1; i >=0; i--) {
-			sprintf(buftmp, "%d", lllp_reserved[i]);
-			if (strlen(buftmp) + strlen(buffer) + 1 < buf_len) {
-			        if (i < (num_bits-1)) strcat(buffer,",");
-				strcat(buffer,buftmp);
-			} else {/* out of space...indicate incomplete string */
-				buffer[strlen(buffer)-1] = '*';
-				buffer[strlen(buffer)] = '\0';
-				break;
-			}
-		}
-		if (reserve) {
-			strcpy(buf_action, "reserve");
-		} else {
-			strcpy(buf_action, "release");
-		}
-		info("LLLP update %s [%u.%u]: %s (CPU IDs: %d...0)",
-			buf_action, job_id, job_step_id, buffer, num_bits-1);
+		if (masks_len > 0)
+			masks_str[masks_len-1]=',';
+		strncpy(&masks_str[masks_len], str, curlen);
+		masks_len += curlen;
+		xassert(masks_str[masks_len] == '\0');
+		xfree(str);
 	}
-}
-
-
-void cr_reserve_lllp(uint32_t job_id,
-			launch_tasks_request_msg_t *req, uint32_t node_id)
-{
-	lllp_job_state_t *j;
-	cpu_bind_type_t cpu_bind_type = req->cpu_bind_type;
-	char *cpu_bind = req->cpu_bind;
-	uint32_t numtasks = 0;
-	char buf_type[100];
-
-	debug3("reserve LLLP job [%u.%u]\n", job_id, req->job_step_id);
 
-	if (req->tasks_to_launch) {
-		numtasks = req->tasks_to_launch[(int)node_id];
+	if (req->cpu_bind) {
+	    	xfree(req->cpu_bind);
 	}
-
-	slurm_sprint_cpu_bind_type(buf_type, cpu_bind_type);
-	debug3("reserve lllp job [%u.%u]: %d tasks; %s[%d], %s",
-	       job_id, req->job_step_id, numtasks,
-	       buf_type, cpu_bind_type, cpu_bind);
-	if (cpu_bind_type == 0)
-		return;
-
-
-    	/* store job_id, cpu_bind_type, cpu_bind */
-	slurm_mutex_lock(&lllp_ctx->mutex);
-
-	j = _lllp_job_state_create(job_id, req->job_step_id,
-					cpu_bind_type, cpu_bind, numtasks);
-
-	if (j) {
-		_append_lllp_job_state(j);
-		_cr_update_lllp(1, job_id, req->job_step_id,
-				cpu_bind_type, cpu_bind, numtasks);
+	if (masks_str[0] != '\0') {
+		req->cpu_bind = masks_str;
+		req->cpu_bind_type |= CPU_BIND_MASK; 
+	} else {
+		req->cpu_bind = NULL;
+		req->cpu_bind_type &= ~CPU_BIND_VERBOSE;
 	}
-	slurm_mutex_unlock(&lllp_ctx->mutex);
-}
 
-void cr_release_lllp(uint32_t job_id)
-{
-	ListIterator  i = NULL;
-	lllp_job_state_t *j;
-	cpu_bind_type_t cpu_bind_type = 0;
-	char *cpu_bind = NULL;
-	uint32_t numtasks = 0;
-	char buf_type[100];
+	/* clear mask generation bits */
+	req->cpu_bind_type &= ~CPU_BIND_TO_THREADS;
+	req->cpu_bind_type &= ~CPU_BIND_TO_CORES;
+	req->cpu_bind_type &= ~CPU_BIND_TO_SOCKETS;
+	req->cpu_bind_type &= ~CPU_BIND_TO_LDOMS;
 
-	debug3("release LLLP job [%u.*]", job_id);
-
-    	/* retrieve cpu_bind_type, cpu_bind from job_id */
-	slurm_mutex_lock(&lllp_ctx->mutex);
-	i = list_iterator_create(lllp_ctx->job_list);
-	while ((j = list_next(i))) {
-		if (j->jobid == job_id) {
-			cpu_bind_type = j->cpu_bind_type;
-			cpu_bind      = j->cpu_bind;
-			numtasks      = j->numtasks;
-			slurm_sprint_cpu_bind_type(buf_type, cpu_bind_type);
-			debug3("release search lllp job %u: %d tasks; %s[%d], %s",
-			       j->jobid, numtasks,
-			       buf_type, cpu_bind_type, cpu_bind);
-
-			_cr_update_lllp(0, job_id, j->jobstepid,
-					cpu_bind_type, cpu_bind, numtasks);
-
-			/* done with saved state, remove entry */
-			list_delete_item(i);
-		}
-	}
-	list_iterator_destroy(i);
-	slurm_mutex_unlock(&lllp_ctx->mutex);
+	slurm_sprint_cpu_bind_type(buf_type, req->cpu_bind_type);
+	info("_lllp_generate_cpu_bind jobid [%u]: %s, %s",
+	     req->job_id, buf_type, masks_str);
 }
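+/* Example: two tasks bound to abstract CPUs {0,2} and {1,3} produce a
+ * cpu_bind string of the form "0x5,0xA" with CPU_BIND_MASK set in
+ * req->cpu_bind_type. */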
 
-
diff --git a/src/plugins/task/affinity/dist_tasks.h b/src/plugins/task/affinity/dist_tasks.h
index 92b82a91f430fb89e5728ea649e4efefddefcc81..b009224c47aefbf6f11b27831dac6a28e3f0ab9f 100644
--- a/src/plugins/task/affinity/dist_tasks.h
+++ b/src/plugins/task/affinity/dist_tasks.h
@@ -1,10 +1,11 @@
 /*****************************************************************************\
  *  Copyright (C) 2006 Hewlett-Packard Development Company, L.P.
  *  Written by Susanne M. Balle, <susanne.balle@hp.com>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -35,73 +36,10 @@
 #ifndef _SLURMSTEPD_DIST_TASKS_H
 #define _SLURMSTEPD_DIST_TASKS_H
 
-#if HAVE_CONFIG_H
-#  include "config.h"
-#endif
+#include <stdint.h>
+#include "src/common/slurm_protocol_defs.h"
 
-#if HAVE_STRING_H
-#  include <string.h>
-#endif
-
-#include <signal.h>
-#include <sys/types.h>
-#include <grp.h>
-#include <stdlib.h>
-
-#include "src/common/xmalloc.h"
-#include "src/common/xassert.h"
-#include "src/common/xstring.h"
-#include "src/common/fd.h"
-#include "src/common/log.h"
-#include "src/common/eio.h"
-#include "src/common/slurm_protocol_api.h"
-#include "src/common/slurm_resource_info.h"
-
-#include "src/common/bitstring.h"
-
-#include "src/slurmd/slurmd/slurmd.h"
-
-/* Structures to create an object oriented version of a 4-D 
-   infrastructure --> task id mapping [node][cpu][core][taskid] = tid
-*/
-struct thread_gids {
-  int *gids;  /* Taskids for a specific thread */
-  int tasks;  /* Number of tasks for a specific thread */
-};
-
-struct core_gids {
-  struct thread_gids *threads; /* Taskids for a specific thread */
-};
-
-struct socket_gids {
-  struct core_gids *cores; /* Taskids for a specific core */
-};
-
-struct node_gids {
-  struct socket_gids *sockets; /* Taskids for a specific CPU */
-};
-
-struct slurm_lllp_context {
-#ifndef NDEBUG
-#  define LLLP_CTX_MAGIC 0x0d0d0d
-        int magic;
-#endif
-#if WITH_PTHREADS  
-        pthread_mutex_t mutex;
-#endif
-        List           job_list;   /* List of job bindings */
-};
-typedef struct slurm_lllp_context slurm_lllp_ctx_t;
-
-void cr_reserve_lllp(uint32_t job_id,
-			launch_tasks_request_msg_t *req, uint32_t node_id);
-void cr_release_lllp(uint32_t job_id);
+void batch_bind(batch_job_launch_msg_t *req);
 void lllp_distribution(launch_tasks_request_msg_t *req, uint32_t node_id);
-void lllp_ctx_destroy(void);
-void lllp_ctx_alloc(void);
-void get_bitmap_from_cpu_bind(bitstr_t *bitmap_test,
-			      cpu_bind_type_t cpu_bind_type, 
-			      char *cpu_bind, uint32_t numtasks);
 
 #endif /* !_SLURMSTEPD_DIST_TASKS_H */
-
diff --git a/src/plugins/task/affinity/numa.c b/src/plugins/task/affinity/numa.c
index a3d7f57b1f1e99d39282a07e82fc5116d646bf1a..3ba7265c09ea79a059355932d4d0bef9b7cad83c 100644
--- a/src/plugins/task/affinity/numa.c
+++ b/src/plugins/task/affinity/numa.c
@@ -5,10 +5,11 @@
  *  Copyright (C) 2006 The Regents of the University of California and
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -150,7 +151,7 @@ void slurm_chk_memset(nodemask_t *mask, slurmd_job_t *job)
 
 int get_memset(nodemask_t *mask, slurmd_job_t *job)
 {
-	int nummasks, maskid, i;
+	int nummasks, maskid, i, threads;
 	char *curstr, *selstr;
 	char mstr[1 + NUMA_NUM_NODES / 4];
 	int local_id = job->envtp->localid;
@@ -167,7 +168,8 @@ int get_memset(nodemask_t *mask, slurmd_job_t *job)
 	}
 
 	if (job->mem_bind_type & MEM_BIND_RANK) {
-		nodemask_set(mask, job->envtp->localid % job->cpus);
+		threads = MAX(conf->threads, 1);
+		nodemask_set(mask, job->envtp->localid % (job->cpus*threads));
 		return true;
 	}
 
@@ -238,4 +240,81 @@ int get_memset(nodemask_t *mask, slurmd_job_t *job)
 	return false;
 }
 
+
+static uint16_t *numa_array = NULL;
+
+/* helper function */
+static void _add_numa_mask_to_array(unsigned long *cpu_mask, int size,
+					uint16_t maxcpus, uint16_t nnode_id)
+{
+	unsigned long count = 1;
+	int i, j, x = sizeof(unsigned long) * 8;
+	for (i = 0; i < size; i++) {
+		/* iterate over each bit of this unsigned long */
+		for (j = 0, count = 1; j < x; j++, count *= 2) {
+			if (count & cpu_mask[i]) {
+				/* this bit in the cpu_mask is set */
+				/* word i, bit j => CPU id */
+				int cpu = i * x + j;
+				if (cpu < maxcpus) {
+					numa_array[cpu] = nnode_id;
+				}
+			}
+		}
+	}
+}
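+/* Example (hypothetical, 64-bit unsigned long): bit 3 of cpu_mask[1]
+ * marks CPU 64 + 3 = 67; if 67 < maxcpus, numa_array[67] = nnode_id. */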
+
+/* return the numa node for the given cpuid */
+extern uint16_t slurm_get_numa_node(uint16_t cpuid)
+{
+	uint16_t maxcpus = 0, nnid = 0;
+	int size, retry, max_node;
+	unsigned long *cpu_mask;
+	
+	maxcpus = conf->sockets * conf->cores * conf->threads;
+	if (cpuid >= maxcpus)
+		return 0;
+		
+	if (numa_array) {
+		return numa_array[cpuid];
+	}
+	
+	/* need to load the numa_array */
+	max_node = numa_max_node();
+
+	/* The required size of the mask buffer for numa_node_to_cpus()
+	 * is poorly specified. The third argument is the size of the
+	 * mask, which is an array of unsigned longs, but it is unclear
+	 * whether that size is measured in bytes or in unsigned longs.
+	 * The retry loop below doubles the buffer until the call
+	 * accepts it. If anyone can pin down the interface, please
+	 * fix this.
+	 */
+	size = 8;
+	cpu_mask = xmalloc(sizeof(unsigned long) * size);
+	retry = 0;
+	while (retry++ < 8 && numa_node_to_cpus(nnid, cpu_mask, size) < 0) {
+		size *= 2;
+		xrealloc(cpu_mask, sizeof(unsigned long) * size);
+	}
+	if (retry >= 8) {
+		xfree(cpu_mask);
+		error("NUMA problem with numa_node_to_cpus arguments");
+		return 0;
+	}
+	numa_array = xmalloc(sizeof(uint16_t) * maxcpus);
+	_add_numa_mask_to_array(cpu_mask, size, maxcpus, nnid);
+	while (nnid++ < max_node) {
+		if (numa_node_to_cpus(nnid, cpu_mask, size) < 0) {
+			error("NUMA problem - numa_node_to_cpus 2nd call fail");
+			xfree(cpu_mask);
+			xfree(numa_array);
+			numa_array = NULL;
+			return 0;
+		}
+		_add_numa_mask_to_array(cpu_mask, size, maxcpus, nnid);
+	}
+	xfree(cpu_mask);
+	return numa_array[cpuid];
+}
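+/* Illustrative use: callers holding a physical CPU id can group CPUs by
+ * locality domain, e.g. uint16_t ldom = slurm_get_numa_node(cpuid);
+ * the cpu-to-node table is built on first use and cached in numa_array. */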
+
 #endif	/* HAVE_NUMA */
diff --git a/src/plugins/task/affinity/schedutils.c b/src/plugins/task/affinity/schedutils.c
index d5a3716065dcff19c71dd35dc082010dde60896a..18bc2b32509560c2f861afb5cb3c2366320e1704 100644
--- a/src/plugins/task/affinity/schedutils.c
+++ b/src/plugins/task/affinity/schedutils.c
@@ -6,7 +6,8 @@
  *  Copyright (C) 2004 Robert Love
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -122,7 +123,7 @@ char * cpuset_to_str(const cpu_set_t *mask, char *str)
 {
 	int base;
 	char *ptr = str;
-	char *ret = 0;
+	char *ret = NULL;
 
 	for (base = CPU_SETSIZE - 4; base >= 0; base -= 4) {
 		char val = 0;
@@ -138,7 +139,7 @@ char * cpuset_to_str(const cpu_set_t *mask, char *str)
 			ret = ptr;
 		*ptr++ = val_to_char(val);
 	}
-	*ptr = 0;
+	*ptr = '\0';
 	return ret ? ret : ptr - 1;
 }
 
diff --git a/src/plugins/task/affinity/task_affinity.c b/src/plugins/task/affinity/task_affinity.c
index 466622d180b4616f9dd98ba350a4844b77318d1a..5b6596bcc75d906abbe9492aff65e910a358cfca 100644
--- a/src/plugins/task/affinity/task_affinity.c
+++ b/src/plugins/task/affinity/task_affinity.c
@@ -5,12 +5,13 @@
  *  Copyright (C) 2005-2008 Hewlett-Packard Development Company, L.P.
  *  Modified by Hewlett-Packard for task affinity support using task_none.c
  *  Copyright (C) 2005-2007 The Regents of the University of California
- *  Copyright (C) 2008 Lawrence Livermore National Security.
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -43,12 +44,15 @@
 #endif
 
 #include <ctype.h>
+#include <dirent.h>
 #include <signal.h>
 #include <sys/types.h>
 
 #include "affinity.h"
 #include "dist_tasks.h"
 
+#define PURGE_CPUSET_DIRS 0
+
 /*
  * These variables are required by the generic plugin interface.  If they
  * are not found in the plugin, the plugin loader will ignore it.
@@ -86,7 +90,6 @@ const uint32_t plugin_version   = 100;
  */
 extern int init (void)
 {
-	lllp_ctx_alloc();
 	verbose("%s loaded", plugin_name);
 	return SLURM_SUCCESS;
 }
@@ -97,99 +100,75 @@ extern int init (void)
  */
 extern int fini (void)
 {
-	lllp_ctx_destroy();
 	verbose("%s unloaded", plugin_name);
 	return SLURM_SUCCESS;
 }
 
-/*
- * _isvalue_task
- * returns 1 is the argument appears to be a value, 0 otherwise
- * this should be identical to _isvalue in src/srun/opt.c
- */
-static int _isvalue_task(char *arg)
+/* cpu bind enforcement, update binding type based upon the
+ *	TaskPluginParam configuration parameter */
+static void _update_bind_type(launch_tasks_request_msg_t *req)
 {
-    	if (isdigit(*arg)) {		/* decimal values and 0x.. hex values */
-	    	return 1;
+	bool set_bind = false;
+
+	if (conf->task_plugin_param & CPU_BIND_NONE) {
+		req->cpu_bind_type |= CPU_BIND_NONE;
+		req->cpu_bind_type &= (~CPU_BIND_TO_SOCKETS);
+		req->cpu_bind_type &= (~CPU_BIND_TO_CORES);
+		req->cpu_bind_type &= (~CPU_BIND_TO_THREADS);
+		req->cpu_bind_type &= (~CPU_BIND_TO_LDOMS);
+		set_bind = true;
+	} else if (conf->task_plugin_param & CPU_BIND_TO_SOCKETS) {
+		req->cpu_bind_type &= (~CPU_BIND_NONE);
+		req->cpu_bind_type |= CPU_BIND_TO_SOCKETS;
+		req->cpu_bind_type &= (~CPU_BIND_TO_CORES);
+		req->cpu_bind_type &= (~CPU_BIND_TO_THREADS);
+		req->cpu_bind_type &= (~CPU_BIND_TO_LDOMS);
+		set_bind = true;
+	} else if (conf->task_plugin_param & CPU_BIND_TO_CORES) {
+		req->cpu_bind_type &= (~CPU_BIND_NONE);
+		req->cpu_bind_type &= (~CPU_BIND_TO_SOCKETS);
+		req->cpu_bind_type |= CPU_BIND_TO_CORES;
+		req->cpu_bind_type &= (~CPU_BIND_TO_THREADS);
+		req->cpu_bind_type &= (~CPU_BIND_TO_LDOMS);
+		set_bind = true;
+	} else if (conf->task_plugin_param & CPU_BIND_TO_THREADS) {
+		req->cpu_bind_type &= (~CPU_BIND_NONE);
+		req->cpu_bind_type &= (~CPU_BIND_TO_SOCKETS);
+		req->cpu_bind_type &= (~CPU_BIND_TO_CORES);
+		req->cpu_bind_type |= CPU_BIND_TO_THREADS;
+		req->cpu_bind_type &= (~CPU_BIND_TO_LDOMS);
+		set_bind = true;
+	} else if (conf->task_plugin_param & CPU_BIND_TO_LDOMS) {
+		req->cpu_bind_type &= (~CPU_BIND_NONE);
+		req->cpu_bind_type &= (~CPU_BIND_TO_SOCKETS);
+		req->cpu_bind_type &= (~CPU_BIND_TO_CORES);
+		req->cpu_bind_type &= (~CPU_BIND_TO_THREADS);
+		req->cpu_bind_type |= CPU_BIND_TO_LDOMS;
+		set_bind = true;
 	}
 
-	while (isxdigit(*arg)) {	/* hex values not preceded by 0x */
-		arg++;
+	if (conf->task_plugin_param & CPU_BIND_VERBOSE) {
+		req->cpu_bind_type |= CPU_BIND_VERBOSE;
+		set_bind = true;
 	}
 
-	if ((*arg == ',') || (*arg == '\0')) { /* end of field or string */
-	    	return 1;
+	if (set_bind) {
+		char bind_str[128];
+		slurm_sprint_cpu_bind_type(bind_str, req->cpu_bind_type);
+		info("task affinity : enforcing '%s' cpu bind method", 
+		     bind_str);
 	}
-
-	return 0;			/* not a value */
 }
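+/* Example: with TaskPluginParam=Cores in slurm.conf, a request that
+ * arrived asking for thread binding has CPU_BIND_TO_THREADS cleared and
+ * CPU_BIND_TO_CORES set, and the enforced method is logged via info(). */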
 
-/* cpu bind enforcement, update binding type based upon SLURM_ENFORCED_CPU_BIND
- * environment variable */
-static void _update_bind_type(launch_tasks_request_msg_t *req)
+/*
+ * task_slurmd_batch_request()
+ */
+extern int task_slurmd_batch_request (uint32_t job_id, 
+				      batch_job_launch_msg_t *req)
 {
-	char *buf, *p, *tok;
-	char buf_type[100];
-	cpu_bind_type_t cpu_bind_type;
-	int cpu_bind_type_is_valid = 0;
-	char* cpu_bind_type_str = getenv("SLURM_ENFORCED_CPU_BIND");
-
-	if (cpu_bind_type_str == NULL)
-		return;
-
-	buf = xstrdup(cpu_bind_type_str);
-	p = buf;
-
-	/* change all ',' delimiters not followed by a digit to ';'  */
-	/* simplifies parsing tokens while keeping map/mask together */
-	while (p[0] != '\0') {
-		if ((p[0] == ',') && (!_isvalue_task(&(p[1]))))
-			p[0] = ';';
-		p++;
-	}
-
-	p = buf;
-	cpu_bind_type = 0;
-	while ((tok = strsep(&p, ";")) && !cpu_bind_type_is_valid) {
-		if ((strcasecmp(tok, "q") == 0) ||
-		    (strcasecmp(tok, "quiet") == 0)) {
-			cpu_bind_type &= ~CPU_BIND_VERBOSE;
-		} else if ((strcasecmp(tok, "v") == 0) ||
-			   (strcasecmp(tok, "verbose") == 0)) {
-			cpu_bind_type |= CPU_BIND_VERBOSE;
-		} else if ((strcasecmp(tok, "no") == 0) ||
-			   (strcasecmp(tok, "none") == 0)) {
-			cpu_bind_type |= CPU_BIND_NONE;
-			cpu_bind_type_is_valid = 1;
-		} else if ((strcasecmp(tok, "socket") == 0) ||
-			   (strcasecmp(tok, "sockets") == 0)) {
-			cpu_bind_type |= CPU_BIND_TO_SOCKETS;
-			cpu_bind_type_is_valid = 1;
-		} else if ((strcasecmp(tok, "core") == 0) ||
-			   (strcasecmp(tok, "cores") == 0)) {
-			cpu_bind_type |= CPU_BIND_TO_CORES;
-			cpu_bind_type_is_valid = 1;
-		} else if ((strcasecmp(tok, "thread") == 0) ||
-			   (strcasecmp(tok, "threads") == 0)) {
-			cpu_bind_type |= CPU_BIND_TO_THREADS;
-			cpu_bind_type_is_valid = 1;
-		} else {
-			error("task affinity : invalid enforced cpu bind "
-			      "method '%s': none or an auto binding "
-			      "(cores,sockets,threads) is required",
-			      cpu_bind_type_str);
-			cpu_bind_type_is_valid = 0;
-			break;
-		}
-	}
-	xfree(buf);
-
-	if (cpu_bind_type_is_valid) {
-		req->cpu_bind_type = cpu_bind_type;
-		slurm_sprint_cpu_bind_type(buf_type, req->cpu_bind_type);
-		info("task affinity : enforcing '%s' cpu bind method", 
-		     cpu_bind_type_str);
-	}
+	info("task_slurmd_batch_request: %u", job_id);
+	batch_bind(req);
+	return SLURM_SUCCESS;
 }
 
 /*
@@ -221,9 +200,6 @@ extern int task_slurmd_launch_request (uint32_t job_id,
 		info("task affinity : after lllp distribution cpu bind "
 		     "method is '%s' (%s)", buf_type, req->cpu_bind);
 	}
-		
-	/* Remove the slurm msg timeout needs to be investigated some more */
-	/* req->cpu_bind_type = CPU_BIND_NONE; */ 
 	
 	return SLURM_SUCCESS;
 }
@@ -236,7 +212,6 @@ extern int task_slurmd_reserve_resources (uint32_t job_id,
 					  uint32_t node_id)
 {
 	debug("task_slurmd_reserve_resources: %u", job_id);
-	cr_reserve_lllp(job_id, req, node_id);
 	return SLURM_SUCCESS;
 }
 
@@ -264,7 +239,48 @@ extern int task_slurmd_resume_job (uint32_t job_id)
 extern int task_slurmd_release_resources (uint32_t job_id)
 {
 	debug("task_slurmd_release_resources: %u", job_id);
-	cr_release_lllp(job_id);
+
+#if PURGE_CPUSET_DIRS
+	/* NOTE: The notify_on_release flag set in cpuset.c
+	 * should remove the directory, but that is not
+	 * happening reliably. */
+	if (conf->task_plugin_param & CPU_BIND_CPUSETS) {
+		char base[PATH_MAX], path[PATH_MAX];
+		if (snprintf(base, PATH_MAX, "%s/slurm%u",
+				CPUSET_DIR, job_id) > PATH_MAX) {
+			error("cpuset path too long");
+			return SLURM_ERROR;
+		}
+		if (rmdir(base) && (errno == ENOTEMPTY)) {
+			DIR *dirp;
+			struct dirent entry;
+			struct dirent *result;
+			int rc;
+			if ((dirp = opendir(base)) == NULL) {
+				error("could not open dir %s: %m", base);
+				return SLURM_ERROR;
+			}
+			while (1) {
+				rc = readdir_r(dirp, &entry, &result);
+				if (rc && (errno == EAGAIN))
+					continue;
+				if (rc || (result == NULL))
+					break;
+				if (strncmp(entry.d_name, "slurm", 5))
+					continue;
+				if (snprintf(path, PATH_MAX, "%s/%s",
+					     base, entry.d_name) > PATH_MAX) {
+					error("cpuset path too long");
+					break;
+				}
+				rmdir(path);
+			}
+			closedir(dirp);
+			rmdir(base);
+		}
+	}
+#endif
+
 	return SLURM_SUCCESS;
 }
 
@@ -277,7 +293,7 @@ extern int task_pre_setuid (slurmd_job_t *job)
 {
 	char path[PATH_MAX];
 
-	if (!conf->use_cpusets)
+	if (!(conf->task_plugin_param & CPU_BIND_CPUSETS))
 		return SLURM_SUCCESS;
 
 	if (snprintf(path, PATH_MAX, "%s/slurm%u",
@@ -285,8 +301,7 @@ extern int task_pre_setuid (slurmd_job_t *job)
 		error("cpuset path too long");
 		return SLURM_ERROR;
 	}
-	slurm_build_cpuset(CPUSET_DIR, path, job->uid, job->gid);
-	return SLURM_SUCCESS;
+	return slurm_build_cpuset(CPUSET_DIR, path, job->uid, job->gid);
 }
 
 /*
@@ -297,11 +312,13 @@ extern int task_pre_setuid (slurmd_job_t *job)
 extern int task_pre_launch (slurmd_job_t *job)
 {
 	char base[PATH_MAX], path[PATH_MAX];
+	int rc = SLURM_SUCCESS;
 
-	debug("affinity task_pre_launch: %u.%u, task %d", 
-		job->jobid, job->stepid, job->envtp->procid);
+	debug("affinity task_pre_launch:%u.%u, task:%u bind:%u", 
+	      job->jobid, job->stepid, job->envtp->procid,
+	      job->cpu_bind_type);
 
-	if (conf->use_cpusets) {
+	if (conf->task_plugin_param & CPU_BIND_CPUSETS) {
 		info("Using cpuset affinity for tasks");
 		if (snprintf(base, PATH_MAX, "%s/slurm%u",
 				CPUSET_DIR, job->jobid) > PATH_MAX) {
@@ -322,39 +339,51 @@ extern int task_pre_launch (slurmd_job_t *job)
 		cpu_set_t new_mask, cur_mask;
 		pid_t mypid  = job->envtp->task_pid;
 
-		int setval = 0;
 		slurm_getaffinity(mypid, sizeof(cur_mask), &cur_mask);
 
-		if (get_cpuset(&new_mask, job)
-		&&  (!(job->cpu_bind_type & CPU_BIND_NONE))) {
-			if (conf->use_cpusets) {
-				setval = slurm_set_cpuset(base, path, mypid,
+		if (get_cpuset(&new_mask, job) &&
+		    (!(job->cpu_bind_type & CPU_BIND_NONE))) {
+			if (conf->task_plugin_param & CPU_BIND_CPUSETS) {
+				rc = slurm_set_cpuset(base, path, mypid,
 						sizeof(new_mask), 
 						&new_mask);
 				slurm_get_cpuset(path, mypid,
-						sizeof(cur_mask), 
-						&cur_mask);
+						 sizeof(cur_mask), 
+						 &cur_mask);
 			} else {
-				setval = slurm_setaffinity(mypid,
-						sizeof(new_mask), 
-						&new_mask);
+				rc = slurm_setaffinity(mypid,
+						       sizeof(new_mask), 
+						       &new_mask);
 				slurm_getaffinity(mypid,
-						sizeof(cur_mask), 
-						&cur_mask);
+						  sizeof(cur_mask), 
+						  &cur_mask);
 			}
 		}
-		slurm_chkaffinity(setval ? &new_mask : &cur_mask, 
-					job, setval);
+		slurm_chkaffinity(rc ? &cur_mask : &new_mask, 
+				  job, rc);
+	} else if (job->mem_bind_type &&
+		   (conf->task_plugin_param & CPU_BIND_CPUSETS)) {
+		cpu_set_t cur_mask;
+		pid_t mypid  = job->envtp->task_pid;
+
+		/* Establish cpuset just for the memory binding */
+		slurm_getaffinity(mypid, sizeof(cur_mask), &cur_mask);
+		rc = slurm_set_cpuset(base, path, 
+				      (pid_t) job->envtp->task_pid, 
+				      sizeof(cur_mask), &cur_mask);
 	}
 
 #ifdef HAVE_NUMA
-	if (conf->use_cpusets && (slurm_memset_available() >= 0)) {
+	if ((conf->task_plugin_param & CPU_BIND_CPUSETS) && 
+	    (slurm_memset_available() >= 0)) {
 		nodemask_t new_mask, cur_mask;
 
 		cur_mask = numa_get_membind();
-		if (get_memset(&new_mask, job)
-		&&  (!(job->mem_bind_type & MEM_BIND_NONE))) {
+		if (get_memset(&new_mask, job) &&
+		    (!(job->mem_bind_type & MEM_BIND_NONE))) {
 			slurm_set_memset(path, &new_mask);
+			if (numa_available() >= 0)
+				numa_set_membind(&new_mask);
 			cur_mask = new_mask;
 		}
 		slurm_chk_memset(&cur_mask, job);
@@ -370,7 +399,7 @@ extern int task_pre_launch (slurmd_job_t *job)
 		slurm_chk_memset(&cur_mask, job);
 	}
 #endif
-	return SLURM_SUCCESS;
+	return rc;
 }
 
 /*
@@ -381,7 +410,28 @@ extern int task_pre_launch (slurmd_job_t *job)
 extern int task_post_term (slurmd_job_t *job)
 {
 	debug("affinity task_post_term: %u.%u, task %d",
-		job->jobid, job->stepid, job->envtp->procid);
+	      job->jobid, job->stepid, job->envtp->procid);
+
+#if PURGE_CPUSET_DIRS
+	/* NOTE: The notify_on_release flag set in cpuset.c
+	 * should remove the directory, but that is not
+	 * happening reliably. */
+	if (conf->task_plugin_param & CPU_BIND_CPUSETS) {
+		char base[PATH_MAX], path[PATH_MAX];
+		if (snprintf(base, PATH_MAX, "%s/slurm%u",
+				CPUSET_DIR, job->jobid) > PATH_MAX) {
+			error("cpuset path too long");
+			return SLURM_ERROR;
+		}
+		if (snprintf(path, PATH_MAX, "%s/slurm%u.%u_%d",
+				base, job->jobid, job->stepid,
+				job->envtp->localid) > PATH_MAX) {
+			error("cpuset path too long");
+			return SLURM_ERROR;
+		}
+		rmdir(path);
+	}
+#endif
 
 	return SLURM_SUCCESS;
 }
diff --git a/src/plugins/task/none/Makefile.in b/src/plugins/task/none/Makefile.in
index 9758786220157beaf1859aa226161045e92e754d..b31ea8917adbe013ec5dd5a476f8c08f36d23ea6 100644
--- a/src/plugins/task/none/Makefile.in
+++ b/src/plugins/task/none/Makefile.in
@@ -43,14 +43,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -109,6 +113,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
diff --git a/src/plugins/task/none/task_none.c b/src/plugins/task/none/task_none.c
index 5e852eab6596b349b6eebdf1efb47719b34bb228..a06a7e8b3260d420abb2a1e4dcee13b0ab948928 100644
--- a/src/plugins/task/none/task_none.c
+++ b/src/plugins/task/none/task_none.c
@@ -6,10 +6,11 @@
  *  Copyright (C) 2008 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -98,6 +99,16 @@ extern int fini (void)
 	return SLURM_SUCCESS;
 }
 
+/*
+ * task_slurmd_batch_request()
+ */
+extern int task_slurmd_batch_request (uint32_t job_id, 
+				      batch_job_launch_msg_t *req)
+{
+	debug("task_slurmd_batch_request: %u", job_id);
+	return SLURM_SUCCESS;
+}
+
 /*
  * task_slurmd_launch_request()
  */
diff --git a/src/plugins/topology/3d_torus/Makefile.am b/src/plugins/topology/3d_torus/Makefile.am
new file mode 100644
index 0000000000000000000000000000000000000000..4763af1d85d0b84d4ef19b4b3f73cac4264bdecd
--- /dev/null
+++ b/src/plugins/topology/3d_torus/Makefile.am
@@ -0,0 +1,17 @@
+# Makefile for topology/3d_torus plugin
+
+AUTOMAKE_OPTIONS = foreign
+
+PLUGIN_FLAGS = -module -avoid-version --export-dynamic 
+
+INCLUDES = -I$(top_srcdir) -I$(top_srcdir)/src/common
+
+pkglib_LTLIBRARIES = topology_3d_torus.la
+
+# 3d_torus topology plugin.
+topology_3d_torus_la_SOURCES =	\
+	hilbert.c		\
+	hilbert.h		\
+	hilbert_slurm.c		\
+	topology_3d_torus.c
+topology_3d_torus_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS)
diff --git a/src/plugins/topology/3d_torus/Makefile.in b/src/plugins/topology/3d_torus/Makefile.in
new file mode 100644
index 0000000000000000000000000000000000000000..8bb62ba70eb53e36d0c05c743893316ffe1e779f
--- /dev/null
+++ b/src/plugins/topology/3d_torus/Makefile.in
@@ -0,0 +1,574 @@
+# Makefile.in generated by automake 1.10.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
+# 2003, 2004, 2005, 2006, 2007, 2008  Free Software Foundation, Inc.
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# Makefile for topology/3d_torus plugin
+
+VPATH = @srcdir@
+pkgdatadir = $(datadir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+target_triplet = @target@
+subdir = src/plugins/topology/3d_torus
+DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
+	$(top_srcdir)/auxdir/slurm.m4 \
+	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
+	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
+	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
+	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
+	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_databases.m4 \
+	$(top_srcdir)/auxdir/x_ac_debug.m4 \
+	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
+	$(top_srcdir)/auxdir/x_ac_federation.m4 \
+	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
+	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_munge.m4 \
+	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_pam.m4 \
+	$(top_srcdir)/auxdir/x_ac_ptrace.m4 \
+	$(top_srcdir)/auxdir/x_ac_readline.m4 \
+	$(top_srcdir)/auxdir/x_ac_setpgrp.m4 \
+	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
+	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
+	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
+	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+	$(ACLOCAL_M4)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h $(top_builddir)/slurm/slurm.h
+CONFIG_CLEAN_FILES =
+am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
+am__vpath_adj = case $$p in \
+    $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
+    *) f=$$p;; \
+  esac;
+am__strip_dir = `echo $$p | sed -e 's|^.*/||'`;
+am__installdirs = "$(DESTDIR)$(pkglibdir)"
+pkglibLTLIBRARIES_INSTALL = $(INSTALL)
+LTLIBRARIES = $(pkglib_LTLIBRARIES)
+topology_3d_torus_la_LIBADD =
+am_topology_3d_torus_la_OBJECTS = hilbert.lo hilbert_slurm.lo \
+	topology_3d_torus.lo
+topology_3d_torus_la_OBJECTS = $(am_topology_3d_torus_la_OBJECTS)
+topology_3d_torus_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \
+	$(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \
+	$(topology_3d_torus_la_LDFLAGS) $(LDFLAGS) -o $@
+DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm
+depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp
+am__depfiles_maybe = depfiles
+COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \
+	$(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
+LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
+	--mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \
+	$(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
+CCLD = $(CC)
+LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
+	--mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \
+	$(LDFLAGS) -o $@
+SOURCES = $(topology_3d_torus_la_SOURCES)
+DIST_SOURCES = $(topology_3d_torus_la_SOURCES)
+ETAGS = etags
+CTAGS = ctags
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AR = @AR@
+AUTHD_CFLAGS = @AUTHD_CFLAGS@
+AUTHD_LIBS = @AUTHD_LIBS@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
+BLUEGENE_LOADED = @BLUEGENE_LOADED@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CMD_LDFLAGS = @CMD_LDFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CXX = @CXX@
+CXXCPP = @CXXCPP@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+DSYMUTIL = @DSYMUTIL@
+ECHO = @ECHO@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+ELAN_LIBS = @ELAN_LIBS@
+EXEEXT = @EXEEXT@
+F77 = @F77@
+FEDERATION_LDFLAGS = @FEDERATION_LDFLAGS@
+FFLAGS = @FFLAGS@
+GREP = @GREP@
+GTK2_CFLAGS = @GTK2_CFLAGS@
+GTK2_LIBS = @GTK2_LIBS@
+HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@
+HAVEPGCONFIG = @HAVEPGCONFIG@
+HAVEPKGCONFIG = @HAVEPKGCONFIG@
+HAVE_AIX = @HAVE_AIX@
+HAVE_ELAN = @HAVE_ELAN@
+HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_OPENSSL = @HAVE_OPENSSL@
+HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+LDFLAGS = @LDFLAGS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBTOOL = @LIBTOOL@
+LIB_LDFLAGS = @LIB_LDFLAGS@
+LN_S = @LN_S@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MKDIR_P = @MKDIR_P@
+MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@
+MUNGE_LDFLAGS = @MUNGE_LDFLAGS@
+MUNGE_LIBS = @MUNGE_LIBS@
+MYSQL_CFLAGS = @MYSQL_CFLAGS@
+MYSQL_LIBS = @MYSQL_LIBS@
+NCURSES = @NCURSES@
+NMEDIT = @NMEDIT@
+NUMA_LIBS = @NUMA_LIBS@
+OBJEXT = @OBJEXT@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PAM_LIBS = @PAM_LIBS@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PGSQL_CFLAGS = @PGSQL_CFLAGS@
+PGSQL_LIBS = @PGSQL_LIBS@
+PLPA_LIBS = @PLPA_LIBS@
+PROCTRACKDIR = @PROCTRACKDIR@
+PROJECT = @PROJECT@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+RANLIB = @RANLIB@
+READLINE_LIBS = @READLINE_LIBS@
+RELEASE = @RELEASE@
+SED = @SED@
+SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
+SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SLURMCTLD_PORT = @SLURMCTLD_PORT@
+SLURMDBD_PORT = @SLURMDBD_PORT@
+SLURMD_PORT = @SLURMD_PORT@
+SLURM_API_AGE = @SLURM_API_AGE@
+SLURM_API_CURRENT = @SLURM_API_CURRENT@
+SLURM_API_MAJOR = @SLURM_API_MAJOR@
+SLURM_API_REVISION = @SLURM_API_REVISION@
+SLURM_API_VERSION = @SLURM_API_VERSION@
+SLURM_MAJOR = @SLURM_MAJOR@
+SLURM_MICRO = @SLURM_MICRO@
+SLURM_MINOR = @SLURM_MINOR@
+SLURM_PREFIX = @SLURM_PREFIX@
+SLURM_VERSION = @SLURM_VERSION@
+SO_LDFLAGS = @SO_LDFLAGS@
+SSL_CPPFLAGS = @SSL_CPPFLAGS@
+SSL_LDFLAGS = @SSL_LDFLAGS@
+SSL_LIBS = @SSL_LIBS@
+STRIP = @STRIP@
+UTIL_LIBS = @UTIL_LIBS@
+VERSION = @VERSION@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+ac_ct_F77 = @ac_ct_F77@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_vendor = @build_vendor@
+builddir = @builddir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target = @target@
+target_alias = @target_alias@
+target_cpu = @target_cpu@
+target_os = @target_os@
+target_vendor = @target_vendor@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+AUTOMAKE_OPTIONS = foreign
+PLUGIN_FLAGS = -module -avoid-version --export-dynamic 
+INCLUDES = -I$(top_srcdir) -I$(top_srcdir)/src/common
+pkglib_LTLIBRARIES = topology_3d_torus.la
+
+# 3d_torus topology plugin.
+topology_3d_torus_la_SOURCES = \
+	hilbert.c		\
+	hilbert.h		\
+	hilbert_slurm.c		\
+	topology_3d_torus.c
+
+topology_3d_torus_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS)
+all: all-am
+
+.SUFFIXES:
+.SUFFIXES: .c .lo .o .obj
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am  $(am__configure_deps)
+	@for dep in $?; do \
+	  case '$(am__configure_deps)' in \
+	    *$$dep*) \
+	      cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh \
+		&& exit 0; \
+	      exit 1;; \
+	  esac; \
+	done; \
+	echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign  src/plugins/topology/3d_torus/Makefile'; \
+	cd $(top_srcdir) && \
+	  $(AUTOMAKE) --foreign  src/plugins/topology/3d_torus/Makefile
+.PRECIOUS: Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+	@case '$?' in \
+	  *config.status*) \
+	    cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+	  *) \
+	    echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+	    cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+	esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES)
+	@$(NORMAL_INSTALL)
+	test -z "$(pkglibdir)" || $(MKDIR_P) "$(DESTDIR)$(pkglibdir)"
+	@list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \
+	  if test -f $$p; then \
+	    f=$(am__strip_dir) \
+	    echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) '$$p' '$(DESTDIR)$(pkglibdir)/$$f'"; \
+	    $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) "$$p" "$(DESTDIR)$(pkglibdir)/$$f"; \
+	  else :; fi; \
+	done
+
+uninstall-pkglibLTLIBRARIES:
+	@$(NORMAL_UNINSTALL)
+	@list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \
+	  p=$(am__strip_dir) \
+	  echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$p'"; \
+	  $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$p"; \
+	done
+
+clean-pkglibLTLIBRARIES:
+	-test -z "$(pkglib_LTLIBRARIES)" || rm -f $(pkglib_LTLIBRARIES)
+	@list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \
+	  dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \
+	  test "$$dir" != "$$p" || dir=.; \
+	  echo "rm -f \"$${dir}/so_locations\""; \
+	  rm -f "$${dir}/so_locations"; \
+	done
+topology_3d_torus.la: $(topology_3d_torus_la_OBJECTS) $(topology_3d_torus_la_DEPENDENCIES) 
+	$(topology_3d_torus_la_LINK) -rpath $(pkglibdir) $(topology_3d_torus_la_OBJECTS) $(topology_3d_torus_la_LIBADD) $(LIBS)
+
+mostlyclean-compile:
+	-rm -f *.$(OBJEXT)
+
+distclean-compile:
+	-rm -f *.tab.c
+
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/hilbert.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/hilbert_slurm.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/topology_3d_torus.Plo@am__quote@
+
+.c.o:
+@am__fastdepCC_TRUE@	$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCC_TRUE@	mv -f $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(COMPILE) -c $<
+
+.c.obj:
+@am__fastdepCC_TRUE@	$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'`
+@am__fastdepCC_TRUE@	mv -f $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(COMPILE) -c `$(CYGPATH_W) '$<'`
+
+.c.lo:
+@am__fastdepCC_TRUE@	$(LTCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCC_TRUE@	mv -f $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LTCOMPILE) -c -o $@ $<
+
+mostlyclean-libtool:
+	-rm -f *.lo
+
+clean-libtool:
+	-rm -rf .libs _libs
+
+ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES)
+	list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
+	unique=`for i in $$list; do \
+	    if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+	  done | \
+	  $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
+	      END { if (nonempty) { for (i in files) print i; }; }'`; \
+	mkid -fID $$unique
+tags: TAGS
+
+TAGS:  $(HEADERS) $(SOURCES)  $(TAGS_DEPENDENCIES) \
+		$(TAGS_FILES) $(LISP)
+	tags=; \
+	here=`pwd`; \
+	list='$(SOURCES) $(HEADERS)  $(LISP) $(TAGS_FILES)'; \
+	unique=`for i in $$list; do \
+	    if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+	  done | \
+	  $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
+	      END { if (nonempty) { for (i in files) print i; }; }'`; \
+	if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \
+	  test -n "$$unique" || unique=$$empty_fix; \
+	  $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+	    $$tags $$unique; \
+	fi
+ctags: CTAGS
+CTAGS:  $(HEADERS) $(SOURCES)  $(TAGS_DEPENDENCIES) \
+		$(TAGS_FILES) $(LISP)
+	tags=; \
+	list='$(SOURCES) $(HEADERS)  $(LISP) $(TAGS_FILES)'; \
+	unique=`for i in $$list; do \
+	    if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+	  done | \
+	  $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
+	      END { if (nonempty) { for (i in files) print i; }; }'`; \
+	test -z "$(CTAGS_ARGS)$$tags$$unique" \
+	  || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
+	     $$tags $$unique
+
+GTAGS:
+	here=`$(am__cd) $(top_builddir) && pwd` \
+	  && cd $(top_srcdir) \
+	  && gtags -i $(GTAGS_ARGS) $$here
+
+distclean-tags:
+	-rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
+
+distdir: $(DISTFILES)
+	@srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+	topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+	list='$(DISTFILES)'; \
+	  dist_files=`for file in $$list; do echo $$file; done | \
+	  sed -e "s|^$$srcdirstrip/||;t" \
+	      -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+	case $$dist_files in \
+	  */*) $(MKDIR_P) `echo "$$dist_files" | \
+			   sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+			   sort -u` ;; \
+	esac; \
+	for file in $$dist_files; do \
+	  if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+	  if test -d $$d/$$file; then \
+	    dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+	    if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+	      cp -pR $(srcdir)/$$file $(distdir)$$dir || exit 1; \
+	    fi; \
+	    cp -pR $$d/$$file $(distdir)$$dir || exit 1; \
+	  else \
+	    test -f $(distdir)/$$file \
+	    || cp -p $$d/$$file $(distdir)/$$file \
+	    || exit 1; \
+	  fi; \
+	done
+check-am: all-am
+check: check-am
+all-am: Makefile $(LTLIBRARIES)
+installdirs:
+	for dir in "$(DESTDIR)$(pkglibdir)"; do \
+	  test -z "$$dir" || $(MKDIR_P) "$$dir"; \
+	done
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+	@$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+	$(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+	  install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+	  `test -z '$(STRIP)' || \
+	    echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+	-test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+
+maintainer-clean-generic:
+	@echo "This command is intended for maintainers to use"
+	@echo "it deletes files that may require special tools to rebuild."
+clean: clean-am
+
+clean-am: clean-generic clean-libtool clean-pkglibLTLIBRARIES \
+	mostlyclean-am
+
+distclean: distclean-am
+	-rm -rf ./$(DEPDIR)
+	-rm -f Makefile
+distclean-am: clean-am distclean-compile distclean-generic \
+	distclean-tags
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-exec-am: install-pkglibLTLIBRARIES
+
+install-html: install-html-am
+
+install-info: install-info-am
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-ps: install-ps-am
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+	-rm -rf ./$(DEPDIR)
+	-rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-compile mostlyclean-generic \
+	mostlyclean-libtool
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am: uninstall-pkglibLTLIBRARIES
+
+.MAKE: install-am install-strip
+
+.PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \
+	clean-libtool clean-pkglibLTLIBRARIES ctags distclean \
+	distclean-compile distclean-generic distclean-libtool \
+	distclean-tags distdir dvi dvi-am html html-am info info-am \
+	install install-am install-data install-data-am install-dvi \
+	install-dvi-am install-exec install-exec-am install-html \
+	install-html-am install-info install-info-am install-man \
+	install-pdf install-pdf-am install-pkglibLTLIBRARIES \
+	install-ps install-ps-am install-strip installcheck \
+	installcheck-am installdirs maintainer-clean \
+	maintainer-clean-generic mostlyclean mostlyclean-compile \
+	mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \
+	tags uninstall uninstall-am uninstall-pkglibLTLIBRARIES
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/src/slurmctld/hilbert.c b/src/plugins/topology/3d_torus/hilbert.c
similarity index 98%
rename from src/slurmctld/hilbert.c
rename to src/plugins/topology/3d_torus/hilbert.c
index 6322fe0df981db6e3952c2369cdc1c6b96f7fa7f..3b09f7982432f84b24fbc288cb55c5fc1712e929 100644
--- a/src/slurmctld/hilbert.c
+++ b/src/plugins/topology/3d_torus/hilbert.c
@@ -31,7 +31,7 @@
 #include "license.txt"
 */
 
-#include "src/slurmctld/hilbert.h"
+#include "src/plugins/topology/3d_torus/hilbert.h"
 
 extern void TransposetoAxes(
 coord_t* X,            // I O  position   [n]
diff --git a/src/slurmctld/hilbert.h b/src/plugins/topology/3d_torus/hilbert.h
similarity index 100%
rename from src/slurmctld/hilbert.h
rename to src/plugins/topology/3d_torus/hilbert.h
diff --git a/src/slurmctld/hilbert_slurm.c b/src/plugins/topology/3d_torus/hilbert_slurm.c
similarity index 87%
rename from src/slurmctld/hilbert_slurm.c
rename to src/plugins/topology/3d_torus/hilbert_slurm.c
index ad044fc53ffa7a45587e88fc531637bea9b7ae66..466aa96015e0a9662583a76c3e09eb3a8bd43954 100644
--- a/src/slurmctld/hilbert_slurm.c
+++ b/src/plugins/topology/3d_torus/hilbert_slurm.c
@@ -3,13 +3,14 @@
  *	on a Hilbert curve so that the resource allocation problem in 
 *	N dimensions can be reduced to a one-dimensional problem
  *****************************************************************************
- *  Copyright (C) 2008 Lawrence Livermore National Security.
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 *  Written by Morris Jette <jette1@llnl.gov>, et al.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -41,7 +42,7 @@
 #  include "config.h"
 #endif
 
-#include "src/slurmctld/hilbert.h"
+#include "src/plugins/topology/3d_torus/hilbert.h"
 #include "src/slurmctld/slurmctld.h"
 
 static int _coord(char coord)
@@ -103,13 +104,20 @@ extern void nodes_to_hilbert_curve(void)
 		AxestoTranspose(hilbert, 5, dims);
 #ifdef HAVE_3D
 		node_ptr->hilbert_integer = 
-			((hilbert[0]>>4 & 1) << 14) + ((hilbert[1]>>4 & 1) << 13) +
-			((hilbert[2]>>4 & 1) << 12) + ((hilbert[0]>>3 & 1) << 11) +
-			((hilbert[1]>>3 & 1) << 10) + ((hilbert[2]>>3 & 1) <<  9) +
-			((hilbert[0]>>2 & 1) <<  8) + ((hilbert[1]>>2 & 1) <<  7) +
-			((hilbert[2]>>2 & 1) <<  6) + ((hilbert[0]>>1 & 1) <<  5) +
-			((hilbert[1]>>1 & 1) <<  4) + ((hilbert[2]>>1 & 1) <<  3) +
-			((hilbert[0]>>0 & 1) <<  2) + ((hilbert[1]>>0 & 1) <<  1) +
+			((hilbert[0]>>4 & 1) << 14) + 
+			((hilbert[1]>>4 & 1) << 13) +
+			((hilbert[2]>>4 & 1) << 12) + 
+			((hilbert[0]>>3 & 1) << 11) +
+			((hilbert[1]>>3 & 1) << 10) + 
+			((hilbert[2]>>3 & 1) <<  9) +
+			((hilbert[0]>>2 & 1) <<  8) + 
+			((hilbert[1]>>2 & 1) <<  7) +
+			((hilbert[2]>>2 & 1) <<  6) + 
+			((hilbert[0]>>1 & 1) <<  5) +
+			((hilbert[1]>>1 & 1) <<  4) + 
+			((hilbert[2]>>1 & 1) <<  3) +
+			((hilbert[0]>>0 & 1) <<  2) + 
+			((hilbert[1]>>0 & 1) <<  1) +
 			((hilbert[2]>>0 & 1) <<  0);
 #else
 		/* A variation on the above calculation would be required here
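
The hilbert_integer expression above interleaves the bits of the three
5-bit transposed coordinates, most significant bit first, so that nodes
close together on the Hilbert curve receive nearby integer keys. A
loop-based equivalent of the unrolled sum (a sketch; the coord_t element
type is assumed to behave as an unsigned integer here):

	/* Interleave bits of three 5-bit coordinates into a 15-bit key.
	 * Equivalent to the unrolled expression in nodes_to_hilbert_curve(). */
	static unsigned interleave3(const unsigned h[3])
	{
		unsigned key = 0;
		int bit, dim;

		for (bit = 4; bit >= 0; bit--) {
			for (dim = 0; dim < 3; dim++)
				key = (key << 1) | ((h[dim] >> bit) & 1);
		}
		return key;
	}
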
diff --git a/src/plugins/topology/3d_torus/topology_3d_torus.c b/src/plugins/topology/3d_torus/topology_3d_torus.c
new file mode 100644
index 0000000000000000000000000000000000000000..e685a1bfb5666bd55b242ca651054c6bf50a214e
--- /dev/null
+++ b/src/plugins/topology/3d_torus/topology_3d_torus.c
@@ -0,0 +1,113 @@
+/*****************************************************************************\
+ *  topology_3d_torus.c - Support for 3-dimensional torus interconnect
+ *	topology, default for Cray XT and Sun Constellation systems
+ *****************************************************************************
+ *  Copyright (C) 2009 Lawrence Livermore National Security.
+ *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+ *  Written by Morris Jette <jette1@llnl.gov>
+ *  CODE-OCEC-09-009. All rights reserved.
+ *  
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
+ *  
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission 
+ *  to link the code of portions of this program with the OpenSSL library under 
+ *  certain conditions as described in each individual source file, and 
+ *  distribute linked combinations including the two. You must obey the GNU 
+ *  General Public License in all respects for all of the code used other than 
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this 
+ *  exception to your version of the file(s), but you are not obligated to do 
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in 
+ *  the program, then also delete it here.
+ *  
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *  
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#if     HAVE_CONFIG_H
+#  include "config.h"
+#endif
+
+#include <signal.h>
+#include <sys/types.h>
+
+#include <slurm/slurm_errno.h>
+#include "src/common/slurm_xlator.h"
+
+/*
+ * These variables are required by the generic plugin interface.  If they
+ * are not found in the plugin, the plugin loader will ignore it.
+ *
+ * plugin_name - a string giving a human-readable description of the
+ * plugin.  There is no maximum length, but the symbol must refer to
+ * a valid string.
+ *
+ * plugin_type - a string suggesting the type of the plugin or its
+ * applicability to a particular form of data or method of data handling.
+ * If the low-level plugin API is used, the contents of this string are
+ * unimportant and may be anything.  SLURM uses the higher-level plugin
+ * interface which requires this string to be of the form
+ *
+ *      <application>/<method>
+ *
+ * where <application> is a description of the intended application of
+ * the plugin (e.g., "task" for task control) and <method> is a description 
+ * of how this plugin satisfies that application.  SLURM will only load
+ * a task plugin if the plugin_type string has a prefix of "task/".
+ *
+ * plugin_version - an unsigned 32-bit integer giving the version number
+ * of the plugin.  If major and minor revisions are desired, the major
+ * version number may be multiplied by a suitable magnitude constant such
+ * as 100 or 1000.  Various SLURM versions will likely require a certain
+ * minimum version for their plugins as this API matures.
+ */
+const char plugin_name[]        = "topology 3d_torus plugin";
+const char plugin_type[]        = "topology/3d_torus";
+const uint32_t plugin_version   = 100;
+
+extern void nodes_to_hilbert_curve(void);
+
+/*
+ * init() is called when the plugin is loaded, before any other functions
+ *	are called.  Put global initialization here.
+ */
+extern int init(void)
+{
+	verbose("%s loaded", plugin_name);
+	return SLURM_SUCCESS;
+}
+
+/*
+ * fini() is called when the plugin is removed. Clear any allocated 
+ *	storage here.
+ */
+extern int fini(void)
+{
+	return SLURM_SUCCESS;
+}
+
+/*
+ * topo_build_config - build or rebuild system topology information
+ *	after a system startup or reconfiguration.
+ */
+extern int topo_build_config(void)
+{
+#ifndef HAVE_BG
+	nodes_to_hilbert_curve();
+#endif
+	return SLURM_SUCCESS;
+}
+
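
The new plugin exports the generic symbols (plugin_name, plugin_type,
plugin_version) plus init(), fini() and topo_build_config(). SLURM's own
loader lives in src/common/plugin.c and is not shown in this diff; purely
as an illustration of the symbol contract described in the header comment
above, a dlopen()-based consumer could verify the "topology/" prefix like
this (hypothetical code, not SLURM's actual loader):

	#include <dlfcn.h>
	#include <string.h>

	/* Open a plugin, check its type prefix, run topo_build_config(). */
	static int load_topology_plugin(const char *path)
	{
		void *h = dlopen(path, RTLD_LAZY);
		const char *type;
		int (*build)(void);

		if (!h)
			return -1;
		type  = (const char *) dlsym(h, "plugin_type");
		build = (int (*)(void)) dlsym(h, "topo_build_config");
		if (!type || !build || strncmp(type, "topology/", 9)) {
			dlclose(h);
			return -1;
		}
		return build();	/* 0 == SLURM_SUCCESS */
	}
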
diff --git a/src/plugins/topology/Makefile.am b/src/plugins/topology/Makefile.am
new file mode 100644
index 0000000000000000000000000000000000000000..9d076b3e4e0b5235ca48842ebefe3694865e684f
--- /dev/null
+++ b/src/plugins/topology/Makefile.am
@@ -0,0 +1,3 @@
+# Makefile for topology plugins
+
+SUBDIRS = 3d_torus none tree
diff --git a/src/plugins/topology/Makefile.in b/src/plugins/topology/Makefile.in
new file mode 100644
index 0000000000000000000000000000000000000000..7b98f6181d6566395554d5ba778a02c91c94ebc1
--- /dev/null
+++ b/src/plugins/topology/Makefile.in
@@ -0,0 +1,576 @@
+# Makefile.in generated by automake 1.10.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
+# 2003, 2004, 2005, 2006, 2007, 2008  Free Software Foundation, Inc.
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# Makefile for topology plugins
+VPATH = @srcdir@
+pkgdatadir = $(datadir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+target_triplet = @target@
+subdir = src/plugins/topology
+DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
+	$(top_srcdir)/auxdir/slurm.m4 \
+	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
+	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
+	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
+	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
+	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_databases.m4 \
+	$(top_srcdir)/auxdir/x_ac_debug.m4 \
+	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
+	$(top_srcdir)/auxdir/x_ac_federation.m4 \
+	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
+	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_munge.m4 \
+	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_pam.m4 \
+	$(top_srcdir)/auxdir/x_ac_ptrace.m4 \
+	$(top_srcdir)/auxdir/x_ac_readline.m4 \
+	$(top_srcdir)/auxdir/x_ac_setpgrp.m4 \
+	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
+	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
+	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
+	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+	$(ACLOCAL_M4)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h $(top_builddir)/slurm/slurm.h
+CONFIG_CLEAN_FILES =
+SOURCES =
+DIST_SOURCES =
+RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \
+	html-recursive info-recursive install-data-recursive \
+	install-dvi-recursive install-exec-recursive \
+	install-html-recursive install-info-recursive \
+	install-pdf-recursive install-ps-recursive install-recursive \
+	installcheck-recursive installdirs-recursive pdf-recursive \
+	ps-recursive uninstall-recursive
+RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive	\
+  distclean-recursive maintainer-clean-recursive
+ETAGS = etags
+CTAGS = ctags
+DIST_SUBDIRS = $(SUBDIRS)
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AR = @AR@
+AUTHD_CFLAGS = @AUTHD_CFLAGS@
+AUTHD_LIBS = @AUTHD_LIBS@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
+BLUEGENE_LOADED = @BLUEGENE_LOADED@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CMD_LDFLAGS = @CMD_LDFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CXX = @CXX@
+CXXCPP = @CXXCPP@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+DSYMUTIL = @DSYMUTIL@
+ECHO = @ECHO@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+ELAN_LIBS = @ELAN_LIBS@
+EXEEXT = @EXEEXT@
+F77 = @F77@
+FEDERATION_LDFLAGS = @FEDERATION_LDFLAGS@
+FFLAGS = @FFLAGS@
+GREP = @GREP@
+GTK2_CFLAGS = @GTK2_CFLAGS@
+GTK2_LIBS = @GTK2_LIBS@
+HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@
+HAVEPGCONFIG = @HAVEPGCONFIG@
+HAVEPKGCONFIG = @HAVEPKGCONFIG@
+HAVE_AIX = @HAVE_AIX@
+HAVE_ELAN = @HAVE_ELAN@
+HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_OPENSSL = @HAVE_OPENSSL@
+HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+LDFLAGS = @LDFLAGS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBTOOL = @LIBTOOL@
+LIB_LDFLAGS = @LIB_LDFLAGS@
+LN_S = @LN_S@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MKDIR_P = @MKDIR_P@
+MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@
+MUNGE_LDFLAGS = @MUNGE_LDFLAGS@
+MUNGE_LIBS = @MUNGE_LIBS@
+MYSQL_CFLAGS = @MYSQL_CFLAGS@
+MYSQL_LIBS = @MYSQL_LIBS@
+NCURSES = @NCURSES@
+NMEDIT = @NMEDIT@
+NUMA_LIBS = @NUMA_LIBS@
+OBJEXT = @OBJEXT@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PAM_LIBS = @PAM_LIBS@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PGSQL_CFLAGS = @PGSQL_CFLAGS@
+PGSQL_LIBS = @PGSQL_LIBS@
+PLPA_LIBS = @PLPA_LIBS@
+PROCTRACKDIR = @PROCTRACKDIR@
+PROJECT = @PROJECT@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+RANLIB = @RANLIB@
+READLINE_LIBS = @READLINE_LIBS@
+RELEASE = @RELEASE@
+SED = @SED@
+SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
+SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SLURMCTLD_PORT = @SLURMCTLD_PORT@
+SLURMDBD_PORT = @SLURMDBD_PORT@
+SLURMD_PORT = @SLURMD_PORT@
+SLURM_API_AGE = @SLURM_API_AGE@
+SLURM_API_CURRENT = @SLURM_API_CURRENT@
+SLURM_API_MAJOR = @SLURM_API_MAJOR@
+SLURM_API_REVISION = @SLURM_API_REVISION@
+SLURM_API_VERSION = @SLURM_API_VERSION@
+SLURM_MAJOR = @SLURM_MAJOR@
+SLURM_MICRO = @SLURM_MICRO@
+SLURM_MINOR = @SLURM_MINOR@
+SLURM_PREFIX = @SLURM_PREFIX@
+SLURM_VERSION = @SLURM_VERSION@
+SO_LDFLAGS = @SO_LDFLAGS@
+SSL_CPPFLAGS = @SSL_CPPFLAGS@
+SSL_LDFLAGS = @SSL_LDFLAGS@
+SSL_LIBS = @SSL_LIBS@
+STRIP = @STRIP@
+UTIL_LIBS = @UTIL_LIBS@
+VERSION = @VERSION@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+ac_ct_F77 = @ac_ct_F77@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_vendor = @build_vendor@
+builddir = @builddir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target = @target@
+target_alias = @target_alias@
+target_cpu = @target_cpu@
+target_os = @target_os@
+target_vendor = @target_vendor@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+SUBDIRS = 3d_torus none tree
+all: all-recursive
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am  $(am__configure_deps)
+	@for dep in $?; do \
+	  case '$(am__configure_deps)' in \
+	    *$$dep*) \
+	      cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh \
+		&& exit 0; \
+	      exit 1;; \
+	  esac; \
+	done; \
+	echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu  src/plugins/topology/Makefile'; \
+	cd $(top_srcdir) && \
+	  $(AUTOMAKE) --gnu  src/plugins/topology/Makefile
+.PRECIOUS: Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+	@case '$?' in \
+	  *config.status*) \
+	    cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+	  *) \
+	    echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+	    cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+	esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+mostlyclean-libtool:
+	-rm -f *.lo
+
+clean-libtool:
+	-rm -rf .libs _libs
+
+# This directory's subdirectories are mostly independent; you can cd
+# into them and run `make' without going through this Makefile.
+# To change the values of `make' variables: instead of editing Makefiles,
+# (1) if the variable is set in `config.status', edit `config.status'
+#     (which will cause the Makefiles to be regenerated when you run `make');
+# (2) otherwise, pass the desired values on the `make' command line.
+$(RECURSIVE_TARGETS):
+	@failcom='exit 1'; \
+	for f in x $$MAKEFLAGS; do \
+	  case $$f in \
+	    *=* | --[!k]*);; \
+	    *k*) failcom='fail=yes';; \
+	  esac; \
+	done; \
+	dot_seen=no; \
+	target=`echo $@ | sed s/-recursive//`; \
+	list='$(SUBDIRS)'; for subdir in $$list; do \
+	  echo "Making $$target in $$subdir"; \
+	  if test "$$subdir" = "."; then \
+	    dot_seen=yes; \
+	    local_target="$$target-am"; \
+	  else \
+	    local_target="$$target"; \
+	  fi; \
+	  (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \
+	  || eval $$failcom; \
+	done; \
+	if test "$$dot_seen" = "no"; then \
+	  $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \
+	fi; test -z "$$fail"
+
+$(RECURSIVE_CLEAN_TARGETS):
+	@failcom='exit 1'; \
+	for f in x $$MAKEFLAGS; do \
+	  case $$f in \
+	    *=* | --[!k]*);; \
+	    *k*) failcom='fail=yes';; \
+	  esac; \
+	done; \
+	dot_seen=no; \
+	case "$@" in \
+	  distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \
+	  *) list='$(SUBDIRS)' ;; \
+	esac; \
+	rev=''; for subdir in $$list; do \
+	  if test "$$subdir" = "."; then :; else \
+	    rev="$$subdir $$rev"; \
+	  fi; \
+	done; \
+	rev="$$rev ."; \
+	target=`echo $@ | sed s/-recursive//`; \
+	for subdir in $$rev; do \
+	  echo "Making $$target in $$subdir"; \
+	  if test "$$subdir" = "."; then \
+	    local_target="$$target-am"; \
+	  else \
+	    local_target="$$target"; \
+	  fi; \
+	  (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \
+	  || eval $$failcom; \
+	done && test -z "$$fail"
+tags-recursive:
+	list='$(SUBDIRS)'; for subdir in $$list; do \
+	  test "$$subdir" = . || (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags); \
+	done
+ctags-recursive:
+	list='$(SUBDIRS)'; for subdir in $$list; do \
+	  test "$$subdir" = . || (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) ctags); \
+	done
+
+ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES)
+	list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
+	unique=`for i in $$list; do \
+	    if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+	  done | \
+	  $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
+	      END { if (nonempty) { for (i in files) print i; }; }'`; \
+	mkid -fID $$unique
+tags: TAGS
+
+TAGS: tags-recursive $(HEADERS) $(SOURCES)  $(TAGS_DEPENDENCIES) \
+		$(TAGS_FILES) $(LISP)
+	tags=; \
+	here=`pwd`; \
+	if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \
+	  include_option=--etags-include; \
+	  empty_fix=.; \
+	else \
+	  include_option=--include; \
+	  empty_fix=; \
+	fi; \
+	list='$(SUBDIRS)'; for subdir in $$list; do \
+	  if test "$$subdir" = .; then :; else \
+	    test ! -f $$subdir/TAGS || \
+	      tags="$$tags $$include_option=$$here/$$subdir/TAGS"; \
+	  fi; \
+	done; \
+	list='$(SOURCES) $(HEADERS)  $(LISP) $(TAGS_FILES)'; \
+	unique=`for i in $$list; do \
+	    if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+	  done | \
+	  $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
+	      END { if (nonempty) { for (i in files) print i; }; }'`; \
+	if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \
+	  test -n "$$unique" || unique=$$empty_fix; \
+	  $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+	    $$tags $$unique; \
+	fi
+ctags: CTAGS
+CTAGS: ctags-recursive $(HEADERS) $(SOURCES)  $(TAGS_DEPENDENCIES) \
+		$(TAGS_FILES) $(LISP)
+	tags=; \
+	list='$(SOURCES) $(HEADERS)  $(LISP) $(TAGS_FILES)'; \
+	unique=`for i in $$list; do \
+	    if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+	  done | \
+	  $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
+	      END { if (nonempty) { for (i in files) print i; }; }'`; \
+	test -z "$(CTAGS_ARGS)$$tags$$unique" \
+	  || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
+	     $$tags $$unique
+
+GTAGS:
+	here=`$(am__cd) $(top_builddir) && pwd` \
+	  && cd $(top_srcdir) \
+	  && gtags -i $(GTAGS_ARGS) $$here
+
+distclean-tags:
+	-rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
+
+distdir: $(DISTFILES)
+	@srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+	topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+	list='$(DISTFILES)'; \
+	  dist_files=`for file in $$list; do echo $$file; done | \
+	  sed -e "s|^$$srcdirstrip/||;t" \
+	      -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+	case $$dist_files in \
+	  */*) $(MKDIR_P) `echo "$$dist_files" | \
+			   sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+			   sort -u` ;; \
+	esac; \
+	for file in $$dist_files; do \
+	  if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+	  if test -d $$d/$$file; then \
+	    dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+	    if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+	      cp -pR $(srcdir)/$$file $(distdir)$$dir || exit 1; \
+	    fi; \
+	    cp -pR $$d/$$file $(distdir)$$dir || exit 1; \
+	  else \
+	    test -f $(distdir)/$$file \
+	    || cp -p $$d/$$file $(distdir)/$$file \
+	    || exit 1; \
+	  fi; \
+	done
+	list='$(DIST_SUBDIRS)'; for subdir in $$list; do \
+	  if test "$$subdir" = .; then :; else \
+	    test -d "$(distdir)/$$subdir" \
+	    || $(MKDIR_P) "$(distdir)/$$subdir" \
+	    || exit 1; \
+	    distdir=`$(am__cd) $(distdir) && pwd`; \
+	    top_distdir=`$(am__cd) $(top_distdir) && pwd`; \
+	    (cd $$subdir && \
+	      $(MAKE) $(AM_MAKEFLAGS) \
+	        top_distdir="$$top_distdir" \
+	        distdir="$$distdir/$$subdir" \
+		am__remove_distdir=: \
+		am__skip_length_check=: \
+	        distdir) \
+	      || exit 1; \
+	  fi; \
+	done
+check-am: all-am
+check: check-recursive
+all-am: Makefile
+installdirs: installdirs-recursive
+installdirs-am:
+install: install-recursive
+install-exec: install-exec-recursive
+install-data: install-data-recursive
+uninstall: uninstall-recursive
+
+install-am: all-am
+	@$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-recursive
+install-strip:
+	$(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+	  install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+	  `test -z '$(STRIP)' || \
+	    echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+	-test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+
+maintainer-clean-generic:
+	@echo "This command is intended for maintainers to use"
+	@echo "it deletes files that may require special tools to rebuild."
+clean: clean-recursive
+
+clean-am: clean-generic clean-libtool mostlyclean-am
+
+distclean: distclean-recursive
+	-rm -f Makefile
+distclean-am: clean-am distclean-generic distclean-tags
+
+dvi: dvi-recursive
+
+dvi-am:
+
+html: html-recursive
+
+info: info-recursive
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-recursive
+
+install-exec-am:
+
+install-html: install-html-recursive
+
+install-info: install-info-recursive
+
+install-man:
+
+install-pdf: install-pdf-recursive
+
+install-ps: install-ps-recursive
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-recursive
+	-rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-recursive
+
+mostlyclean-am: mostlyclean-generic mostlyclean-libtool
+
+pdf: pdf-recursive
+
+pdf-am:
+
+ps: ps-recursive
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) install-am \
+	install-strip
+
+.PHONY: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) CTAGS GTAGS \
+	all all-am check check-am clean clean-generic clean-libtool \
+	ctags ctags-recursive distclean distclean-generic \
+	distclean-libtool distclean-tags distdir dvi dvi-am html \
+	html-am info info-am install install-am install-data \
+	install-data-am install-dvi install-dvi-am install-exec \
+	install-exec-am install-html install-html-am install-info \
+	install-info-am install-man install-pdf install-pdf-am \
+	install-ps install-ps-am install-strip installcheck \
+	installcheck-am installdirs installdirs-am maintainer-clean \
+	maintainer-clean-generic mostlyclean mostlyclean-generic \
+	mostlyclean-libtool pdf pdf-am ps ps-am tags tags-recursive \
+	uninstall uninstall-am
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/src/plugins/topology/none/Makefile.am b/src/plugins/topology/none/Makefile.am
new file mode 100644
index 0000000000000000000000000000000000000000..58d8f0ed4cc8655c4ff314c030181f690067090b
--- /dev/null
+++ b/src/plugins/topology/none/Makefile.am
@@ -0,0 +1,13 @@
+# Makefile for topology/none plugin
+
+AUTOMAKE_OPTIONS = foreign
+
+PLUGIN_FLAGS = -module -avoid-version --export-dynamic 
+
+INCLUDES = -I$(top_srcdir) -I$(top_srcdir)/src/common
+
+pkglib_LTLIBRARIES = topology_none.la
+
+# Null topology plugin.
+topology_none_la_SOURCES = topology_none.c
+topology_none_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS)
diff --git a/src/plugins/topology/none/Makefile.in b/src/plugins/topology/none/Makefile.in
new file mode 100644
index 0000000000000000000000000000000000000000..4356e14f6e5f00b53e23ed90d253ce796bbe2f54
--- /dev/null
+++ b/src/plugins/topology/none/Makefile.in
@@ -0,0 +1,566 @@
+# Makefile.in generated by automake 1.10.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
+# 2003, 2004, 2005, 2006, 2007, 2008  Free Software Foundation, Inc.
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# Makefile for topology/none plugin
+
+VPATH = @srcdir@
+pkgdatadir = $(datadir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+target_triplet = @target@
+subdir = src/plugins/topology/none
+DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
+	$(top_srcdir)/auxdir/slurm.m4 \
+	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
+	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
+	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
+	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
+	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_databases.m4 \
+	$(top_srcdir)/auxdir/x_ac_debug.m4 \
+	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
+	$(top_srcdir)/auxdir/x_ac_federation.m4 \
+	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
+	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_munge.m4 \
+	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_pam.m4 \
+	$(top_srcdir)/auxdir/x_ac_ptrace.m4 \
+	$(top_srcdir)/auxdir/x_ac_readline.m4 \
+	$(top_srcdir)/auxdir/x_ac_setpgrp.m4 \
+	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
+	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
+	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
+	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+	$(ACLOCAL_M4)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h $(top_builddir)/slurm/slurm.h
+CONFIG_CLEAN_FILES =
+am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
+am__vpath_adj = case $$p in \
+    $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
+    *) f=$$p;; \
+  esac;
+am__strip_dir = `echo $$p | sed -e 's|^.*/||'`;
+am__installdirs = "$(DESTDIR)$(pkglibdir)"
+pkglibLTLIBRARIES_INSTALL = $(INSTALL)
+LTLIBRARIES = $(pkglib_LTLIBRARIES)
+topology_none_la_LIBADD =
+am_topology_none_la_OBJECTS = topology_none.lo
+topology_none_la_OBJECTS = $(am_topology_none_la_OBJECTS)
+topology_none_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \
+	$(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \
+	$(topology_none_la_LDFLAGS) $(LDFLAGS) -o $@
+DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm
+depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp
+am__depfiles_maybe = depfiles
+COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \
+	$(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
+LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
+	--mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \
+	$(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
+CCLD = $(CC)
+LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
+	--mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \
+	$(LDFLAGS) -o $@
+SOURCES = $(topology_none_la_SOURCES)
+DIST_SOURCES = $(topology_none_la_SOURCES)
+ETAGS = etags
+CTAGS = ctags
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AR = @AR@
+AUTHD_CFLAGS = @AUTHD_CFLAGS@
+AUTHD_LIBS = @AUTHD_LIBS@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
+BLUEGENE_LOADED = @BLUEGENE_LOADED@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CMD_LDFLAGS = @CMD_LDFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CXX = @CXX@
+CXXCPP = @CXXCPP@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+DSYMUTIL = @DSYMUTIL@
+ECHO = @ECHO@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+ELAN_LIBS = @ELAN_LIBS@
+EXEEXT = @EXEEXT@
+F77 = @F77@
+FEDERATION_LDFLAGS = @FEDERATION_LDFLAGS@
+FFLAGS = @FFLAGS@
+GREP = @GREP@
+GTK2_CFLAGS = @GTK2_CFLAGS@
+GTK2_LIBS = @GTK2_LIBS@
+HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@
+HAVEPGCONFIG = @HAVEPGCONFIG@
+HAVEPKGCONFIG = @HAVEPKGCONFIG@
+HAVE_AIX = @HAVE_AIX@
+HAVE_ELAN = @HAVE_ELAN@
+HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_OPENSSL = @HAVE_OPENSSL@
+HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+LDFLAGS = @LDFLAGS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBTOOL = @LIBTOOL@
+LIB_LDFLAGS = @LIB_LDFLAGS@
+LN_S = @LN_S@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MKDIR_P = @MKDIR_P@
+MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@
+MUNGE_LDFLAGS = @MUNGE_LDFLAGS@
+MUNGE_LIBS = @MUNGE_LIBS@
+MYSQL_CFLAGS = @MYSQL_CFLAGS@
+MYSQL_LIBS = @MYSQL_LIBS@
+NCURSES = @NCURSES@
+NMEDIT = @NMEDIT@
+NUMA_LIBS = @NUMA_LIBS@
+OBJEXT = @OBJEXT@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PAM_LIBS = @PAM_LIBS@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PGSQL_CFLAGS = @PGSQL_CFLAGS@
+PGSQL_LIBS = @PGSQL_LIBS@
+PLPA_LIBS = @PLPA_LIBS@
+PROCTRACKDIR = @PROCTRACKDIR@
+PROJECT = @PROJECT@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+RANLIB = @RANLIB@
+READLINE_LIBS = @READLINE_LIBS@
+RELEASE = @RELEASE@
+SED = @SED@
+SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
+SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SLURMCTLD_PORT = @SLURMCTLD_PORT@
+SLURMDBD_PORT = @SLURMDBD_PORT@
+SLURMD_PORT = @SLURMD_PORT@
+SLURM_API_AGE = @SLURM_API_AGE@
+SLURM_API_CURRENT = @SLURM_API_CURRENT@
+SLURM_API_MAJOR = @SLURM_API_MAJOR@
+SLURM_API_REVISION = @SLURM_API_REVISION@
+SLURM_API_VERSION = @SLURM_API_VERSION@
+SLURM_MAJOR = @SLURM_MAJOR@
+SLURM_MICRO = @SLURM_MICRO@
+SLURM_MINOR = @SLURM_MINOR@
+SLURM_PREFIX = @SLURM_PREFIX@
+SLURM_VERSION = @SLURM_VERSION@
+SO_LDFLAGS = @SO_LDFLAGS@
+SSL_CPPFLAGS = @SSL_CPPFLAGS@
+SSL_LDFLAGS = @SSL_LDFLAGS@
+SSL_LIBS = @SSL_LIBS@
+STRIP = @STRIP@
+UTIL_LIBS = @UTIL_LIBS@
+VERSION = @VERSION@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+ac_ct_F77 = @ac_ct_F77@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_vendor = @build_vendor@
+builddir = @builddir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target = @target@
+target_alias = @target_alias@
+target_cpu = @target_cpu@
+target_os = @target_os@
+target_vendor = @target_vendor@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+AUTOMAKE_OPTIONS = foreign
+PLUGIN_FLAGS = -module -avoid-version --export-dynamic 
+INCLUDES = -I$(top_srcdir) -I$(top_srcdir)/src/common
+pkglib_LTLIBRARIES = topology_none.la
+
+# Null topology plugin.
+topology_none_la_SOURCES = topology_none.c
+topology_none_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS)
+all: all-am
+
+.SUFFIXES:
+.SUFFIXES: .c .lo .o .obj
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am  $(am__configure_deps)
+	@for dep in $?; do \
+	  case '$(am__configure_deps)' in \
+	    *$$dep*) \
+	      cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh \
+		&& exit 0; \
+	      exit 1;; \
+	  esac; \
+	done; \
+	echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign  src/plugins/topology/none/Makefile'; \
+	cd $(top_srcdir) && \
+	  $(AUTOMAKE) --foreign  src/plugins/topology/none/Makefile
+.PRECIOUS: Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+	@case '$?' in \
+	  *config.status*) \
+	    cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+	  *) \
+	    echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+	    cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+	esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES)
+	@$(NORMAL_INSTALL)
+	test -z "$(pkglibdir)" || $(MKDIR_P) "$(DESTDIR)$(pkglibdir)"
+	@list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \
+	  if test -f $$p; then \
+	    f=$(am__strip_dir) \
+	    echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) '$$p' '$(DESTDIR)$(pkglibdir)/$$f'"; \
+	    $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) "$$p" "$(DESTDIR)$(pkglibdir)/$$f"; \
+	  else :; fi; \
+	done
+
+uninstall-pkglibLTLIBRARIES:
+	@$(NORMAL_UNINSTALL)
+	@list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \
+	  p=$(am__strip_dir) \
+	  echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$p'"; \
+	  $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$p"; \
+	done
+
+clean-pkglibLTLIBRARIES:
+	-test -z "$(pkglib_LTLIBRARIES)" || rm -f $(pkglib_LTLIBRARIES)
+	@list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \
+	  dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \
+	  test "$$dir" != "$$p" || dir=.; \
+	  echo "rm -f \"$${dir}/so_locations\""; \
+	  rm -f "$${dir}/so_locations"; \
+	done
+topology_none.la: $(topology_none_la_OBJECTS) $(topology_none_la_DEPENDENCIES) 
+	$(topology_none_la_LINK) -rpath $(pkglibdir) $(topology_none_la_OBJECTS) $(topology_none_la_LIBADD) $(LIBS)
+
+mostlyclean-compile:
+	-rm -f *.$(OBJEXT)
+
+distclean-compile:
+	-rm -f *.tab.c
+
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/topology_none.Plo@am__quote@
+
+.c.o:
+@am__fastdepCC_TRUE@	$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCC_TRUE@	mv -f $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(COMPILE) -c $<
+
+.c.obj:
+@am__fastdepCC_TRUE@	$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'`
+@am__fastdepCC_TRUE@	mv -f $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(COMPILE) -c `$(CYGPATH_W) '$<'`
+
+.c.lo:
+@am__fastdepCC_TRUE@	$(LTCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCC_TRUE@	mv -f $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LTCOMPILE) -c -o $@ $<
+
+mostlyclean-libtool:
+	-rm -f *.lo
+
+clean-libtool:
+	-rm -rf .libs _libs
+
+ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES)
+	list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
+	unique=`for i in $$list; do \
+	    if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+	  done | \
+	  $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
+	      END { if (nonempty) { for (i in files) print i; }; }'`; \
+	mkid -fID $$unique
+tags: TAGS
+
+TAGS:  $(HEADERS) $(SOURCES)  $(TAGS_DEPENDENCIES) \
+		$(TAGS_FILES) $(LISP)
+	tags=; \
+	here=`pwd`; \
+	list='$(SOURCES) $(HEADERS)  $(LISP) $(TAGS_FILES)'; \
+	unique=`for i in $$list; do \
+	    if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+	  done | \
+	  $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
+	      END { if (nonempty) { for (i in files) print i; }; }'`; \
+	if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \
+	  test -n "$$unique" || unique=$$empty_fix; \
+	  $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+	    $$tags $$unique; \
+	fi
+ctags: CTAGS
+CTAGS:  $(HEADERS) $(SOURCES)  $(TAGS_DEPENDENCIES) \
+		$(TAGS_FILES) $(LISP)
+	tags=; \
+	list='$(SOURCES) $(HEADERS)  $(LISP) $(TAGS_FILES)'; \
+	unique=`for i in $$list; do \
+	    if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+	  done | \
+	  $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
+	      END { if (nonempty) { for (i in files) print i; }; }'`; \
+	test -z "$(CTAGS_ARGS)$$tags$$unique" \
+	  || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
+	     $$tags $$unique
+
+GTAGS:
+	here=`$(am__cd) $(top_builddir) && pwd` \
+	  && cd $(top_srcdir) \
+	  && gtags -i $(GTAGS_ARGS) $$here
+
+distclean-tags:
+	-rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
+
+distdir: $(DISTFILES)
+	@srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+	topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+	list='$(DISTFILES)'; \
+	  dist_files=`for file in $$list; do echo $$file; done | \
+	  sed -e "s|^$$srcdirstrip/||;t" \
+	      -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+	case $$dist_files in \
+	  */*) $(MKDIR_P) `echo "$$dist_files" | \
+			   sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+			   sort -u` ;; \
+	esac; \
+	for file in $$dist_files; do \
+	  if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+	  if test -d $$d/$$file; then \
+	    dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+	    if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+	      cp -pR $(srcdir)/$$file $(distdir)$$dir || exit 1; \
+	    fi; \
+	    cp -pR $$d/$$file $(distdir)$$dir || exit 1; \
+	  else \
+	    test -f $(distdir)/$$file \
+	    || cp -p $$d/$$file $(distdir)/$$file \
+	    || exit 1; \
+	  fi; \
+	done
+check-am: all-am
+check: check-am
+all-am: Makefile $(LTLIBRARIES)
+installdirs:
+	for dir in "$(DESTDIR)$(pkglibdir)"; do \
+	  test -z "$$dir" || $(MKDIR_P) "$$dir"; \
+	done
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+	@$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+	$(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+	  install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+	  `test -z '$(STRIP)' || \
+	    echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+	-test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+
+maintainer-clean-generic:
+	@echo "This command is intended for maintainers to use"
+	@echo "it deletes files that may require special tools to rebuild."
+clean: clean-am
+
+clean-am: clean-generic clean-libtool clean-pkglibLTLIBRARIES \
+	mostlyclean-am
+
+distclean: distclean-am
+	-rm -rf ./$(DEPDIR)
+	-rm -f Makefile
+distclean-am: clean-am distclean-compile distclean-generic \
+	distclean-tags
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-exec-am: install-pkglibLTLIBRARIES
+
+install-html: install-html-am
+
+install-info: install-info-am
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-ps: install-ps-am
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+	-rm -rf ./$(DEPDIR)
+	-rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-compile mostlyclean-generic \
+	mostlyclean-libtool
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am: uninstall-pkglibLTLIBRARIES
+
+.MAKE: install-am install-strip
+
+.PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \
+	clean-libtool clean-pkglibLTLIBRARIES ctags distclean \
+	distclean-compile distclean-generic distclean-libtool \
+	distclean-tags distdir dvi dvi-am html html-am info info-am \
+	install install-am install-data install-data-am install-dvi \
+	install-dvi-am install-exec install-exec-am install-html \
+	install-html-am install-info install-info-am install-man \
+	install-pdf install-pdf-am install-pkglibLTLIBRARIES \
+	install-ps install-ps-am install-strip installcheck \
+	installcheck-am installdirs maintainer-clean \
+	maintainer-clean-generic mostlyclean mostlyclean-compile \
+	mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \
+	tags uninstall uninstall-am uninstall-pkglibLTLIBRARIES
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/src/plugins/topology/none/topology_none.c b/src/plugins/topology/none/topology_none.c
new file mode 100644
index 0000000000000000000000000000000000000000..723201216d59e22b8ab3cb08daa5e5dbd921782c
--- /dev/null
+++ b/src/plugins/topology/none/topology_none.c
@@ -0,0 +1,107 @@
+/*****************************************************************************\
+ *  topology_none.c - Default for system topology
+ *****************************************************************************
+ *  Copyright (C) 2009 Lawrence Livermore National Security.
+ *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+ *  Written by Morris Jette <jette1@llnl.gov>
+ *  CODE-OCEC-09-009. All rights reserved.
+ *  
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
+ *  
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission 
+ *  to link the code of portions of this program with the OpenSSL library under 
+ *  certain conditions as described in each individual source file, and 
+ *  distribute linked combinations including the two. You must obey the GNU 
+ *  General Public License in all respects for all of the code used other than 
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this 
+ *  exception to your version of the file(s), but you are not obligated to do 
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in 
+ *  the program, then also delete it here.
+ *  
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *  
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#if     HAVE_CONFIG_H
+#  include "config.h"
+#endif
+
+#include <signal.h>
+#include <sys/types.h>
+
+#include <slurm/slurm_errno.h>
+#include "src/common/slurm_xlator.h"
+
+/*
+ * These variables are required by the generic plugin interface.  If they
+ * are not found in the plugin, the plugin loader will ignore it.
+ *
+ * plugin_name - a string giving a human-readable description of the
+ * plugin.  There is no maximum length, but the symbol must refer to
+ * a valid string.
+ *
+ * plugin_type - a string suggesting the type of the plugin or its
+ * applicability to a particular form of data or method of data handling.
+ * If the low-level plugin API is used, the contents of this string are
+ * unimportant and may be anything.  SLURM uses the higher-level plugin
+ * interface which requires this string to be of the form
+ *
+ *      <application>/<method>
+ *
+ * where <application> is a description of the intended application of
+ * the plugin (e.g., "task" for task control) and <method> is a description 
+ * of how this plugin satisfies that application.  SLURM will only load
+ * a task plugin if the plugin_type string has a prefix of "task/".
+ *
+ * plugin_version - an unsigned 32-bit integer giving the version number
+ * of the plugin.  If major and minor revisions are desired, the major
+ * version number may be multiplied by a suitable magnitude constant such
+ * as 100 or 1000.  Various SLURM versions will likely require a certain
+ * minimum version for their plugins as this API matures.
+ */
+const char plugin_name[]        = "topology NONE plugin";
+const char plugin_type[]        = "topology/none";
+const uint32_t plugin_version   = 100;
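+/* (Using the magnitude-100 scheme described above, 100 encodes plugin
+ * version 1.00; a hypothetical 1.02 release would set plugin_version
+ * to 102.) */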
+
+/*
+ * init() is called when the plugin is loaded, before any other functions
+ *	are called.  Put global initialization here.
+ */
+extern int init(void)
+{
+	verbose("%s loaded", plugin_name);
+	return SLURM_SUCCESS;
+}
+
+/*
+ * fini() is called when the plugin is removed. Clear any allocated 
+ *	storage here.
+ */
+extern int fini(void)
+{
+	return SLURM_SUCCESS;
+}
+
+/*
+ * topo_build_config - build or rebuild system topology information
+ *	after a system startup or reconfiguration.
+ */
+extern int topo_build_config(void)
+{
+	return SLURM_SUCCESS;
+}
+
diff --git a/src/plugins/topology/tree/Makefile.am b/src/plugins/topology/tree/Makefile.am
new file mode 100644
index 0000000000000000000000000000000000000000..5fd70e7478a4009ea95df1a8b74a2e699b45acc4
--- /dev/null
+++ b/src/plugins/topology/tree/Makefile.am
@@ -0,0 +1,14 @@
+# Makefile for topology/tree plugin
+
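+# Bake the default topology.conf location into the plugin; topology_tree.c
+# falls back to TOPOLOGY_CONFIG_FILE when SLURM_CONF is not set.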
+CPPFLAGS =  -DTOPOLOGY_CONFIG_FILE=\"$(sysconfdir)/topology.conf\"
+AUTOMAKE_OPTIONS = foreign
+
+PLUGIN_FLAGS = -module -avoid-version --export-dynamic 
+
+INCLUDES = -I$(top_srcdir) -I$(top_srcdir)/src/common
+
+pkglib_LTLIBRARIES = topology_tree.la
+
+# Tree topology plugin.
+topology_tree_la_SOURCES = topology_tree.c
+topology_tree_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS)
diff --git a/src/plugins/topology/tree/Makefile.in b/src/plugins/topology/tree/Makefile.in
new file mode 100644
index 0000000000000000000000000000000000000000..1c413519fedc0fc3d4b76f80fdd1c2a30f6b108b
--- /dev/null
+++ b/src/plugins/topology/tree/Makefile.in
@@ -0,0 +1,566 @@
+# Makefile.in generated by automake 1.10.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
+# 2003, 2004, 2005, 2006, 2007, 2008  Free Software Foundation, Inc.
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# Makefile for topology/tree plugin
+
+VPATH = @srcdir@
+pkgdatadir = $(datadir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+target_triplet = @target@
+subdir = src/plugins/topology/tree
+DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
+	$(top_srcdir)/auxdir/slurm.m4 \
+	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
+	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
+	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
+	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
+	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_databases.m4 \
+	$(top_srcdir)/auxdir/x_ac_debug.m4 \
+	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
+	$(top_srcdir)/auxdir/x_ac_federation.m4 \
+	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
+	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_munge.m4 \
+	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_pam.m4 \
+	$(top_srcdir)/auxdir/x_ac_ptrace.m4 \
+	$(top_srcdir)/auxdir/x_ac_readline.m4 \
+	$(top_srcdir)/auxdir/x_ac_setpgrp.m4 \
+	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
+	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
+	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
+	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+	$(ACLOCAL_M4)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h $(top_builddir)/slurm/slurm.h
+CONFIG_CLEAN_FILES =
+am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
+am__vpath_adj = case $$p in \
+    $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
+    *) f=$$p;; \
+  esac;
+am__strip_dir = `echo $$p | sed -e 's|^.*/||'`;
+am__installdirs = "$(DESTDIR)$(pkglibdir)"
+pkglibLTLIBRARIES_INSTALL = $(INSTALL)
+LTLIBRARIES = $(pkglib_LTLIBRARIES)
+topology_tree_la_LIBADD =
+am_topology_tree_la_OBJECTS = topology_tree.lo
+topology_tree_la_OBJECTS = $(am_topology_tree_la_OBJECTS)
+topology_tree_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \
+	$(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \
+	$(topology_tree_la_LDFLAGS) $(LDFLAGS) -o $@
+DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm
+depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp
+am__depfiles_maybe = depfiles
+COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \
+	$(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
+LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
+	--mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \
+	$(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
+CCLD = $(CC)
+LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
+	--mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \
+	$(LDFLAGS) -o $@
+SOURCES = $(topology_tree_la_SOURCES)
+DIST_SOURCES = $(topology_tree_la_SOURCES)
+ETAGS = etags
+CTAGS = ctags
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AR = @AR@
+AUTHD_CFLAGS = @AUTHD_CFLAGS@
+AUTHD_LIBS = @AUTHD_LIBS@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
+BLUEGENE_LOADED = @BLUEGENE_LOADED@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CMD_LDFLAGS = @CMD_LDFLAGS@
+CPP = @CPP@
+CPPFLAGS = -DTOPOLOGY_CONFIG_FILE=\"$(sysconfdir)/topology.conf\"
+CXX = @CXX@
+CXXCPP = @CXXCPP@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+DSYMUTIL = @DSYMUTIL@
+ECHO = @ECHO@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+ELAN_LIBS = @ELAN_LIBS@
+EXEEXT = @EXEEXT@
+F77 = @F77@
+FEDERATION_LDFLAGS = @FEDERATION_LDFLAGS@
+FFLAGS = @FFLAGS@
+GREP = @GREP@
+GTK2_CFLAGS = @GTK2_CFLAGS@
+GTK2_LIBS = @GTK2_LIBS@
+HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@
+HAVEPGCONFIG = @HAVEPGCONFIG@
+HAVEPKGCONFIG = @HAVEPKGCONFIG@
+HAVE_AIX = @HAVE_AIX@
+HAVE_ELAN = @HAVE_ELAN@
+HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_OPENSSL = @HAVE_OPENSSL@
+HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+LDFLAGS = @LDFLAGS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBTOOL = @LIBTOOL@
+LIB_LDFLAGS = @LIB_LDFLAGS@
+LN_S = @LN_S@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MKDIR_P = @MKDIR_P@
+MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@
+MUNGE_LDFLAGS = @MUNGE_LDFLAGS@
+MUNGE_LIBS = @MUNGE_LIBS@
+MYSQL_CFLAGS = @MYSQL_CFLAGS@
+MYSQL_LIBS = @MYSQL_LIBS@
+NCURSES = @NCURSES@
+NMEDIT = @NMEDIT@
+NUMA_LIBS = @NUMA_LIBS@
+OBJEXT = @OBJEXT@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PAM_LIBS = @PAM_LIBS@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PGSQL_CFLAGS = @PGSQL_CFLAGS@
+PGSQL_LIBS = @PGSQL_LIBS@
+PLPA_LIBS = @PLPA_LIBS@
+PROCTRACKDIR = @PROCTRACKDIR@
+PROJECT = @PROJECT@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+RANLIB = @RANLIB@
+READLINE_LIBS = @READLINE_LIBS@
+RELEASE = @RELEASE@
+SED = @SED@
+SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
+SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SLURMCTLD_PORT = @SLURMCTLD_PORT@
+SLURMDBD_PORT = @SLURMDBD_PORT@
+SLURMD_PORT = @SLURMD_PORT@
+SLURM_API_AGE = @SLURM_API_AGE@
+SLURM_API_CURRENT = @SLURM_API_CURRENT@
+SLURM_API_MAJOR = @SLURM_API_MAJOR@
+SLURM_API_REVISION = @SLURM_API_REVISION@
+SLURM_API_VERSION = @SLURM_API_VERSION@
+SLURM_MAJOR = @SLURM_MAJOR@
+SLURM_MICRO = @SLURM_MICRO@
+SLURM_MINOR = @SLURM_MINOR@
+SLURM_PREFIX = @SLURM_PREFIX@
+SLURM_VERSION = @SLURM_VERSION@
+SO_LDFLAGS = @SO_LDFLAGS@
+SSL_CPPFLAGS = @SSL_CPPFLAGS@
+SSL_LDFLAGS = @SSL_LDFLAGS@
+SSL_LIBS = @SSL_LIBS@
+STRIP = @STRIP@
+UTIL_LIBS = @UTIL_LIBS@
+VERSION = @VERSION@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+ac_ct_F77 = @ac_ct_F77@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_vendor = @build_vendor@
+builddir = @builddir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target = @target@
+target_alias = @target_alias@
+target_cpu = @target_cpu@
+target_os = @target_os@
+target_vendor = @target_vendor@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+AUTOMAKE_OPTIONS = foreign
+PLUGIN_FLAGS = -module -avoid-version --export-dynamic 
+INCLUDES = -I$(top_srcdir) -I$(top_srcdir)/src/common
+pkglib_LTLIBRARIES = topology_tree.la
+
+# Tree topology plugin.
+topology_tree_la_SOURCES = topology_tree.c
+topology_tree_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS)
+all: all-am
+
+.SUFFIXES:
+.SUFFIXES: .c .lo .o .obj
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am  $(am__configure_deps)
+	@for dep in $?; do \
+	  case '$(am__configure_deps)' in \
+	    *$$dep*) \
+	      cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh \
+		&& exit 0; \
+	      exit 1;; \
+	  esac; \
+	done; \
+	echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign  src/plugins/topology/tree/Makefile'; \
+	cd $(top_srcdir) && \
+	  $(AUTOMAKE) --foreign  src/plugins/topology/tree/Makefile
+.PRECIOUS: Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+	@case '$?' in \
+	  *config.status*) \
+	    cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+	  *) \
+	    echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+	    cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+	esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES)
+	@$(NORMAL_INSTALL)
+	test -z "$(pkglibdir)" || $(MKDIR_P) "$(DESTDIR)$(pkglibdir)"
+	@list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \
+	  if test -f $$p; then \
+	    f=$(am__strip_dir) \
+	    echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) '$$p' '$(DESTDIR)$(pkglibdir)/$$f'"; \
+	    $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(pkglibLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) "$$p" "$(DESTDIR)$(pkglibdir)/$$f"; \
+	  else :; fi; \
+	done
+
+uninstall-pkglibLTLIBRARIES:
+	@$(NORMAL_UNINSTALL)
+	@list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \
+	  p=$(am__strip_dir) \
+	  echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$p'"; \
+	  $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$p"; \
+	done
+
+clean-pkglibLTLIBRARIES:
+	-test -z "$(pkglib_LTLIBRARIES)" || rm -f $(pkglib_LTLIBRARIES)
+	@list='$(pkglib_LTLIBRARIES)'; for p in $$list; do \
+	  dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \
+	  test "$$dir" != "$$p" || dir=.; \
+	  echo "rm -f \"$${dir}/so_locations\""; \
+	  rm -f "$${dir}/so_locations"; \
+	done
+topology_tree.la: $(topology_tree_la_OBJECTS) $(topology_tree_la_DEPENDENCIES) 
+	$(topology_tree_la_LINK) -rpath $(pkglibdir) $(topology_tree_la_OBJECTS) $(topology_tree_la_LIBADD) $(LIBS)
+
+mostlyclean-compile:
+	-rm -f *.$(OBJEXT)
+
+distclean-compile:
+	-rm -f *.tab.c
+
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/topology_tree.Plo@am__quote@
+
+.c.o:
+@am__fastdepCC_TRUE@	$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCC_TRUE@	mv -f $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(COMPILE) -c $<
+
+.c.obj:
+@am__fastdepCC_TRUE@	$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'`
+@am__fastdepCC_TRUE@	mv -f $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(COMPILE) -c `$(CYGPATH_W) '$<'`
+
+.c.lo:
+@am__fastdepCC_TRUE@	$(LTCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCC_TRUE@	mv -f $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LTCOMPILE) -c -o $@ $<
+
+mostlyclean-libtool:
+	-rm -f *.lo
+
+clean-libtool:
+	-rm -rf .libs _libs
+
+ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES)
+	list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
+	unique=`for i in $$list; do \
+	    if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+	  done | \
+	  $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
+	      END { if (nonempty) { for (i in files) print i; }; }'`; \
+	mkid -fID $$unique
+tags: TAGS
+
+TAGS:  $(HEADERS) $(SOURCES)  $(TAGS_DEPENDENCIES) \
+		$(TAGS_FILES) $(LISP)
+	tags=; \
+	here=`pwd`; \
+	list='$(SOURCES) $(HEADERS)  $(LISP) $(TAGS_FILES)'; \
+	unique=`for i in $$list; do \
+	    if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+	  done | \
+	  $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
+	      END { if (nonempty) { for (i in files) print i; }; }'`; \
+	if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \
+	  test -n "$$unique" || unique=$$empty_fix; \
+	  $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+	    $$tags $$unique; \
+	fi
+ctags: CTAGS
+CTAGS:  $(HEADERS) $(SOURCES)  $(TAGS_DEPENDENCIES) \
+		$(TAGS_FILES) $(LISP)
+	tags=; \
+	list='$(SOURCES) $(HEADERS)  $(LISP) $(TAGS_FILES)'; \
+	unique=`for i in $$list; do \
+	    if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+	  done | \
+	  $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
+	      END { if (nonempty) { for (i in files) print i; }; }'`; \
+	test -z "$(CTAGS_ARGS)$$tags$$unique" \
+	  || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
+	     $$tags $$unique
+
+GTAGS:
+	here=`$(am__cd) $(top_builddir) && pwd` \
+	  && cd $(top_srcdir) \
+	  && gtags -i $(GTAGS_ARGS) $$here
+
+distclean-tags:
+	-rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
+
+distdir: $(DISTFILES)
+	@srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+	topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+	list='$(DISTFILES)'; \
+	  dist_files=`for file in $$list; do echo $$file; done | \
+	  sed -e "s|^$$srcdirstrip/||;t" \
+	      -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+	case $$dist_files in \
+	  */*) $(MKDIR_P) `echo "$$dist_files" | \
+			   sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+			   sort -u` ;; \
+	esac; \
+	for file in $$dist_files; do \
+	  if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+	  if test -d $$d/$$file; then \
+	    dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+	    if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+	      cp -pR $(srcdir)/$$file $(distdir)$$dir || exit 1; \
+	    fi; \
+	    cp -pR $$d/$$file $(distdir)$$dir || exit 1; \
+	  else \
+	    test -f $(distdir)/$$file \
+	    || cp -p $$d/$$file $(distdir)/$$file \
+	    || exit 1; \
+	  fi; \
+	done
+check-am: all-am
+check: check-am
+all-am: Makefile $(LTLIBRARIES)
+installdirs:
+	for dir in "$(DESTDIR)$(pkglibdir)"; do \
+	  test -z "$$dir" || $(MKDIR_P) "$$dir"; \
+	done
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+	@$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+	$(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+	  install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+	  `test -z '$(STRIP)' || \
+	    echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+	-test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+
+maintainer-clean-generic:
+	@echo "This command is intended for maintainers to use"
+	@echo "it deletes files that may require special tools to rebuild."
+clean: clean-am
+
+clean-am: clean-generic clean-libtool clean-pkglibLTLIBRARIES \
+	mostlyclean-am
+
+distclean: distclean-am
+	-rm -rf ./$(DEPDIR)
+	-rm -f Makefile
+distclean-am: clean-am distclean-compile distclean-generic \
+	distclean-tags
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-exec-am: install-pkglibLTLIBRARIES
+
+install-html: install-html-am
+
+install-info: install-info-am
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-ps: install-ps-am
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+	-rm -rf ./$(DEPDIR)
+	-rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-compile mostlyclean-generic \
+	mostlyclean-libtool
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am: uninstall-pkglibLTLIBRARIES
+
+.MAKE: install-am install-strip
+
+.PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \
+	clean-libtool clean-pkglibLTLIBRARIES ctags distclean \
+	distclean-compile distclean-generic distclean-libtool \
+	distclean-tags distdir dvi dvi-am html html-am info info-am \
+	install install-am install-data install-data-am install-dvi \
+	install-dvi-am install-exec install-exec-am install-html \
+	install-html-am install-info install-info-am install-man \
+	install-pdf install-pdf-am install-pkglibLTLIBRARIES \
+	install-ps install-ps-am install-strip installcheck \
+	installcheck-am installdirs maintainer-clean \
+	maintainer-clean-generic mostlyclean mostlyclean-compile \
+	mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \
+	tags uninstall uninstall-am uninstall-pkglibLTLIBRARIES
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/src/plugins/topology/tree/topology_tree.c b/src/plugins/topology/tree/topology_tree.c
new file mode 100644
index 0000000000000000000000000000000000000000..bff65785c54b32931e88d0cdb5363c3c1655ad33
--- /dev/null
+++ b/src/plugins/topology/tree/topology_tree.c
@@ -0,0 +1,423 @@
+/*****************************************************************************\
+ *  topology_tree.c - Build configuration information for hierarchical
+ *	switch topology
+ *****************************************************************************
+ *  Copyright (C) 2009 Lawrence Livermore National Security.
+ *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+ *  Written by Morris Jette <jette1@llnl.gov>
+ *  CODE-OCEC-09-009. All rights reserved.
+ *  
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
+ *  
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission 
+ *  to link the code of portions of this program with the OpenSSL library under 
+ *  certain conditions as described in each individual source file, and 
+ *  distribute linked combinations including the two. You must obey the GNU 
+ *  General Public License in all respects for all of the code used other than 
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this 
+ *  exception to your version of the file(s), but you are not obligated to do 
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in 
+ *  the program, then also delete it here.
+ *  
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *  
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#if     HAVE_CONFIG_H
+#  include "config.h"
+#endif
+
+#include <signal.h>
+#include <stdlib.h>
+#include <sys/types.h>
+
+#include <slurm/slurm_errno.h>
+#include "src/common/slurm_xlator.h"
+#include "src/slurmctld/slurmctld.h"
+
+/*
+ * These variables are required by the generic plugin interface.  If they
+ * are not found in the plugin, the plugin loader will ignore it.
+ *
+ * plugin_name - a string giving a human-readable description of the
+ * plugin.  There is no maximum length, but the symbol must refer to
+ * a valid string.
+ *
+ * plugin_type - a string suggesting the type of the plugin or its
+ * applicability to a particular form of data or method of data handling.
+ * If the low-level plugin API is used, the contents of this string are
+ * unimportant and may be anything.  SLURM uses the higher-level plugin
+ * interface which requires this string to be of the form
+ *
+ *      <application>/<method>
+ *
+ * where <application> is a description of the intended application of
+ * the plugin (e.g., "task" for task control) and <method> is a description 
+ * of how this plugin satisfies that application.  SLURM will only load
+ * a task plugin if the plugin_type string has a prefix of "task/".
+ *
+ * plugin_version - an unsigned 32-bit integer giving the version number
+ * of the plugin.  If major and minor revisions are desired, the major
+ * version number may be multiplied by a suitable magnitude constant such
+ * as 100 or 1000.  Various SLURM versions will likely require a certain
+ * minimum version for their plugins as this API matures.
+ */
+const char plugin_name[]        = "topology tree plugin";
+const char plugin_type[]        = "topology/tree";
+const uint32_t plugin_version   = 100;
+
+typedef struct slurm_conf_switches {
+	uint32_t link_speed;	/* link speed, arbitrary units */
+	char *nodes;		/* names of nodes directly connected to
+				 * this switch, if any */
+	char *switch_name;	/* name of this switch */
+	char *switches;		/* names of child switches directly
+				 * connected to this switch, if any */
+} slurm_conf_switches_t;
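+
+/* Each record corresponds to one line of topology.conf.  A minimal,
+ * purely hypothetical configuration that this parser would accept:
+ *
+ *	SwitchName=s0 Nodes=tux[0-3]
+ *	SwitchName=s1 Nodes=tux[4-7]
+ *	SwitchName=root Switches=s[0-1] LinkSpeed=2
+ *
+ * s0 and s1 are leaf switches (Nodes set); "root" is an interior switch
+ * naming its children with a hostlist expression in Switches. */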
+static s_p_hashtbl_t *conf_hashtbl = NULL;
+static char* topo_conf = NULL;
+
+static void _destroy_switches(void *ptr);
+static void _free_switch_record_table(void);
+static int  _get_switch_inx(const char *name);
+static char *_get_topo_conf(void);
+static void _log_switches(void);
+static int  _parse_switches(void **dest, slurm_parser_enum_t type,
+			    const char *key, const char *value,
+			    const char *line, char **leftover);
+extern int  _read_topo_file(slurm_conf_switches_t **ptr_array[]);
+static void _validate_switches(void);
+
+
+/*
+ * init() is called when the plugin is loaded, before any other functions
+ *	are called.  Put global initialization here.
+ */
+extern int init(void)
+{
+	verbose("%s loaded", plugin_name);
+	return SLURM_SUCCESS;
+}
+
+/*
+ * fini() is called when the plugin is removed. Clear any allocated 
+ *	storage here.
+ */
+extern int fini(void)
+{
+	_free_switch_record_table();
+	xfree(topo_conf);
+	return SLURM_SUCCESS;
+}
+
+/*
+ * topo_build_config - build or rebuild system topology information
+ *	after a system startup or reconfiguration.
+ */
+extern int topo_build_config(void)
+{
+	_validate_switches();
+	return SLURM_SUCCESS;
+}
+
+static void _validate_switches(void)
+{
+	slurm_conf_switches_t *ptr, **ptr_array;
+	int depth, i, j;
+	struct switch_record *switch_ptr;
+	hostlist_t hl;
+	char *child;
+	bitstr_t *multi_homed_bitmap = NULL;	/* nodes on >1 leaf switch */
+	bitstr_t *switches_bitmap = NULL;	/* nodes on any leaf switch */
+	bitstr_t *tmp_bitmap = NULL;
+
+	_free_switch_record_table();
+
+	switch_record_cnt = _read_topo_file(&ptr_array);
+	if (switch_record_cnt == 0) {
+		error("No switches configured");
+		s_p_hashtbl_destroy(conf_hashtbl);
+		return;
+	}
+
+	switch_record_table = xmalloc(sizeof(struct switch_record) * 
+				      switch_record_cnt);
+	multi_homed_bitmap = bit_alloc(node_record_count);
+	switch_ptr = switch_record_table;
+	for (i=0; i<switch_record_cnt; i++, switch_ptr++) {
+		ptr = ptr_array[i];
+		switch_ptr->name = xstrdup(ptr->switch_name);
+		switch_ptr->link_speed = ptr->link_speed;
+		if (ptr->nodes) {
+			switch_ptr->level = 0;	/* leaf switch */
+			switch_ptr->nodes = xstrdup(ptr->nodes);
+			if (node_name2bitmap(ptr->nodes, true, 
+					     &switch_ptr->node_bitmap)) {
+				fatal("Invalid node name (%s) in switch "
+				      "config (%s)", 
+				      ptr->nodes, ptr->switch_name);
+			}
+			if (switches_bitmap) {
+				tmp_bitmap = bit_copy(switch_ptr->node_bitmap);
+				bit_and(tmp_bitmap, switches_bitmap);
+				bit_or(multi_homed_bitmap, tmp_bitmap);
+				bit_free(tmp_bitmap);
+				bit_or(switches_bitmap, 
+				       switch_ptr->node_bitmap);
+			} else {
+				switches_bitmap = bit_copy(switch_ptr->
+							   node_bitmap);
+			}
+		} else if (ptr->switches) {
+			switch_ptr->level = -1;	/* determine later */
+			switch_ptr->switches = xstrdup(ptr->switches);
+		} else {
+			fatal("Switch configuration (%s) lacks children",
+			      ptr->switch_name);
+		}
+	}
+
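+	/* Resolve interior switch levels bottom-up.  Leaf switches were
+	 * assigned level 0 above; an interior switch becomes one more than
+	 * its highest child (with the hypothetical config above, s0 and s1
+	 * are level 0 and root becomes level 1).  Multiple passes may be
+	 * needed when a switch is listed before its children. */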
+	for (depth=1; ; depth++) {
+		bool resolved = true;
+		switch_ptr = switch_record_table;
+		for (i=0; i<switch_record_cnt; i++, switch_ptr++) {
+			if (switch_ptr->level != -1)
+				continue;
+			hl = hostlist_create(switch_ptr->switches);
+			if (!hl)
+				fatal("hostlist_create: malloc failure");
+			while ((child = hostlist_pop(hl))) {
+				j = _get_switch_inx(child);
+				if ((j < 0) || (j == i)) {
+					fatal("Switch configuration %s has "
+					      "invalid child (%s)",
+					      switch_ptr->name, child);
+				}
+				if (switch_record_table[j].level == -1) {
+					/* Children not resolved */
+					resolved = false;
+					switch_ptr->level = -1;
+					FREE_NULL_BITMAP(switch_ptr->
+							 node_bitmap);
+					free(child);
+					break;
+				}
+				if (switch_ptr->level == -1) {
+					switch_ptr->level = 1 +
+						switch_record_table[j].level;
+					switch_ptr->node_bitmap = 
+						bit_copy(switch_record_table[j].
+							 node_bitmap);
+				} else {
+					switch_ptr->level = 
+						MAX(switch_ptr->level,
+						     (switch_record_table[j].
+						      level + 1));
+					bit_or(switch_ptr->node_bitmap,
+					       switch_record_table[j].
+					       node_bitmap);
+				}
+				free(child);
+			}
+			hostlist_destroy(hl);
+		}
+		if (resolved)
+			break;
+	}
+
+	switch_ptr = switch_record_table;
+	for (i=0; i<switch_record_cnt; i++, switch_ptr++) {
+		if (switch_ptr->node_bitmap == NULL)
+			error("switch %s has no nodes", switch_ptr->name);
+	}
+	if (switches_bitmap) {
+		bit_not(switches_bitmap);
+		i = bit_set_count(switches_bitmap);
+		if (i > 0) {
+			child = bitmap2node_name(switches_bitmap);
+			error("WARNING: switches lack access to %d nodes: %s", 
+			      i, child);
+			xfree(child);
+		}
+		bit_free(switches_bitmap);
+	} else
+		fatal("switches contain no nodes");
+
+	/* Report nodes on multiple leaf switches, 
+	 * possibly due to bad configuration file */
+	i = bit_set_count(multi_homed_bitmap);
+	if (i > 0) {
+		child = bitmap2node_name(multi_homed_bitmap);
+		error("WARNING: Multiple leaf switches contain nodes: %s", 
+		      child);
+		xfree(child);
+	}
+	bit_free(multi_homed_bitmap);
+
+	s_p_hashtbl_destroy(conf_hashtbl);
+	_log_switches();
+}
+
+static void _log_switches(void)
+{
+	int i;
+	struct switch_record *switch_ptr;
+
+	switch_ptr = switch_record_table;
+	for (i=0; i<switch_record_cnt; i++, switch_ptr++) {
+		if (!switch_ptr->nodes) {
+			switch_ptr->nodes = bitmap2node_name(switch_ptr->
+							     node_bitmap);
+		}
+		debug("Switch level:%d name:%s nodes:%s switches:%s",
+		      switch_ptr->level, switch_ptr->name,
+		      switch_ptr->nodes, switch_ptr->switches);
+	}
+}
+
+/* Return the index of a given switch name or -1 if not found */
+static int _get_switch_inx(const char *name)
+{
+	int i;
+	struct switch_record *switch_ptr;
+
+	switch_ptr = switch_record_table;
+	for (i=0; i<switch_record_cnt; i++, switch_ptr++) {
+		if (strcmp(switch_ptr->name, name) == 0)
+			return i;
+	}
+
+	return -1;
+}
+
+/* Free all memory associated with switch_record_table structure */
+static void _free_switch_record_table(void)
+{
+	int i;
+
+	if (switch_record_table) {
+		for (i=0; i<switch_record_cnt; i++) {
+			xfree(switch_record_table[i].name);
+			xfree(switch_record_table[i].nodes);
+			xfree(switch_record_table[i].switches);
+			FREE_NULL_BITMAP(switch_record_table[i].node_bitmap);
+		}
+		xfree(switch_record_table);
+		switch_record_cnt = 0;
+	}
+}
+
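+/* Build the topology.conf path: if SLURM_CONF is set, replace its base
+ * file name (assumed to be "slurm.conf") with "topology.conf"; otherwise
+ * fall back to the compiled-in TOPOLOGY_CONFIG_FILE.  The caller must
+ * xfree() the returned string. */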
+static char *_get_topo_conf(void)
+{
+	char *val = getenv("SLURM_CONF");
+	char *rc;
+	int i;
+
+	if (!val)
+		return xstrdup(TOPOLOGY_CONFIG_FILE);
+
+	/* Replace file name on end of path */
+	i = strlen(val) - strlen("slurm.conf") + strlen("topology.conf") + 1;
+	rc = xmalloc(i);
+	strcpy(rc, val);
+	val = strrchr(rc, (int)'/');
+	if (val)	/* path contains a directory component */
+		val++;
+	else		/* bare file name */
+		val = rc;
+	strcpy(val, "topology.conf");
+	return rc;
+}
+
+/* Return count of switch configuration entries read */
+extern int  _read_topo_file(slurm_conf_switches_t **ptr_array[])
+{
+	static s_p_options_t switch_options[] = {
+		{"SwitchName", S_P_ARRAY, _parse_switches, _destroy_switches},
+		{NULL}
+	};
+	int count;
+	slurm_conf_switches_t **ptr;
+
+	debug("Reading the topology.conf file");
+	if (!topo_conf)
+		topo_conf = _get_topo_conf();
+
+	conf_hashtbl = s_p_hashtbl_create(switch_options);
+	if (s_p_parse_file(conf_hashtbl, topo_conf) == SLURM_ERROR)
+		fatal("something wrong with opening/reading %s: %m", topo_conf);
+
+	if (s_p_get_array((void ***)&ptr, &count, "SwitchName", conf_hashtbl)) {
+		*ptr_array = ptr;
+	} else {
+		*ptr_array = NULL;
+		count = 0;
+	}
+	return count;
+}
+
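+/* s_p_parse_file() handler invoked for each "SwitchName=" line: parse the
+ * remaining keywords (LinkSpeed, Nodes, Switches) into a newly allocated
+ * slurm_conf_switches_t, rejecting lines that set both or neither of
+ * Nodes and Switches. */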
+static int  _parse_switches(void **dest, slurm_parser_enum_t type,
+			    const char *key, const char *value,
+			    const char *line, char **leftover)
+{
+	s_p_hashtbl_t *tbl;
+	slurm_conf_switches_t *s;
+	static s_p_options_t _switch_options[] = {
+		{"LinkSpeed", S_P_UINT32},
+		{"Nodes", S_P_STRING},
+		{"Switches", S_P_STRING},
+		{NULL}
+	};
+
+	tbl = s_p_hashtbl_create(_switch_options);
+	s_p_parse_line(tbl, *leftover, leftover);
+
+	s = xmalloc(sizeof(slurm_conf_switches_t));
+	s->switch_name = xstrdup(value);
+	if (!s_p_get_uint32(&s->link_speed, "LinkSpeed", tbl))
+		s->link_speed = 1;
+	s_p_get_string(&s->nodes, "Nodes", tbl);
+	s_p_get_string(&s->switches, "Switches", tbl);
+	s_p_hashtbl_destroy(tbl);
+
+	if (s->nodes && s->switches) {
+		error("switch %s has both child switches and nodes",
+		      s->switch_name);
+		_destroy_switches(s);
+		return -1;
+	}
+	if (!s->nodes && !s->switches) {
+		error("switch %s has neither child switches nor nodes",
+		      s->switch_name);
+		_destroy_switches(s);
+		return -1;
+	}
+
+	*dest = (void *)s;
+
+	return 1;
+}
+
+static void _destroy_switches(void *ptr)
+{
+	slurm_conf_switches_t *s = (slurm_conf_switches_t *)ptr;
+	xfree(s->nodes);
+	xfree(s->switch_name);
+	xfree(s->switches);
+	xfree(ptr);
+}
+
diff --git a/src/sacct/Makefile.am b/src/sacct/Makefile.am
index c51d734350da5744ededafa052bfec953e581fc1..46a22ae5cc4d23d66f1fa470e4f3822f5621dd40 100644
--- a/src/sacct/Makefile.am
+++ b/src/sacct/Makefile.am
@@ -12,7 +12,7 @@ sacct_LDADD = 	$(top_builddir)/src/api/libslurm.o -ldl
 sacct_LDFLAGS = -export-dynamic $(CMD_LDFLAGS)
 
 noinst_HEADERS = sacct.c 
-sacct_SOURCES = sacct.c process.c print.c options.c sacct_stat.c
+sacct_SOURCES = sacct.c process.c print.c options.c
 
 force:
 $(sacct_LDADD) : force
diff --git a/src/sacct/Makefile.in b/src/sacct/Makefile.in
index c187cf5bd070ca1ca3d2a0d33780b66562e7a476..56d88609f1e86770f3d045fd4be68ba58af3129b 100644
--- a/src/sacct/Makefile.in
+++ b/src/sacct/Makefile.in
@@ -46,14 +46,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -74,7 +78,7 @@ am__installdirs = "$(DESTDIR)$(bindir)"
 binPROGRAMS_INSTALL = $(INSTALL_PROGRAM)
 PROGRAMS = $(bin_PROGRAMS)
 am_sacct_OBJECTS = sacct.$(OBJEXT) process.$(OBJEXT) print.$(OBJEXT) \
-	options.$(OBJEXT) sacct_stat.$(OBJEXT)
+	options.$(OBJEXT)
 sacct_OBJECTS = $(am_sacct_OBJECTS)
 sacct_DEPENDENCIES = $(top_builddir)/src/api/libslurm.o
 sacct_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
@@ -108,6 +112,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
@@ -273,7 +281,7 @@ INCLUDES = -I$(top_srcdir)
 sacct_LDADD = $(top_builddir)/src/api/libslurm.o -ldl
 sacct_LDFLAGS = -export-dynamic $(CMD_LDFLAGS)
 noinst_HEADERS = sacct.c 
-sacct_SOURCES = sacct.c process.c print.c options.c sacct_stat.c
+sacct_SOURCES = sacct.c process.c print.c options.c
 all: all-am
 
 .SUFFIXES:
@@ -349,7 +357,6 @@ distclean-compile:
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/print.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/process.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sacct.Po@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sacct_stat.Po@am__quote@
 
 .c.o:
 @am__fastdepCC_TRUE@	$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
diff --git a/src/sacct/options.c b/src/sacct/options.c
index 8418346781364c1b7bc9f0a88fc59818fd4c07d8..00cde2f074515b779f3a4f727d9a472564a340a7 100644
--- a/src/sacct/options.c
+++ b/src/sacct/options.c
@@ -5,10 +5,11 @@
  *  Copyright (C) 2008 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Danny Auble <da@llnl.gov>.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -51,15 +52,10 @@ List selected_parts = NULL;
 List selected_steps = NULL;
 void *acct_db_conn = NULL;
 
-void _show_rec(char *f[])
-{
-	int 	i;
-	fprintf(stderr, "rec>");
-	for (i=0; f[i]; i++)
-		fprintf(stderr, " %s", f[i]);
-	fprintf(stderr, "\n");
-	return;
-}
+List print_fields_list = NULL;
+ListIterator print_fields_itr = NULL;
+int field_count = 0;
+List qos_list = NULL;
 
 void _help_fields_msg(void)
 {
@@ -67,10 +63,10 @@ void _help_fields_msg(void)
 
 	for (i = 0; fields[i].name; i++) {
 		if (i & 3)
-			printf("  ");
-		else
+			printf(" ");
+		else if (i)
 			printf("\n");
-		printf("%-10s", fields[i].name);
+		printf("%-13s", fields[i].name);
 	}
 	printf("\n");
 	return;
@@ -385,153 +381,122 @@ static int _addto_step_list(List step_list, char *names)
 
 void _help_msg(void)
 {
-	slurm_ctl_conf_t *conf = slurm_conf_lock();
-	printf("\n"
-	       "By default, sacct displays accounting data for all jobs and job\n"
-	       "steps that are present in the log.\n"
-	       "\n"
-	       "Notes:\n"
-	       "\n"
-	       "    * If --dump is specified,\n"
-	       "          * The field selection options (--brief, --fields, ...)\n"
-	       "	    have no effect\n"
-	       "	  * Elapsed time fields are presented as 2 fields, integral\n"
-	       "	    seconds and integral microseconds\n"
-	       "    * If --dump is not specified, elapsed time fields are presented\n"
-	       "      as [[days-]hours:]minutes:seconds.hundredths\n"
-	       "    * The default input file is the file named in the \"jobacct_logfile\"\n"
-	       "      parameter in %s.\n"
-	       "\n"
-	       "Options:\n"
-	       "\n"
-	       "-a, --all\n"
-	       "    Display job accounting data for all users. By default, only\n"
-	       "    data for the current user is displayed for users other than\n"
-	       "    root.\n"
-	       "-A, --accounts\n"
-	       "    Only send data about these accounts.  Default is all.\n"
-	       "-b, --brief\n"
-	       "    Equivalent to \"--fields=jobstep,state,error\". This option\n"
-	       "    has no effect if --dump is specified.\n"
-	       "-c, --completion\n"
-	       "    Use job completion instead of accounting data.\n"
-	       "-C, --clusters\n"
-	       "    Only send data about these clusters.  -1 for all clusters.\n"
-	       "-d, --dump\n"
-	       "    Dump the raw data records\n"
-	       "--duplicates\n"
-	       "    If SLURM job ids are reset, but the job accounting log file\n"
-	       "    isn't reset at the same time (with -e, for example), some\n"
-	       "    job numbers will probably appear more than once in the\n"
-	       "    accounting log file to refer to different jobs; such jobs\n"
-	       "    can be distinguished by the \"submit\" time stamp in the\n"
-	       "    data records.\n"
-	       "      When data for specific jobs are requested with\n"
-	       "    the --jobs option, we assume that the user\n"
-	       "    wants to see only the most recent job with that number. This\n"
-	       "    behavior can be overridden by specifying --duplicates, in\n"
-	       "    which case all records that match the selection criteria\n"
-	       "    will be returned.\n"
-	       "      When --jobs is not specified, we report\n"
-	       "    data for all jobs that match the selection criteria, even if\n"
-	       "    some of the job numbers are reused. Specify that you only\n"
-	       "    want the most recent job for each selected job number with\n"
-	       "    the --noduplicates option.\n"
-	       "-e <timespec>, --expire=<timespec>\n"
-	       "    Remove jobs from SLURM's current accounting log file (or the\n"
-	       "    file specified with --file) that completed more than <timespec>\n"
-	       "    ago.  If <timespec> is an integer, it is interpreted as\n" 
-	       "    minutes. If <timespec> is an integer followed by \"h\", it is\n"
-	       "    interpreted as a number of hours. If <timespec> is an integer\n"
-	       "    followed by \"d\", it is interpreted as number of days. For\n"
-	       "    example, \"--expire=14d\" means that you wish to purge the job\n"
-	       "    accounting log of all jobs that completed more than 14 days ago.\n" 
-	       "--endtime:                                                   \n"
-               "    Select jobs eligible before this time.                   \n"
-	       "-F <format-list>, --format=<format-list>\n"
-	       "    Display the specified data (use \"--helpformat\" for a\n"
-	       "    list of available fields). If no format option is specified,\n"
-	       "    we use \"--format=jobstep,jobname,partition,alloc_cpus,state,error\".\n"
-	       "-f<file>, --file=<file>\n"
-	       "    Read data from the specified file, rather than SLURM's current\n"
-	       "    accounting log file.\n"
-	       "-l, --long\n"
-	       "    Equivalent to specifying\n"
-	       "    \"--fields=jobstep,usercpu,systemcpu,minflt,majflt,nprocs,\n"
-	       "    alloc_cpus,elapsed,state,exitcode\"\n"
-	       "-O, --formatted_dump\n"
-	       "    Dump accounting records in an easy-to-read format, primarily\n"
-	       "    for debugging.\n"
-	       "-g <gid>, --gid <gid>\n"
-	       "    Select only jobs submitted from the <gid> group.\n"
-	       "-h, --help\n"
-	       "    Print a general help message.\n"
-	       "--help-fields\n"
-	       "    Print a list of fields that can be specified with the\n"
-	       "    \"--fields\" option\n"
-	       "-j <job(.step)>, --jobs=<job(.step)>\n"
-	       "    Display information about this job or comma-separated\n"
-	       "    list of jobs. The default is all jobs. Adding .step will\n"
-	       "    display the specfic job step of that job.\n"
-	       "--noduplicates\n"
-	       "    See the discussion under --duplicates.\n"
-	       "--noheader\n"
-	       "    Print (or don't print) a header. The default is to print a\n"
-	       "    header; the option has no effect if --dump is specified\n"
-	       "-p <part_list>, --partition=<part_list>\n"
-	       "    Display or purge information about jobs and job steps in the\n"
-	       "    <part_list> partition(s). The default is all partitions.\n"
-	       "-P --purge\n"
-	       "    Used in conjunction with --expire to remove invalid data\n"
-	       "    from the job accounting log.\n"
-	       "-s <state-list>, --state=<state-list>\n"
-	       "    Select jobs based on their current state: running (r),\n"
-	       "    completed (cd), failed (f), timeout (to), and node_fail (nf).\n"
-	       "-S, --stat\n"
-	       "    Get real time state of a jobstep supplied by the -j\n"
-	       "    option\n" 
-	       "--starttime:                                                 \n"
-               "    Select jobs eligible after this time.                    \n"
-	       "-t, --total\n"
-	       "    Only show cumulative statistics for each job, not the\n"
-	       "    intermediate steps\n"
-	       "-u <uid>, --uid <uid>\n"
-	       "    Select only jobs submitted by the user with uid <uid>.  Only\n"
-	       "    root users are allowed to specify a uid other than their own -1 for all users.\n"
-	       "--usage\n"
-	       "    Pointer to this message.\n"
-	       "-v, --verbose\n"
-	       "    Primarily for debugging purposes, report the state of various\n"
-	       "    variables during processing.\n"
-	       "-W, --wckeys\n"
-	       "    Only send data about these wckeys.  Default is all.\n"
-	       "\n"
-	       "Note, valid start/end time formats are...\n"
-	       "    HH:MM[:SS] [AM|PM]\n"
-	       "    MMDD[YY] or MM/DD[/YY] or MM.DD[.YY]\n"
-	       "    MM/DD[/YY]-HH:MM[:SS]\n"
-	       , conf->slurm_conf);
-
-	slurm_conf_unlock();
+	printf("\
+sacct [<OPTION>]                                                            \n\
+    Valid <OPTION> values are:                                              \n\
+     -a, --allusers:                                                        \n\
+	           Display jobs for all users. By default, only the         \n\
+                   current user's jobs are displayed.  If run by user root, \n\
+                   this is the default.                                     \n\
+     -A, --accounts:                                                        \n\
+	           Use this comma-separated list of accounts to select jobs \n\
+                   to display.  By default, all accounts are selected.      \n\
+     -b, --brief:                                                           \n\
+	           Equivalent to '--format=jobstep,state,error'. This option\n\
+	           has no effect if --dump is specified.                    \n\
+     -c, --completion: Use job completion instead of accounting data.       \n\
+     -C, --clusters:                                                        \n\
+                   Only send data about these clusters. -1 for all clusters.\n\
+     -d, --dump:   Dump the raw data records                                \n\
+     -D, --duplicates:                                                      \n\
+	           If SLURM job ids are reset, some job numbers will        \n\
+	           probably appear more than once, referring to different jobs.\n\
+	           Without this option, only the most recent jobs will be   \n\
+                   displayed.                                               \n\
+     -e, --helpformat:                                                      \n\
+	           Print a list of fields that can be specified with the    \n\
+	           '--format' option                                        \n\
+     -E, --endtime:                                                         \n\
+                   Select jobs started before this time.                    \n\
+     -f, --file=file:                                                       \n\
+	           Read data from the specified file, rather than SLURM's   \n\
+                   current accounting log file. (Only applicable when       \n\
+                   running the filetxt plugin.)                             \n\
+     -g, --gid, --group:                                                    \n\
+	           Use this comma-separated list of gids or group names     \n\
+                   to select jobs to display.  By default, all groups are   \n\
+                   selected.                                                \n\
+     -h, --help:   Print this description of use.                           \n\
+     -j, --jobs:                                                            \n\
+	           Format is <job(.step)>. Display information about this   \n\
+                   job or comma-separated list of jobs. The default is all  \n\
+                   jobs. Adding .step will display the specific job step of \n\
+                   that job.                                                \n\
+     -l, --long:                                                            \n\
+	           Equivalent to specifying                                 \n\
+	           '--fields=jobid,jobname,partition,maxvsize,maxvsizenode, \n\
+                             maxvsizetask,avevsize,maxrss,maxrssnode,       \n\
+                             maxrsstask,averss,maxpages,maxpagesnode,       \n\
+                             maxpagestask,avepages,mincpu,mincpunode,       \n\
+                             mincputask,avecpu,ntasks,alloccpus,elapsed,    \n\
+	                     state,exitcode'                                \n\
+     -L, --allclusters:                                                     \n\
+	           Display jobs run on all clusters. By default, only jobs  \n\
+                   run on the cluster from which sacct is called are        \n\
+                   displayed.                                               \n\
+     -n, --noheader:                                                        \n\
+	           No header will be added to the beginning of output.      \n\
+                   The default is to print a header; the option has no effect\n\
+                   if --dump is specified.                                  \n\
+     -N, --nodes:                                                           \n\
+                   A comma separated list of nodes where jobs ran           \n\
+     -o, --format:                                                          \n\
+	           Comma-separated list of fields. (use \"--helpformat\"    \n\
+                   for a list of available fields).                         \n\
+     -O, --formatted_dump:                                                  \n\
+	           Dump accounting records in an easy-to-read format,       \n\
+                   primarily for debugging.                                 \n\
+     -p, --parsable: Output will be '|' delimited with a '|' at the end.    \n\
+     -P, --parsable2: Output will be '|' delimited without a '|' at the end.\n\
+     -r, --partition:                                                       \n\
+	           Comma-separated list of partitions to select jobs and    \n\
+                   job steps from. The default is all partitions.           \n\
+     -s, --state:                                                           \n\
+	           Select jobs based on their current state: running (r),   \n\
+	           completed (cd), failed (f), timeout (to), and            \n\
+                   node_fail (nf).                                          \n\
+     -S, --starttime:                                                       \n\
+                   Select jobs eligible after this time.  Default is        \n\
+                   midnight of current day.                                 \n\
+     -T, --truncate:                                                        \n\
+                   Truncate time: if a job started before --starttime, its  \n\
+                   start time is reported as --starttime.  The same applies \n\
+                   to end times and --endtime.                              \n\
+     -u, --uid, --user:                                                     \n\
+	           Use this comma-separated list of uids or user names      \n\
+                   to select jobs to display.  By default, the running      \n\
+                   user's uid is used.                                      \n\
+     --usage:      Display brief usage message.                             \n\
+     -v, --verbose:                                                         \n\
+	           Primarily for debugging purposes, report the state of    \n\
+                   various variables during processing.                     \n\
+     -V, --version: Print version.                                          \n\
+     -W, --wckeys:                                                          \n\
+                   Only send data about these wckeys.  Default is all.      \n\
+     -X, --allocations:                                                     \n\
+	           Only show cumulative statistics for each job, not the    \n\
+	           intermediate steps.                                      \n\
+	                                                                    \n\
+     Note, valid start/end time formats are...                              \n\
+	           HH:MM[:SS] [AM|PM]                                       \n\
+	           MMDD[YY] or MM/DD[/YY] or MM.DD[.YY]                     \n\
+	           MM/DD[/YY]-HH:MM[:SS]                                    \n\
+	           YYYY-MM-DD[THH[:MM[:SS]]]                                \n\
+\n");
 
 	return;
 }
 
 void _usage(void)
 {
-	printf("\nUsage: sacct [options]\n\tUse --help for help\n");
+	printf("Usage: sacct [options]\n\tUse --help for help\n");
 }
 
 void _init_params()
 {
 	memset(&params, 0, sizeof(sacct_parameters_t));
-	params.arch_cond = xmalloc(sizeof(acct_archive_cond_t));
-	params.arch_cond->archive_jobs = (uint16_t)NO_VAL;
-	params.arch_cond->archive_steps = (uint16_t)NO_VAL;
-	params.arch_cond->job_purge = (uint16_t)NO_VAL;
-	params.arch_cond->step_purge = (uint16_t)NO_VAL;
-
-	params.arch_cond->job_cond = xmalloc(sizeof(acct_job_cond_t));
+	params.job_cond = xmalloc(sizeof(acct_job_cond_t));
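+	/* report full job times by default; -T/--truncate clips them
+	   to the requested window */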
+	params.job_cond->without_usage_truncation = 1;
 }
 
 int decode_state_char(char *state)
@@ -563,7 +528,7 @@ int get_data(void)
 
 	ListIterator itr = NULL;
 	ListIterator itr_step = NULL;
-	acct_job_cond_t *job_cond = params.arch_cond->job_cond;
+	acct_job_cond_t *job_cond = params.job_cond;
 	
 	if(params.opt_completion) {
 		jobs = g_slurm_jobcomp_get_jobs(job_cond);
@@ -630,41 +595,40 @@ void parse_command_line(int argc, char **argv)
 	bool brief_output = FALSE, long_output = FALSE;
 	bool all_users = 0;
 	bool all_clusters = 0;
-	acct_archive_cond_t *arch_cond = params.arch_cond;
-	acct_job_cond_t *job_cond = arch_cond->job_cond;
+	acct_job_cond_t *job_cond = params.job_cond;
+	log_options_t opts = LOG_OPTS_STDERR_ONLY;
+	int verbosity;		/* count of -v options */
 
 	static struct option long_options[] = {
-		{"all", 0,0, 'a'},
+		{"allusers", 0,0, 'a'},
 		{"accounts", 1, 0, 'A'},
-		{"begin", 1, 0, 'B'},
+		{"allocations", 0, &params.opt_allocs,  1},
 		{"brief", 0, 0, 'b'},
-		{"clusters", 1, 0, 'C'},
 		{"completion", 0, &params.opt_completion, 'c'},
-		{"duplicates", 0, &params.opt_dup, 1},
+		{"clusters", 1, 0, 'C'},
 		{"dump", 0, 0, 'd'},
-		{"end", 1, 0, 'E'},
+		{"duplicates", 0, &params.opt_dup, 1},
+		{"helpformat", 0, 0, 'e'},
+		{"help-fields", 0, 0, 'e'},
 		{"endtime", 1, 0, 'E'},
-		{"expire", 1, 0, 'e'},
-		{"fields", 1, 0, 'F'},
-		{"format", 1, 0, 'F'},
 		{"file", 1, 0, 'f'},
-		{"formatted_dump", 0, 0, 'O'},
 		{"gid", 1, 0, 'g'},
 		{"group", 1, 0, 'g'},
-		{"help", 0, &params.opt_help, 1},
-		{"help-fields", 0, &params.opt_help, 2},
+		{"help", 0, 0, 'h'},
 		{"helpformat", 0, &params.opt_help, 2},
 		{"jobs", 1, 0, 'j'},
 		{"long", 0, 0, 'l'},
-		{"big_logfile", 0, &params.opt_lowmem, 1},
-		{"noduplicates", 0, &params.opt_dup, 0},
-		{"noheader", 0, &params.opt_noheader, 1},
-		{"partition", 1, 0, 'p'},
-		{"purge", 0, 0, 'P'},
+		{"nodes", 1, 0, 'N'},
+		{"noheader", 0, 0, 'n'},
+		{"fields", 1, 0, 'o'},
+		{"format", 1, 0, 'o'},
+		{"formatted_dump", 0, 0, 'O'},
+		{"parsable", 0, 0, 'p'},
+		{"parsable2", 0, 0, 'P'},
+		{"partition", 1, 0, 'r'},
 		{"state", 1, 0, 's'},
-		{"stat", 0, 0, 'S'},
-		{"starttime", 1, 0, 'B'},
-		{"total", 0, 0,  't'},
+		{"starttime", 1, 0, 'S'},
+		{"truncate", 0, 0, 'T'},
 		{"uid", 1, 0, 'u'},
 		{"usage", 0, &params.opt_help, 3},
 		{"user", 1, 0, 'u'},
@@ -676,11 +640,13 @@ void parse_command_line(int argc, char **argv)
 	params.opt_uid = getuid();
 	params.opt_gid = getgid();
 
+	verbosity         = 0;
+	log_init("sacct", opts, SYSLOG_FACILITY_DAEMON, NULL);
 	opterr = 1;		/* Let getopt report problems to the user */
 
 	while (1) {		/* now cycle through the command line */
 		c = getopt_long(argc, argv,
-				"aA:bB:cC:deE:F:f:g:hj:lOP:p:s:StUu:VvW:",
+				"aA:bcC:deE:f:g:hj:lnN:o:OpPr:s:S:tu:vVW:X",
 				long_options, &optionIndex);
 		if (c == -1)
 			break;
@@ -697,9 +663,6 @@ void parse_command_line(int argc, char **argv)
 		case 'b':
 			brief_output = true;
 			break;
-		case 'B':
-			job_cond->usage_start = parse_time(optarg, 1);
-			break;
 		case 'c':
 			params.opt_completion = 1;
 			break;
@@ -717,35 +680,28 @@ void parse_command_line(int argc, char **argv)
 		case 'd':
 			params.opt_dump = 1;
 			break;	
+		case 'D':
+			params.opt_dup = 1;
+			break;
 		case 'e':
-			params.opt_expire = 1;
+			params.opt_help = 2;
 			break;
 		case 'E':
 			job_cond->usage_end = parse_time(optarg, 1);
 			break;
-		case 'F':
-			if(params.opt_stat)
-				xfree(params.opt_field_list);
-			
-			xstrfmtcat(params.opt_field_list, "%s,", optarg);
-			break;
-
 		case 'f':
-			xfree(arch_cond->archive_dir);
-			arch_cond->archive_dir = xstrdup(optarg);
+			xfree(params.opt_filein);
+			params.opt_filein = xstrdup(optarg);
 			break;
-
 		case 'g':
 			if(!job_cond->groupid_list)
 				job_cond->groupid_list = 
 					list_create(slurm_destroy_char);
 			_addto_id_char_list(job_cond->groupid_list, optarg, 1);
 			break;
-
 		case 'h':
 			params.opt_help = 1;
 			break;
-
 		case 'j':
 			if ((strspn(optarg, "0123456789, ") < strlen(optarg))
 			    && (strspn(optarg, ".0123456789, ") 
@@ -760,23 +716,38 @@ void parse_command_line(int argc, char **argv)
 					destroy_jobacct_selected_step);
 			_addto_step_list(job_cond->step_list, optarg);
 			break;
-
+		case 'L':
+			all_clusters = 1;
+			break;
 		case 'l':
 			long_output = true;
 			break;
-
+		case 'o':
+			xstrfmtcat(params.opt_field_list, "%s,", optarg);
+			break;
 		case 'O':
 			params.opt_fdump = 1;
 			break;
-
-		case 'P':
-			
-			arch_cond->step_purge = 
-				arch_cond->job_purge = atoi(optarg);
-			
+		case 'n':
+			print_fields_have_header = 0;
+			break;
+		case 'N':
+			if(job_cond->used_nodes) {
+				error("Aleady asked for nodes '%s'",
+				      job_cond->used_nodes);
+				break;
+			}
+			job_cond->used_nodes = xstrdup(optarg);
 			break;
-
 		case 'p':
+			print_fields_parsable_print = 
+				PRINT_FIELDS_PARSABLE_ENDING;
+			break;
+		case 'P':
+			print_fields_parsable_print = 
+				PRINT_FIELDS_PARSABLE_NO_ENDING;
+			break;
+		case 'r':
 			if(!job_cond->partition_list)
 				job_cond->partition_list =
 					list_create(slurm_destroy_char);
@@ -792,21 +763,14 @@ void parse_command_line(int argc, char **argv)
 			_addto_state_char_list(job_cond->state_list, optarg);
 			break;
 		case 'S':
-			if(!params.opt_field_list) {
-				xstrfmtcat(params.opt_field_list, "%s,",
-					   STAT_FIELDS);
-			}
-			params.opt_stat = 1;
+			job_cond->usage_start = parse_time(optarg, 1);
 			break;
-
-		case 't':
-			params.opt_total = 1;
+		case 'T':
+			job_cond->without_usage_truncation = 0;
 			break;
-
 		case 'U':
 			params.opt_help = 3;
 			break;
-
 		case 'u':
 			if(!strcmp(optarg, "-1")) {
 				all_users = 1;
@@ -818,17 +782,11 @@ void parse_command_line(int argc, char **argv)
 					list_create(slurm_destroy_char);
 			_addto_id_char_list(job_cond->userid_list, optarg, 0);
 			break;
-
 		case 'v':
 			/* Handle -vvv thusly...
-			 * 0 - report only normal messages and errors
-			 * 1 - report options selected and major operations
-			 * 2 - report data anomalies probably not errors
-			 * 3 - blather on and on
 			 */
-			params.opt_verbose++;
+			verbosity++;
 			break;
-
 		case 'W':
 			if(!job_cond->wckey_list) 
 				job_cond->wckey_list =
@@ -838,13 +796,23 @@ void parse_command_line(int argc, char **argv)
 		case 'V':
 			printf("%s %s\n", PACKAGE, SLURM_VERSION);
 			exit(0);
-
+		case 't':
+		case 'X':
+			params.opt_allocs = 1;
+			break;
 		case ':':
 		case '?':	/* getopt() has explained it */
 			exit(1); 
 		}
 	}
 
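+	/* each -v on the command line raises the stderr log verbosity */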
+	if (verbosity) {
+		opts.stderr_level += verbosity;
+		opts.prefix_level = 1;
+		log_alter(opts, 0, NULL);
+	}
+
 	/* Now set params.opt_dup, unless they've already done so */
 	if (params.opt_dup < 0)	/* not already set explicitly */
 		params.opt_dup = 0;
@@ -854,62 +822,73 @@ void parse_command_line(int argc, char **argv)
 
 	job_cond->duplicates = params.opt_dup;
 
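+	/* no --starttime given: default the window to local midnight today */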
+	if(!job_cond->usage_start) {
+		struct tm start_tm;
+
+		job_cond->usage_start = time(NULL);
+		if(!localtime_r(&job_cond->usage_start, &start_tm)) {
+			error("Couldn't get localtime from %d", 
+			      job_cond->usage_start);
+			return;
+		}
+		start_tm.tm_sec = 0;
+		start_tm.tm_min = 0;
+		start_tm.tm_hour = 0;
+		start_tm.tm_isdst = -1;
+		job_cond->usage_start = mktime(&start_tm);
+	}
+	
+	if(verbosity > 0) {
+		char *start_char = NULL, *end_char = NULL;
+
+		start_char = xstrdup(ctime(&job_cond->usage_start));
+		/* strip ctime()'s trailing newline */
+		start_char[strlen(start_char)-1] = '\0';
+		if(job_cond->usage_end) {
+			end_char = xstrdup(ctime(&job_cond->usage_end));
+			/* strip ctime()'s trailing newline */
+			end_char[strlen(end_char)-1] = '\0';
+		} else
+			end_char = xstrdup("Now");
+		info("Jobs eligible from %s - %s\n", start_char, end_char);
+		xfree(start_char);
+		xfree(end_char);
+	}
+
 	debug("Options selected:\n"
-	      "\topt_archive_jobs=%d\n"
-	      "\topt_archve_steps=%d\n"
 	      "\topt_completion=%d\n"
 	      "\topt_dump=%d\n"
 	      "\topt_dup=%d\n"
-	      "\topt_expire=%d\n"
 	      "\topt_fdump=%d\n"
-	      "\topt_stat=%d\n"
 	      "\topt_field_list=%s\n"
-	      "\topt_filein=%s\n"
-	      "\topt_noheader=%d\n"
 	      "\topt_help=%d\n"
-	      "\topt_long=%d\n"
-	      "\topt_lowmem=%d\n"
-	      "\topt_job_purge=%d\n"
-	      "\topt_step_purge=%d\n"
-	      "\topt_total=%d\n"
-	      "\topt_verbose=%d\n",
-	      arch_cond->archive_jobs,
-	      arch_cond->archive_steps,
+	      "\topt_allocs=%d\n",
 	      params.opt_completion,
 	      params.opt_dump,
 	      params.opt_dup,
-	      params.opt_expire,
 	      params.opt_fdump,
-	      params.opt_stat,
 	      params.opt_field_list,
-	      arch_cond->archive_dir,
-	      params.opt_noheader,
 	      params.opt_help,
-	      params.opt_long,
-	      params.opt_lowmem,
-	      arch_cond->job_purge,
-	      arch_cond->step_purge,
-	      params.opt_total,
-	      params.opt_verbose);
+	      params.opt_allocs);
 
 
 	if(params.opt_completion) {
-		g_slurm_jobcomp_init(arch_cond->archive_dir);
+		g_slurm_jobcomp_init(params.opt_filein);
 
 		acct_type = slurm_get_jobcomp_type();
 		if ((strcmp(acct_type, "jobcomp/none") == 0)
-		    &&  (stat(arch_cond->archive_dir, &stat_buf) != 0)) {
+		    &&  (stat(params.opt_filein, &stat_buf) != 0)) {
 			fprintf(stderr, "SLURM job completion is disabled\n");
 			exit(1);
 		}
 		xfree(acct_type);
 	} else {
-		slurm_acct_storage_init(arch_cond->archive_dir);
+		slurm_acct_storage_init(params.opt_filein);
 		acct_db_conn = acct_storage_g_get_connection(false, 0, false);
 		
 		acct_type = slurm_get_accounting_storage_type();
 		if ((strcmp(acct_type, "accounting_storage/none") == 0)
-		    &&  (stat(arch_cond->archive_dir, &stat_buf) != 0)) {
+		    &&  (stat(params.opt_filein, &stat_buf) != 0)) {
 			fprintf(stderr,
 				"SLURM accounting storage is disabled\n");
 			exit(1);
@@ -924,116 +903,102 @@ void parse_command_line(int argc, char **argv)
 			list_destroy(job_cond->cluster_list);
 			job_cond->cluster_list = NULL;
 		}
-		if(params.opt_verbose)
-			fprintf(stderr, "Clusters requested:\n\t: all\n");
-	} else if (params.opt_verbose && job_cond->cluster_list 
-	    && list_count(job_cond->cluster_list)) {
-		fprintf(stderr, "Clusters requested:\n");
+		debug2("Clusters requested:\tall\n");
+	} else if (job_cond->cluster_list 
+		   && list_count(job_cond->cluster_list)) {
+		debug2( "Clusters requested:\n");
 		itr = list_iterator_create(job_cond->cluster_list);
 		while((start = list_next(itr))) 
-			fprintf(stderr, "\t: %s\n", start);
+			debug2("\t: %s\n", start);
 		list_iterator_destroy(itr);
 	} else if(!job_cond->cluster_list 
 		  || !list_count(job_cond->cluster_list)) {
 		if(!job_cond->cluster_list)
 			job_cond->cluster_list =
 				list_create(slurm_destroy_char);
-		if((start = slurm_get_cluster_name()))
+		if((start = slurm_get_cluster_name())) {
 			list_append(job_cond->cluster_list, start);
-		if(params.opt_verbose) {
-			fprintf(stderr, "Clusters requested:\n");
-			fprintf(stderr, "\t: %s\n", start);
+			debug2("Clusters requested:\t%s", start);
 		}
 	}
 
-	/* if any jobs are specified set to look for all users if none
+	/* if any jobs or nodes are specified, set to look for all users if none
 	   are set */
-	if((job_cond->step_list && list_count(job_cond->step_list))
-	   && (!job_cond->userid_list || !list_count(job_cond->userid_list)))
-		all_users=1;
+	if(!job_cond->userid_list || !list_count(job_cond->userid_list))
+		if((job_cond->step_list && list_count(job_cond->step_list))
+		   || job_cond->used_nodes)
+			all_users = 1;
 
 	if(all_users) {
-		if(job_cond->userid_list 
-		   && list_count(job_cond->userid_list)) {
+		if(job_cond->userid_list && list_count(job_cond->userid_list)) {
 			list_destroy(job_cond->userid_list);
 			job_cond->userid_list = NULL;
 		}
-		if(params.opt_verbose)
-			fprintf(stderr, "Userids requested:\n\t: all\n");
-	} else if (params.opt_verbose && job_cond->userid_list 
-	    && list_count(job_cond->userid_list)) {
-		fprintf(stderr, "Userids requested:\n");
+		debug2("Userids requested:\tall\n");
+	} else if (job_cond->userid_list && list_count(job_cond->userid_list)) {
+		debug2("Userids requested:");
 		itr = list_iterator_create(job_cond->userid_list);
 		while((start = list_next(itr))) 
-			fprintf(stderr, "\t: %s\n", start);
+			debug2("\t: %s", start);
 		list_iterator_destroy(itr);
 	} else if(!job_cond->userid_list 
-		      || !list_count(job_cond->userid_list)) {
+		  || !list_count(job_cond->userid_list)) {
 		if(!job_cond->userid_list)
-			job_cond->userid_list =
-				list_create(slurm_destroy_char);
+			job_cond->userid_list = list_create(slurm_destroy_char);
 		start = xstrdup_printf("%u", params.opt_uid);
 		list_append(job_cond->userid_list, start);
-		if(params.opt_verbose) {
-			fprintf(stderr, "Userids requested:\n");
-			fprintf(stderr, "\t: %s\n", start);
-		}
+		debug2("Userid requested\t: %s", start);
 	}
 
-	if (params.opt_verbose && job_cond->groupid_list 
-	    && list_count(job_cond->groupid_list)) {
-		fprintf(stderr, "Groupids requested:\n");
+	if (job_cond->groupid_list && list_count(job_cond->groupid_list)) {
+		debug2("Groupids requested:\n");
 		itr = list_iterator_create(job_cond->groupid_list);
 		while((start = list_next(itr))) 
-			fprintf(stderr, "\t: %s\n", start);
+			debug2("\t: %s\n", start);
 		list_iterator_destroy(itr);
 	} 
 
 	/* specific partitions requested? */
-	if (params.opt_verbose && job_cond->partition_list 
-	    && list_count(job_cond->partition_list)) {
-		fprintf(stderr, "Partitions requested:\n");
+	if (job_cond->partition_list && list_count(job_cond->partition_list)) {
+		debug2("Partitions requested:");
 		itr = list_iterator_create(job_cond->partition_list);
 		while((start = list_next(itr))) 
-			fprintf(stderr, "\t: %s\n", start);
+			debug2("\t: %s\n", start);
 		list_iterator_destroy(itr);
 	}
 
 	/* specific jobs requested? */
-	if (params.opt_verbose && job_cond->step_list
-	    && list_count(job_cond->step_list)) { 
-		fprintf(stderr, "Jobs requested:\n");
+	if (job_cond->step_list && list_count(job_cond->step_list)) { 
+		debug2("Jobs requested:");
 		itr = list_iterator_create(job_cond->step_list);
 		while((selected_step = list_next(itr))) {
 			if(selected_step->stepid != NO_VAL) 
-				fprintf(stderr, "\t: %d.%d\n",
+				debug2("\t: %d.%d",
 					selected_step->jobid,
 					selected_step->stepid);
 			else	
-				fprintf(stderr, "\t: %d\n", 
+				debug2("\t: %d", 
 					selected_step->jobid);
 		}
 		list_iterator_destroy(itr);
 	}
 
 	/* specific states (completion state) requested? */
-	if (params.opt_verbose && job_cond->state_list
-	    && list_count(job_cond->state_list)) {
-		fprintf(stderr, "States requested:\n");
+	if (job_cond->state_list && list_count(job_cond->state_list)) {
+		debug2("States requested:");
 		itr = list_iterator_create(job_cond->state_list);
 		while((start = list_next(itr))) {
-			fprintf(stderr, "\t: %s\n", 
+			debug2("\t: %s", 
 				job_state_string(atoi(start)));
 		}
 		list_iterator_destroy(itr);
 	}
 
-	if (params.opt_verbose && job_cond->wckey_list 
-	    && list_count(job_cond->wckey_list)) {
-		fprintf(stderr, "Wckeys requested:\n");
+	if (job_cond->wckey_list && list_count(job_cond->wckey_list)) {
+		debug2("Wckeys requested:");
 		itr = list_iterator_create(job_cond->wckey_list);
 		while((start = list_next(itr))) 
-			fprintf(stderr, "\t: %s\n", start);
+			debug2("\t: %s\n", start);
 		list_iterator_destroy(itr);
 	} 
 
@@ -1057,7 +1022,7 @@ void parse_command_line(int argc, char **argv)
 	} 
 	
 	if (params.opt_field_list==NULL) {
-		if (params.opt_dump || params.opt_expire)
+		if (params.opt_dump)
 			goto endopt;
 		if(params.opt_completion)
 			dot = DEFAULT_COMP_FIELDS;
@@ -1078,29 +1043,19 @@ void parse_command_line(int argc, char **argv)
 			if (!strcasecmp(fields[i].name, start))
 				goto foundfield;
 		}
-		fprintf(stderr,
-			"Invalid field requested: \"%s\"\n",
-			start);
+		error("Invalid field requested: \"%s\"", start);
 		exit(1);
 	foundfield:
-		printfields[nprintfields++] = i;
+		list_append(print_fields_list, &fields[i]);
 		start = end + 1;
 	}
-	if (params.opt_verbose) {
-		fprintf(stderr, "%d field%s selected:\n",
-			nprintfields,
-			(nprintfields==1? "" : "s"));
-		for (i = 0; i < nprintfields; i++)
-			fprintf(stderr,
-				"\t%s\n",
-				fields[printfields[i]].name);
-	} 
+	field_count = list_count(print_fields_list);
 endopt:
 	if (optind < argc) {
-		fprintf(stderr, "Error: Unknown arguments:");
+		debug2("Error: Unknown arguments:");
 		for (i=optind; i<argc; i++)
-			fprintf(stderr, " %s", argv[i]);
-		fprintf(stderr, "\n");
+			debug2(" %s", argv[i]);
+		debug2("\n");
 		exit(1);
 	}
 	return;
@@ -1312,18 +1267,6 @@ void do_dump_completion(void)
 	list_iterator_destroy(itr);
 }
 
-/* do_expire() -- purge expired data from the accounting log file
- */
-
-void do_expire()
-{
-	if(params.opt_completion) 
-		g_slurm_jobcomp_archive(params.arch_cond);
-	else {
-		jobacct_storage_g_archive(acct_db_conn, params.arch_cond);
-	}
-}
-
 void do_help(void)
 {
 	switch (params.opt_help) {
@@ -1337,7 +1280,7 @@ void do_help(void)
 		_usage();
 		break;
 	default:
-		fprintf(stderr, "sacct bug: params.opt_help=%d\n", 
+		debug2("sacct bug: params.opt_help=%d\n", 
 			params.opt_help);
 	}
 }
@@ -1352,8 +1295,6 @@ void do_help(void)
  */
 void do_list(void)
 {
-	int do_jobsteps = 1;
-	
 	ListIterator itr = NULL;
 	ListIterator itr_step = NULL;
 	jobacct_job_rec_t *job = NULL;
@@ -1362,8 +1303,6 @@ void do_list(void)
 	if(!jobs)
 		return;
 
-	if (params.opt_total)
-		do_jobsteps = 0;
 	itr = list_iterator_create(jobs);
 	while((job = list_next(itr))) {
 		if(job->sacct.min_cpu == NO_VAL)
@@ -1376,18 +1315,15 @@ void do_list(void)
 			job->sacct.ave_pages /= list_count(job->steps);
 		}
 
-		if (job->show_full) {
+		if (job->show_full) 
 			print_fields(JOB, job);
-		}
 		
-		if (do_jobsteps && (job->track_steps || !job->show_full)) {
+		if (!params.opt_allocs
+		    && (job->track_steps || !job->show_full)) {
 			itr_step = list_iterator_create(job->steps);
 			while((step = list_next(itr_step))) {
 				if(step->end == 0)
 					step->end = job->end;
-				step->associd = job->associd;
-				step->cluster = job->cluster;
-				step->account = job->account;
 				print_fields(JOBSTEP, step);
 			} 
 			list_iterator_destroy(itr_step);
@@ -1419,44 +1355,31 @@ void do_list_completion(void)
 	list_iterator_destroy(itr);
 }
 
-void do_stat()
-{
-	ListIterator itr = NULL;
-	uint32_t stepid = 0;
-	jobacct_selected_step_t *selected_step = NULL;
-	acct_job_cond_t *job_cond = params.arch_cond->job_cond;
-
-	if(!job_cond->step_list || !list_count(job_cond->step_list)) {
-		fprintf(stderr, "No job list given to stat.\n");
-		return;
-	}
-
-	itr = list_iterator_create(job_cond->step_list);
-	while((selected_step = list_next(itr))) {
-		if(selected_step->stepid != NO_VAL)
-			stepid = selected_step->stepid;
-		else
-			stepid = 0;
-		sacct_stat(selected_step->jobid, stepid);
-	}
-	list_iterator_destroy(itr);
-}
-
 void sacct_init()
 {
 	_init_params();
+	print_fields_list = list_create(NULL);
+	print_fields_itr = list_iterator_create(print_fields_list);
 }
 
 void sacct_fini()
 {
+	if(print_fields_itr)
+		list_iterator_destroy(print_fields_itr);
+	if(print_fields_list)
+		list_destroy(print_fields_list);
 	if(jobs)
 		list_destroy(jobs);
+	if(qos_list)
+		list_destroy(qos_list);
+
 	if(params.opt_completion)
 		g_slurm_jobcomp_fini();
 	else {
 		acct_storage_g_close_connection(&acct_db_conn);
 		slurm_acct_storage_fini();
 	}
-
-	destroy_acct_archive_cond(params.arch_cond);
+	xfree(params.opt_field_list);
+	xfree(params.opt_filein);
+	destroy_acct_job_cond(params.job_cond);
 }
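
A note on the options.c change above: when no --starttime is given,
parse_command_line() now rounds the current time down to local midnight
before querying.  A minimal standalone sketch of that rounding in plain C
(the names local_midnight and tm_buf are illustrative, not part of the
patch):

    #include <stdio.h>
    #include <time.h>

    /* Round a timestamp down to local midnight, the way sacct derives
     * its default --starttime.  Falls back to the raw time on error. */
    static time_t local_midnight(time_t now)
    {
        struct tm tm_buf;

        if (!localtime_r(&now, &tm_buf))
            return now;
        tm_buf.tm_sec = 0;
        tm_buf.tm_min = 0;
        tm_buf.tm_hour = 0;
        tm_buf.tm_isdst = -1;   /* let mktime() resolve DST */
        return mktime(&tm_buf);
    }

    int main(void)
    {
        time_t start = local_midnight(time(NULL));
        printf("jobs eligible from %s", ctime(&start));
        return 0;
    }

Setting tm_isdst to -1 before mktime() lets the C library decide whether
DST is in effect at midnight, so the default window does not shift by an
hour across DST changes.
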
diff --git a/src/sacct/print.c b/src/sacct/print.c
index 700701329ff9cd18c1e3347b311376e82f3d8e4b..b3ff938bc825d25b02df55939c2e014a19de116d 100644
--- a/src/sacct/print.c
+++ b/src/sacct/print.c
@@ -6,10 +6,11 @@
  *  Copyright (C) 2006 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Danny Auble <da@llnl.gov>.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -39,21 +40,19 @@
 
 #include "sacct.h"
 #include "src/common/parse_time.h"
-#include "src/common/hostlist.h"
 #include "slurm.h"
-#define FORMAT_STRING_SIZE 34
 
-void _elapsed_time(long secs, long usecs, char *str);
+char *_elapsed_time(long secs, long usecs);
 
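+/* Returns an xmalloc()'d elapsed-time string (caller must xfree it),
+ * or NULL when secs is negative or NO_VAL. */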
-void _elapsed_time(long secs, long usecs, char *str)
+char *_elapsed_time(long secs, long usecs)
 {
 	long	days, hours, minutes, seconds;
 	long    subsec = 0;
+	char *str = NULL;
+
+	if(secs < 0 || secs == NO_VAL)
+		return NULL;
 	
-	if(secs < 0) {
-		snprintf(str, FORMAT_STRING_SIZE, "'N/A'");
-		return;
-	}
 	
 	while (usecs >= 1E6) {
 		secs++;
@@ -69,1306 +68,1233 @@ void _elapsed_time(long secs, long usecs, char *str)
 	days    =  secs / 86400;
 
 	if (days) 
-		snprintf(str, FORMAT_STRING_SIZE,
-			 "%2.2ld-%2.2ld:%2.2ld:%2.2ld",
-		         days, hours, minutes, seconds);
+		str = xstrdup_printf("%ld-%2.2ld:%2.2ld:%2.2ld",
+				     days, hours, minutes, seconds);
 	else if (hours)
-		snprintf(str, FORMAT_STRING_SIZE,
-			 "%2.2ld:%2.2ld:%2.2ld",
-		         hours, minutes, seconds);
+		str = xstrdup_printf("%2.2ld:%2.2ld:%2.2ld",
+				     hours, minutes, seconds);
 	else if(subsec)
-		snprintf(str, FORMAT_STRING_SIZE,
-			 "%2.2ld:%2.2ld.%3.3ld",
-		         minutes, seconds, subsec);
+		str = xstrdup_printf("%2.2ld:%2.2ld.%3.3ld",
+				     minutes, seconds, subsec);
 	else
-		snprintf(str, FORMAT_STRING_SIZE,
-			 "00:%2.2ld:%2.2ld",
-		         minutes, seconds);
-}
-
-void print_fields(type_t type, void *object)
-{
-	int f, pf;
-	for (f=0; f<nprintfields; f++) {
-		pf = printfields[f];
-		if (f)
-			printf(" ");
-		(fields[pf].print_routine)(type, object);
-	}
-	printf("\n");
-}
-
-/* Field-specific print routines */
-
-void print_cpu(type_t type, void *object)
-{
-	jobacct_job_rec_t *job = (jobacct_job_rec_t *)object;
-	jobacct_step_rec_t *step = (jobacct_step_rec_t *)object;
-	char str[FORMAT_STRING_SIZE];
-	
-	switch(type) {
-	case HEADLINE:
-		printf("%-15s", "Cpu");
-		break;
-	case UNDERSCORE:
-		printf("%-15s", "---------------");
-		break;
-	case JOB:
-		_elapsed_time(job->tot_cpu_sec, job->tot_cpu_usec, str);
-		printf("%-15s", str);
-		break;
-	case JOBSTEP:
-		_elapsed_time(step->tot_cpu_sec, step->tot_cpu_usec, str);
-		printf("%-15s", str);
-		break;
-	default:
-		printf("%-15s", "n/a");
-		break;
-	} 
-}
-
-void print_elapsed(type_t type, void *object)
-{
-	jobacct_job_rec_t *job = (jobacct_job_rec_t *)object;
-	jobacct_step_rec_t *step = (jobacct_step_rec_t *)object;
-	char str[FORMAT_STRING_SIZE];
-
-	switch(type) {
-	case HEADLINE:
-		printf("%-15s", "Elapsed");
-		break;
-	case UNDERSCORE:
-		printf("%-15.15s", "---------------");
-		break;
-	case JOB:
-		_elapsed_time(job->elapsed, 0, str);
-		printf("%-15s", str);
-		break;
-	case JOBSTEP:
-		_elapsed_time(step->elapsed, 0, str);
-		printf("%-15s", str);
-		break;
-	default:
-		printf("%-15s", "n/a");
-		break;
-	} 
+		str = xstrdup_printf("00:%2.2ld:%2.2ld",
+				     minutes, seconds);
+	return str;
 }
 
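+/* Look up a QOS name by id; returns "Unknown" for an id that is not
+ * in qos_list, or NULL when the list is missing or qosid is NO_VAL. */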
-void print_exitcode(type_t type, void *object)
+static char *_find_qos_name_from_list(List qos_list, int qosid)
 {
-	jobacct_job_rec_t *job = (jobacct_job_rec_t *)object;
-	jobacct_step_rec_t *step = (jobacct_step_rec_t *)object;
-	char tmp[9];
-	uint16_t term_sig = 0;
-
-	switch(type) {
-	case HEADLINE:
-		printf("%-8s", "ExitCode");
-		break;
-	case UNDERSCORE:
-		printf("%-8s", "--------");
-		break;
-	case JOB:
-		if (WIFSIGNALED(job->exitcode))
-			term_sig = WTERMSIG(job->exitcode);
+	ListIterator itr = NULL;
+	acct_qos_rec_t *qos = NULL;
 	
-		snprintf(tmp, sizeof(tmp), "%u:%u",
-			 WEXITSTATUS(job->exitcode), term_sig);
-		printf("%-8s", tmp);
-		break;
-	case JOBSTEP:
-		if (WIFSIGNALED(step->exitcode))
-			term_sig = WTERMSIG(step->exitcode);
+	if(!qos_list || qosid == NO_VAL)
+		return NULL;
 	
-		snprintf(tmp, sizeof(tmp), "%u:%u",
-			 WEXITSTATUS(step->exitcode), term_sig);
-		printf("%-8s", tmp);
-		break;
-	default:
-		printf("%-8s", "n/a");
-		break;
-	} 
-}
-
-void print_gid(type_t type, void *object)
-{ 
-	jobacct_job_rec_t *job = (jobacct_job_rec_t *)object;
-	jobcomp_job_rec_t *jobcomp = (jobcomp_job_rec_t *)object;
-	int32_t gid = -1;
-
-	switch(type) {
-	case HEADLINE:
-		printf("%-5s", "Gid");
-		break;
-	case UNDERSCORE:
-		printf("%-5s", "-----");
-		break;
-	case JOB:
-		gid = job->gid;
-		break;
-	case JOBCOMP:
-		printf("%-5u", jobcomp->gid);
-		break;
-	case JOBSTEP:
-		printf("%-5s", " ");
-		break;
-	default:
-		printf("%-5s", "n/a");
-		break;
-	} 
-
-	if(gid != -1) 
-		printf("%-5d", gid);
-}
-
-void print_group(type_t type, void *object)
-{ 
-	jobacct_job_rec_t *job = (jobacct_job_rec_t *)object;
-	jobcomp_job_rec_t *jobcomp = (jobcomp_job_rec_t *)object;
-	int gid = -1;
-	char	*tmp="(unknown)";
-	struct	group *gr = NULL;
-			
-	switch(type) {
-	case HEADLINE:
-		printf("%-9s", "Group");
-		break;
-	case UNDERSCORE:
-		printf("%-9s", "---------");
-		break;
-	case JOB:
-		gid = job->gid;
-		break;
-	case JOBCOMP:
-		printf("%-9s", jobcomp->gid_name);
-		break;
-	case JOBSTEP:
-		printf("%-9s", " ");
-		break;
-	default:
-		printf("%-9s", "n/a");
-		break;
+	itr = list_iterator_create(qos_list);
+	while((qos = list_next(itr))) {
+		if(qosid == qos->id)
+			break;
 	}
-	if(gid != -1) {
-		if ((gr=getgrgid(gid)))
-			tmp=gr->gr_name;
-		printf("%-9s", tmp);
-	} 
-}
-
-void print_job(type_t type, void *object)
-{
-	jobacct_job_rec_t *job = (jobacct_job_rec_t *)object;
+	list_iterator_destroy(itr);
 
-	switch(type) {
-	case HEADLINE:
-		printf("%-12s", "Job");
-		break;
-	case UNDERSCORE:
-		printf("%-12s", "------------");
-		break;
-	case JOB:
-		printf("%-12u", job->jobid);
-		break;
-	case JOBSTEP:
-		printf("%-12s", " ");
-		break;
-	default:
-		printf("%-12s", "n/a");
-		break;
-	} 
+	if(qos)
+		return qos->name;
+	else
+		return "Unknown";
 }
 
-void print_name(type_t type, void *object)
-{
-	jobacct_job_rec_t *job = (jobacct_job_rec_t *)object;
-	jobcomp_job_rec_t *jobcomp = (jobcomp_job_rec_t *)object;
-	jobacct_step_rec_t *step = (jobacct_step_rec_t *)object;
 
-	switch(type) {
-	case HEADLINE:
-		printf("%-18s", "Jobname");
-		break;
-	case UNDERSCORE:
-		printf("%-18s", "------------------");
-		break;
-	case JOB:
-		if(!job->jobname)
-			printf("%-18s", "unknown");			     
-		else if(strlen(job->jobname)<19)
-			printf("%-18s", job->jobname);
-		else
-			printf("%-15.15s...", job->jobname);
-			
-		break;
-	case JOBCOMP:
-		if(!jobcomp->jobname)
-			printf("%-18s", "unknown");			     
-		else if(strlen(jobcomp->jobname)<19)
-			printf("%-18s", jobcomp->jobname);
-		else
-			printf("%-15.15s...", jobcomp->jobname);
-			
-		break;
-	case JOBSTEP:
-		if(!step->stepname)
-			printf("%-18s", "unknown");			     
-		else if(strlen(step->stepname)<19)
-			printf("%-18s", step->stepname);
-		else
-			printf("%-15.15s...", step->stepname);
-		break;
-	default:
-		printf("%-18s", "n/a");
-		break;
-	} 
-}
 
-void print_jobid(type_t type, void *object)
+void print_fields(type_t type, void *object)
 {
 	jobacct_job_rec_t *job = (jobacct_job_rec_t *)object;
-	jobcomp_job_rec_t *jobcomp = (jobcomp_job_rec_t *)object;
-	jobacct_step_rec_t *step = (jobacct_step_rec_t *)object;
-	char outbuf[10];
-
-	switch(type) {
-	case HEADLINE:
-		printf("%-10s", "JobID");
-		break;
-	case UNDERSCORE:
-		printf("%-10s", "----------");
-		break;
-	case JOB:
-		printf("%-10u", job->jobid);
-		break;
-	case JOBCOMP:
-		printf("%-10u", jobcomp->jobid);
-		break;
-	case JOBSTEP:
-		snprintf(outbuf, sizeof(outbuf), "%u.%u",
-			 step->jobid,
-			 step->stepid);
-		printf("%-10s", outbuf);
-		break;
-	default:
-		printf("%-10s", "n/a");
-		break;
-	} 
-
-}
-
-void print_ncpus(type_t type, void *object)
-{ 
-	jobacct_job_rec_t *job = (jobacct_job_rec_t *)object;
-	jobacct_step_rec_t *step = (jobacct_step_rec_t *)object;
-
-	switch(type) {
-	case HEADLINE:
-		printf("%-7s", "Ncpus");
-		break;
-	case UNDERSCORE:
-		printf("%-7s", "-------");
-		break;
-	case JOB:
-		printf("%-7u", job->alloc_cpus);
-		break;
-	case JOBSTEP:
-		printf("%-7u", step->ncpus);
-		break;
-	default:
-		printf("%-7s", "n/a");
-		break;
-	} 
-}
-
-void print_nodes(type_t type, void *object)
-{ 
-	jobacct_job_rec_t *job = (jobacct_job_rec_t *)object;
-	jobcomp_job_rec_t *jobcomp = (jobcomp_job_rec_t *)object;
 	jobacct_step_rec_t *step = (jobacct_step_rec_t *)object;
+	jobcomp_job_rec_t *job_comp = (jobcomp_job_rec_t *)object;
+	print_field_t *field = NULL;
+	int curr_inx = 1;
+	struct passwd *pw = NULL;
+	struct	group *gr = NULL;
+	char outbuf[FORMAT_STRING_SIZE];
 	
 	switch(type) {
-	case HEADLINE:
-		printf("%-30s", "Nodes");
-		break;
-	case UNDERSCORE:
-		printf("%-30s", "------------------------------");
-		break;
-	case JOB:
-		printf("%-30s", job->nodes);
-		break;
-	case JOBCOMP:
-		printf("%-30s", jobcomp->nodelist);
-		break;
-	case JOBSTEP:
-		printf("%-30s", step->nodes);
-		break;
-	default:
-		printf("%-30s", "n/a");
-		break;
-	} 
-}
-
-void print_nnodes(type_t type, void *object)
-{ 
-	jobacct_job_rec_t *job = (jobacct_job_rec_t *)object;
-	jobcomp_job_rec_t *jobcomp = (jobcomp_job_rec_t *)object;
-	jobacct_step_rec_t *step = (jobacct_step_rec_t *)object;
-	char *tmp_char = NULL;
-	int tmp_int = NO_VAL;
-	hostlist_t hl = NULL;
-
-	switch(type) {
-	case HEADLINE:
-		printf("%-8s", "Node Cnt");
-		tmp_int = INFINITE;
-		break;
-	case UNDERSCORE:
-		printf("%-8s", "--------");
-		tmp_int = INFINITE;
-		break;
-	case JOB:
-		tmp_char = job->nodes;
-		break;
-	case JOBSTEP:
-		tmp_char = step->nodes;
-		break;
-	case JOBCOMP:
-		tmp_int = jobcomp->node_cnt;
-		break;
-	default:
-		break;
-	}
-	if(tmp_char) {
-		hl = hostlist_create(tmp_char);
-		tmp_int = hostlist_count(hl);
-		hostlist_destroy(hl);
-	}
-
-	if(tmp_int == INFINITE)
-		return;
-	else if(tmp_int == NO_VAL) 
-		printf("%-8s", "n/a");
-	else {
-		char outbuf[FORMAT_STRING_SIZE];
-		convert_num_unit((float)tmp_int, 
-				 outbuf, sizeof(outbuf), UNIT_NONE);
-		printf("%-8s", outbuf);
-	}
-}
-
-void print_ntasks(type_t type, void *object)
-{ 
-	jobacct_job_rec_t *job = (jobacct_job_rec_t *)object;
-	jobacct_step_rec_t *step = (jobacct_step_rec_t *)object;
-
-	switch(type) {
-	case HEADLINE:
-		printf("%-7s", "Ntasks");
-		break;
-	case UNDERSCORE:
-		printf("%-7s", "-------");
-		break;
-	case JOB:
-		printf("%-7u", job->alloc_cpus);
-		break;
-	case JOBSTEP:
-		printf("%-7u", step->ncpus);
-		break;
-	default:
-		printf("%-7s", "n/a");
-		break;
-	} 
-}
-
-void print_partition(type_t type, void *object)
-{ 
-	jobacct_job_rec_t *job = (jobacct_job_rec_t *)object;
-	jobcomp_job_rec_t *jobcomp = (jobcomp_job_rec_t *)object;
-
-	switch(type) {
-	case HEADLINE:
-		printf("%-10s", "Partition");
-		break;
-	case UNDERSCORE:
-		printf("%-10s", "----------");
-		break;
 	case JOB:
-		if(!job->partition)
-			printf("%-10s", "unknown");			     
-		else if(strlen(job->partition)<11)
-			printf("%-10s", job->partition);
-		else
-			printf("%-7.7s...", job->partition);
-		
-		break;
-	case JOBCOMP:
-		if(!jobcomp->partition)
-			printf("%-10s", "unknown");			     
-		else if(strlen(jobcomp->partition)<11)
-			printf("%-10s", jobcomp->partition);
-		else
-			printf("%-7.7s...", jobcomp->partition);
+		step = NULL;
+		if(!job->track_steps)
+			step = (jobacct_step_rec_t *)job->first_step_ptr;
+		/* Set this to avoid printing step info for things that
+		   don't mean anything, like an allocation that never
+		   ran anything. */
+		if(!step)
+			job->track_steps = 1;
 		
-		break;
-	case JOBSTEP:
-		printf("%-10s", " ");
 		break;
 	default:
-		printf("%-10s", "n/a");
 		break;
-	} 
-}
-
-void print_blockid(type_t type, void *object)
-{ 
-	jobacct_job_rec_t *job = (jobacct_job_rec_t *)object;
-	jobcomp_job_rec_t *jobcomp = (jobcomp_job_rec_t *)object;
+	}
 
-	switch(type) {
-	case HEADLINE:
-		printf("%-16s", "BlockID");
-		break;
-	case UNDERSCORE:
-		printf("%-16s", "----------------");
-		break;
-	case JOB:
-		if(!job->blockid)
-			printf("%-16s", "unknown");			     
-		else if(strlen(job->blockid)<17)
-			printf("%-16s", job->blockid);
-		else
-			printf("%-13.13s...", job->blockid);
-		
-		break;
-	case JOBCOMP:
-		if(!jobcomp->blockid)
-			printf("%-16s", "unknown");			     
-		else if(strlen(jobcomp->blockid)<17)
-			printf("%-16s", jobcomp->blockid);
-		else
-			printf("%-13.13s...", jobcomp->blockid);
+	list_iterator_reset(print_fields_itr);
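+	/* walk the requested fields in order; the final argument to each
+	 * print routine flags the last field so the row can be terminated */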
+	while((field = list_next(print_fields_itr))) {
+		char *tmp_char = NULL;
+		int tmp_int = NO_VAL, tmp_int2 = NO_VAL;
+
+		/* several cases below print outbuf only after filling it;
+		   clear it so NO_VAL values print as empty, not garbage */
+		outbuf[0] = '\0';
+
+		switch(field->type) {
+		case PRINT_ALLOC_CPUS:
+			switch(type) {
+			case JOB:
+				tmp_int = job->alloc_cpus;
+				/* if the job ran a step, fall through
+				   and use the step's CPU count instead */
+				if(!step)
+					break;
+			case JOBSTEP:
+				tmp_int = step->ncpus;
+				break;
+			case JOBCOMP:
+			default:
+				tmp_int = NO_VAL;
+				break;
+			}
+			field->print_routine(field,
+					     tmp_int,
+					     (curr_inx == field_count));
+			break;
+		case PRINT_ACCOUNT:
+			switch(type) {
+			case JOB:
+				tmp_char = job->account;
+				break;
+			case JOBSTEP:
+				tmp_char = step->job_ptr->account;
+				break;
+			case JOBCOMP:
+			default:
+				tmp_char = "n/a";
+				break;
+			}
+			field->print_routine(field,
+					     tmp_char,
+					     (curr_inx == field_count));
+			break;
+		case PRINT_ASSOCID:
+			switch(type) {
+			case JOB:
+				tmp_int = job->associd;
+				break;
+			case JOBSTEP:
+				tmp_int = step->job_ptr->associd;
+				break;
+			case JOBCOMP:
+			default:
+				tmp_int = NO_VAL;
+				break;
+			}
+			field->print_routine(field,
+					     tmp_int,
+					     (curr_inx == field_count));
+			break;
+		case PRINT_AVECPU:
+			switch(type) {
+			case JOB:
+				if(!job->track_steps) 
+					tmp_int = job->sacct.ave_cpu;
+				break;
+			case JOBSTEP:
+				tmp_int = step->sacct.ave_cpu;
+				break;
+			case JOBCOMP:
+			default:
+				break;
+			}
+			tmp_char = _elapsed_time((int)tmp_int, 0);
+			
+			field->print_routine(field,
+					     tmp_char,
+					     (curr_inx == field_count));
+			xfree(tmp_char);
+			break;
+		case PRINT_AVEPAGES:
+			switch(type) {
+			case JOB:
+				if(!job->track_steps)
+					tmp_int = job->sacct.ave_pages;
+				break;
+			case JOBSTEP:
+				tmp_int = step->sacct.ave_pages;
+				break;
+			case JOBCOMP:
+			default:
+				break;
+			}
+			if(tmp_int != NO_VAL)
+				convert_num_unit((float)tmp_int,
+						 outbuf, sizeof(outbuf),
+						 UNIT_KILO);
+			
+			field->print_routine(field,
+					     outbuf,
+					     (curr_inx == field_count));
+			break;
+		case PRINT_AVERSS:
+			switch(type) {
+			case JOB:
+				if(!job->track_steps)
+					tmp_int = job->sacct.ave_rss;
+				break;
+			case JOBSTEP:
+				tmp_int = step->sacct.ave_rss;
+				break;
+			case JOBCOMP:
+			default:
+				break;
+			}
+			if(tmp_int != NO_VAL)
+				convert_num_unit((float)tmp_int,
+						 outbuf, sizeof(outbuf),
+						 UNIT_KILO);
+			
+			field->print_routine(field,
+					     outbuf,
+					     (curr_inx == field_count));
+			break;
+		case PRINT_AVEVSIZE:
+			switch(type) {
+			case JOB:
+				if(!job->track_steps)
+					tmp_int = job->sacct.ave_vsize;
+				break;
+			case JOBSTEP:
+				tmp_int = step->sacct.ave_vsize;
+				break;
+			case JOBCOMP:
+			default:
+				break;
+			}
+			if(tmp_int != NO_VAL)
+				convert_num_unit((float)tmp_int,
+						 outbuf, sizeof(outbuf),
+						 UNIT_KILO);
+			
+			field->print_routine(field,
+					     outbuf,
+					     (curr_inx == field_count));
+			break;
+		case PRINT_BLOCKID:
+			switch(type) {
+			case JOB:
+				tmp_char = job->blockid;
+				break;
+			case JOBSTEP:
+				break;
+			case JOBCOMP:
+				tmp_char = job_comp->blockid;
+				break;
+			default:
+				break;
+			}
+			field->print_routine(field,
+					     tmp_char,
+					     (curr_inx == field_count));
+			break;
+		case PRINT_CLUSTER:
+			switch(type) {
+			case JOB:
+				tmp_char = job->cluster;
+				break;
+			case JOBSTEP:
+				tmp_char = step->job_ptr->cluster;
+				break;
+			case JOBCOMP:
+			default:
+				break;
+			}
+			field->print_routine(field,
+					     tmp_char,
+					     (curr_inx == field_count));
+			break;
+		case PRINT_CPU_TIME:
+			switch(type) {
+			case JOB:
+				tmp_int = job->elapsed * job->alloc_cpus;
+				break;
+			case JOBSTEP:
+				tmp_int = step->elapsed * step->ncpus;
+				break;
+			case JOBCOMP:
+				break;
+			default:
+				break;
+			}
+			field->print_routine(field,
+					     tmp_int,
+					     (curr_inx == field_count));
+			break;
+		case PRINT_CPU_TIME_RAW:
+			switch(type) {
+			case JOB:
+				tmp_int = job->elapsed * job->alloc_cpus;
+				break;
+			case JOBSTEP:
+				tmp_int = step->elapsed * step->ncpus;
+				break;
+			case JOBCOMP:
+				break;
+			default:
+				break;
+			}
+			field->print_routine(field,
+					     tmp_int,
+					     (curr_inx == field_count));
+			break;
+		case PRINT_ELAPSED:
+			switch(type) {
+			case JOB:
+				tmp_int = job->elapsed;
+				break;
+			case JOBSTEP:
+				tmp_int = step->elapsed;
+				break;
+			case JOBCOMP:
+				tmp_int = parse_time(job_comp->end_time, 1) -
+					parse_time(job_comp->start_time, 1);
+				break;
+			default:
+				tmp_int = NO_VAL;
+				break;
+			}
+			field->print_routine(field,
+					     tmp_int,
+					     (curr_inx == field_count));
+			break;
+		case PRINT_ELIGIBLE:
+			switch(type) {
+			case JOB:
+				tmp_int = job->eligible;
+				break;
+			case JOBSTEP:
+				tmp_int = step->start;
+				break;
+			case JOBCOMP:
+				break;
+			default:
+				break;
+			}
+			field->print_routine(field,
+					     tmp_int,
+					     (curr_inx == field_count));
+			break;
+		case PRINT_END:
+			switch(type) {
+			case JOB:
+				tmp_int = job->end;
+				break;
+			case JOBSTEP:
+				tmp_int = step->end;
+				break;
+			case JOBCOMP:
+				tmp_int = parse_time(job_comp->end_time, 1);
+				break;
+			default:
+				tmp_int = NO_VAL;
+				break;
+			}
+			field->print_routine(field,
+					     tmp_int,
+					     (curr_inx == field_count));
+			break;
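+		/* ExitCode is rendered as "status:signal", taken from
+		 * WEXITSTATUS() and WTERMSIG() */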
+		case PRINT_EXITCODE:
+			tmp_int = 0;
+			tmp_int2 = 0;
+			switch(type) {
+			case JOB:
+				tmp_int = job->exitcode;
+				break;
+			case JOBSTEP:
+				tmp_int = step->exitcode;
+				break;
+			case JOBCOMP:
+			default:
+				break;
+			}
+			if (WIFSIGNALED(tmp_int))
+				tmp_int2 = WTERMSIG(tmp_int);
+			
+			snprintf(outbuf, sizeof(outbuf), "%d:%d",
+				 WEXITSTATUS(tmp_int), tmp_int2);
+
+			field->print_routine(field,
+					     outbuf,
+					     (curr_inx == field_count));
+			break;
+		case PRINT_GID:
+			switch(type) {
+			case JOB:
+				tmp_int = job->gid;
+				break;
+			case JOBSTEP:
+				tmp_int = NO_VAL;
+				break;
+			case JOBCOMP:
+				tmp_int = job_comp->gid;
+				break;
+			default:
+				tmp_int = NO_VAL;
+				break;
+			}
+			field->print_routine(field,
+					     tmp_int,
+					     (curr_inx == field_count));
+			break;
+		case PRINT_GROUP:
+			switch(type) {
+			case JOB:
+				tmp_int = job->gid;
+				break;
+			case JOBSTEP:
+				tmp_int = NO_VAL;
+				break;
+			case JOBCOMP:
+				tmp_int = job_comp->gid;
+				break;
+			default:
+				tmp_int = NO_VAL;
+				break;
+			}
+			tmp_char = NULL;
+			if ((gr = getgrgid(tmp_int)))
+				tmp_char = gr->gr_name;
+
+			field->print_routine(field,
+					     tmp_char,
+					     (curr_inx == field_count));
+			break;
+		case PRINT_JOBID:
+			switch(type) {
+			case JOB:
+				tmp_char = xstrdup_printf("%u", job->jobid);
+				break;
+			case JOBSTEP:
+				tmp_char = xstrdup_printf("%u.%u",
+							  step->job_ptr->jobid,
+							  step->stepid);
+				break;
+			case JOBCOMP:
+				tmp_char = xstrdup_printf("%u",
+							  job_comp->jobid);
+				break;
+			default:
+				break;
+			}
+			field->print_routine(field,
+					     tmp_char,
+					     (curr_inx == field_count));
+			xfree(tmp_char);
+			break;
+		case PRINT_JOBNAME:
+			switch(type) {
+			case JOB:
+				tmp_char = job->jobname;
+				break;
+			case JOBSTEP:
+				tmp_char = step->stepname;
+				break;
+			case JOBCOMP:
+				tmp_char = job_comp->jobname;
+				break;
+			default:
+				tmp_char = NULL;
+				break;
+			}
+			field->print_routine(field,
+					     tmp_char,
+					     (curr_inx == field_count));
+			break;
+		case PRINT_LAYOUT:
+			switch(type) {
+			case JOB:
+				/* 'step' is intentional here: when steps
+				   are not tracked, the job's single step
+				   carries the task layout */
+				if(!job->track_steps) 
+					tmp_char = slurm_step_layout_type_name(
+						step->task_dist);
+				break;
+			case JOBSTEP:
+				tmp_char = slurm_step_layout_type_name(
+					step->task_dist);
+				break;
+			case JOBCOMP:
+				break;
+			default:
+				tmp_char = NULL;
+				break;
+			}
+			field->print_routine(field,
+					     tmp_char,
+					     (curr_inx == field_count));
+			break;
+		case PRINT_MAXPAGES:
+			switch(type) {
+			case JOB:
+				if(!job->track_steps)
+					tmp_int = job->sacct.max_pages;
+				break;
+			case JOBSTEP:
+				tmp_int = step->sacct.max_pages;
+				break;
+			case JOBCOMP:
+			default:
+				break;
+			}
+			if(tmp_int != NO_VAL)
+				convert_num_unit((float)tmp_int,
+						 outbuf, sizeof(outbuf),
+						 UNIT_KILO);
+			
+			field->print_routine(field,
+					     outbuf,
+					     (curr_inx == field_count));
+			break;
+		case PRINT_MAXPAGESNODE:
+			switch(type) {
+			case JOB:
+				if(!job->track_steps)
+					tmp_char = find_hostname(
+						job->sacct.max_pages_id.nodeid,
+						job->nodes);
+				break;
+			case JOBSTEP:
+				tmp_char = find_hostname(
+					step->sacct.max_pages_id.nodeid,
+					step->nodes);
+				break;
+			case JOBCOMP:
+			default:
+				tmp_char = NULL;
+				break;
+			}
+			field->print_routine(field,
+					     tmp_char,
+					     (curr_inx == field_count));
+			xfree(tmp_char);
+			break;
+		case PRINT_MAXPAGESTASK:
+			switch(type) {
+			case JOB:
+				if(!job->track_steps)
+					tmp_int = 
+						job->sacct.max_pages_id.taskid;
+				break;
+			case JOBSTEP:
+				tmp_int = step->sacct.max_pages_id.taskid;
+				break;
+			case JOBCOMP:
+			default:
+				tmp_int = NO_VAL;
+				break;
+			}
+			field->print_routine(field,
+					     tmp_int,
+					     (curr_inx == field_count));
+			break;
+		case PRINT_MAXRSS:
+			switch(type) {
+			case JOB:
+				if(!job->track_steps)
+					tmp_int = job->sacct.max_rss;
+				break;
+			case JOBSTEP:
+				tmp_int = step->sacct.max_rss;
+				break;
+			case JOBCOMP:
+			default:
+				break;
+			}
+			if(tmp_int != NO_VAL)
+				convert_num_unit((float)tmp_int,
+						 outbuf, sizeof(outbuf),
+						 UNIT_KILO);
+			
+			field->print_routine(field,
+					     outbuf,
+					     (curr_inx == field_count));
+			break;
+		case PRINT_MAXRSSNODE:
+			switch(type) {
+			case JOB:
+				if(!job->track_steps)
+					tmp_char = find_hostname(
+						job->sacct.max_rss_id.nodeid,
+						job->nodes);
+				break;
+			case JOBSTEP:
+				tmp_char = find_hostname(
+					step->sacct.max_rss_id.nodeid,
+					step->nodes);
+				break;
+			case JOBCOMP:
+			default:
+				tmp_char = NULL;
+				break;
+			}
+			field->print_routine(field,
+					     tmp_char,
+					     (curr_inx == field_count));
+			xfree(tmp_char);
+			break;
+		case PRINT_MAXRSSTASK:
+			switch(type) {
+			case JOB:
+				if(!job->track_steps)
+					tmp_int = job->sacct.max_rss_id.taskid;
+				break;
+			case JOBSTEP:
+				tmp_int = step->sacct.max_rss_id.taskid;
+				break;
+			case JOBCOMP:
+			default:
+				tmp_int = NO_VAL;
+				break;
+			}
+			field->print_routine(field,
+					     tmp_int,
+					     (curr_inx == field_count));
+			break;
+		case PRINT_MAXVSIZE:
+			switch(type) {
+			case JOB:
+				if(!job->track_steps)
+					tmp_int = job->sacct.max_vsize;
+				break;
+			case JOBSTEP:
+				tmp_int = step->sacct.max_vsize;
+				break;
+			case JOBCOMP:
+			default:
+				tmp_int = NO_VAL;
+				break;
+			}
+			if(tmp_int != NO_VAL)
+				convert_num_unit((float)tmp_int,
+						 outbuf, sizeof(outbuf),
+						 UNIT_KILO);
+			
+			field->print_routine(field,
+					     outbuf,
+					     (curr_inx == field_count));
+			break;
+		case PRINT_MAXVSIZENODE:
+			switch(type) {
+			case JOB:
+				if(!job->track_steps)
+					tmp_char = find_hostname(
+						job->sacct.max_vsize_id.nodeid,
+						job->nodes);
+				break;
+			case JOBSTEP:
+				tmp_char = find_hostname(
+					step->sacct.max_vsize_id.nodeid,
+					step->nodes);
+				break;
+			case JOBCOMP:
+			default:
+				tmp_char = NULL;
+				break;
+			}
+			field->print_routine(field,
+					     tmp_char,
+					     (curr_inx == field_count));
+			xfree(tmp_char);
+			break;
+		case PRINT_MAXVSIZETASK:
+			switch(type) {
+			case JOB:
+				if(!job->track_steps)
+					tmp_int =
+						job->sacct.max_vsize_id.taskid;
+				break;
+			case JOBSTEP:
+				tmp_int = step->sacct.max_vsize_id.taskid;
+				break;
+			case JOBCOMP:
+			default:
+				tmp_int = NO_VAL;
+				break;
+			}
+			field->print_routine(field,
+					     tmp_int,
+					     (curr_inx == field_count));
+			break;
+		case PRINT_MINCPU:
+			switch(type) {
+			case JOB:
+				if(!job->track_steps)
+					tmp_int = job->sacct.min_cpu;
+				break;
+			case JOBSTEP:
+				tmp_int = step->sacct.min_cpu;
+				break;
+			case JOBCOMP:
+			default:
+				break;
+			}
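+			/* min_cpu holds seconds of CPU time; render
+			   it as an elapsed-time string. */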
+			tmp_char = _elapsed_time((int)tmp_int, 0);
+			field->print_routine(field,
+					     tmp_char,
+					     (curr_inx == field_count));
+			xfree(tmp_char);
+			break;
+		case PRINT_MINCPUNODE:
+			switch(type) {
+			case JOB:
+				if(!job->track_steps)
+					tmp_char = find_hostname(
+						job->sacct.min_cpu_id.nodeid,
+						job->nodes);
+				break;
+			case JOBSTEP:
+				tmp_char = find_hostname(
+					step->sacct.min_cpu_id.nodeid,
+					step->nodes);
+				break;
+			case JOBCOMP:
+			default:
+				tmp_char = NULL;
+				break;
+			}
+			field->print_routine(field,
+					     tmp_char,
+					     (curr_inx == field_count));
+			xfree(tmp_char);
+			break;
+		case PRINT_MINCPUTASK:
+			switch(type) {
+			case JOB:
+				if(!job->track_steps)
+					tmp_int = job->sacct.min_cpu_id.taskid;
+				break;
+			case JOBSTEP:
+				tmp_int = step->sacct.min_cpu_id.taskid;
+				break;
+			case JOBCOMP:
+			default:
+				tmp_int = NO_VAL;
+				break;
+			}
+			field->print_routine(field,
+					     tmp_int,
+					     (curr_inx == field_count));
+			break;
+		case PRINT_NODELIST:
+			switch(type) {
+			case JOB:
+				tmp_char = job->nodes;
+				break;
+			case JOBSTEP:
+				tmp_char = step->nodes;
+				break;
+			case JOBCOMP:
+				tmp_char = job_comp->nodelist;
+				break;
+			default:
+				break;
+			}
+			field->print_routine(field,
+					     tmp_char,
+					     (curr_inx == field_count));
+			break;
+		case PRINT_NNODES:
+			switch(type) {
+			case JOB:
+				tmp_int = job->alloc_nodes;
+				tmp_char = job->nodes;
+				break;
+			case JOBSTEP:
+				tmp_int = step->nnodes;
+				tmp_char = step->nodes;
+				break;
+			case JOBCOMP:
+				tmp_int = job_comp->node_cnt;
+				tmp_char = job_comp->nodelist;
+				break;
+			default:
+				break;
+			}
+			
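+			/* If no node count was recorded, derive it
+			   from the node list instead. */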
+			if(!tmp_int) {
+				hostlist_t hl = hostlist_create(tmp_char);
+				tmp_int = hostlist_count(hl);
+				hostlist_destroy(hl);
+			}
+			convert_num_unit((float)tmp_int, 
+					 outbuf, sizeof(outbuf), UNIT_NONE);
+			field->print_routine(field,
+					     outbuf,
+					     (curr_inx == field_count));
+			break;
+		case PRINT_NTASKS:
+			switch(type) {
+			case JOB:
+				if(!job->track_steps && !step)
+					tmp_int = job->alloc_cpus;
+				// without a step record there is
+				// nothing better to report
+				if(!step)
+					break;
+				// otherwise fall through and use the
+				// step's task count
+			case JOBSTEP:
+				tmp_int = step->ntasks;
+				break;
+			case JOBCOMP:
+			default:
+
+				break;
+			}
+			field->print_routine(field,
+					     tmp_int,
+					     (curr_inx == field_count));
+			break;
+		case PRINT_PRIO:
+			switch(type) {
+			case JOB:
+				tmp_int = job->priority;
+				break;
+			case JOBSTEP:
+
+				break;
+			case JOBCOMP:
+
+				break;
+			default:
+
+				break;
+			}
+			field->print_routine(field,
+					     tmp_int,
+					     (curr_inx == field_count));
+			break;
+		case PRINT_PARTITION:
+			switch(type) {
+			case JOB:
+				tmp_char = job->partition;
+				break;
+			case JOBSTEP:
+
+				break;
+			case JOBCOMP:
+				tmp_char = job_comp->partition;
+				break;
+			default:
+
+				break;
+			}
+			field->print_routine(field,
+					     tmp_char,
+					     (curr_inx == field_count));
+			break;
+		case PRINT_QOS:
+			switch(type) {
+			case JOB:
+				tmp_int = job->qos;
+				break;
+			case JOBSTEP:
+
+				break;
+			case JOBCOMP:
+
+				break;
+			default:
+
+				break;
+			}
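+			/* Fetch the QOS list from the accounting
+			   storage once and reuse it for later rows. */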
+			if(!qos_list) 
+				qos_list = acct_storage_g_get_qos(
+					acct_db_conn, getuid(), NULL);
 		
-		break;
-	case JOBSTEP:
-		printf("%-16s", " ");
-		break;
-	default:
-		printf("%-16s", "n/a");
-		break;
-	} 
-}
-
-void print_pages(type_t type, void *object)
-{ 
-	jobacct_job_rec_t *job = (jobacct_job_rec_t *)object;
-	jobacct_step_rec_t *step = (jobacct_step_rec_t *)object;
-	char outbuf[FORMAT_STRING_SIZE];
-	char buf1[FORMAT_STRING_SIZE];
-	char buf2[FORMAT_STRING_SIZE];
-	char buf3[FORMAT_STRING_SIZE];
-	sacct_t sacct;
-	char *nodes = NULL;
-	uint32_t pos;
-
-	switch(type) {
-	case HEADLINE:
-		printf("%-34s", "MaxPages/Node:Task - Ave");
-		break;
-	case UNDERSCORE:
-		printf("%-34s", "----------------------------------");
-		break;
-	case JOB:
-		sacct = job->sacct;
-		nodes = job->nodes;
-		pos = sacct.min_cpu_id.nodeid;				 
-		convert_num_unit((float)sacct.max_pages, 
-				 buf1, sizeof(buf1), UNIT_NONE);
-
-		if(job->track_steps)
-			snprintf(outbuf, FORMAT_STRING_SIZE, "%s/- - -", buf1);
-		else {
-			convert_num_unit((float)sacct.ave_pages,
-					 buf2, sizeof(buf2), UNIT_NONE);
-			find_hostname(pos, nodes, buf3);
-			snprintf(outbuf, FORMAT_STRING_SIZE, "%s/%s:%u - %s", 
-				 buf1,
-				 buf3,
-				 sacct.max_pages_id.taskid, 
-				 buf2);
-		}
-		printf("%-34s", outbuf);
-		break;
-	case JOBSTEP:
-		sacct = step->sacct;
-		nodes = step->nodes;
-		pos = sacct.min_cpu_id.nodeid;				 
-		convert_num_unit((float)sacct.max_pages, buf1, sizeof(buf1),
-				 UNIT_NONE);
-		convert_num_unit((float)sacct.ave_pages, buf2, sizeof(buf2),
-				 UNIT_NONE);
-		find_hostname(pos, nodes, buf3);
-		snprintf(outbuf, FORMAT_STRING_SIZE, "%s/%s:%u - %s", 
-			 buf1,
-			 buf3,
-			 sacct.max_pages_id.taskid, 
-			 buf2);
-		printf("%-34s", outbuf);
-		break;
-	default:
-		printf("%-34s", "n/a");
-		break;
-	} 
-}
-
-void print_rss(type_t type, void *object)
-{ 
-	jobacct_job_rec_t *job = (jobacct_job_rec_t *)object;
-	jobacct_step_rec_t *step = (jobacct_step_rec_t *)object;
-	char outbuf[FORMAT_STRING_SIZE];
-	char buf1[FORMAT_STRING_SIZE];
-	char buf2[FORMAT_STRING_SIZE];
-	char buf3[FORMAT_STRING_SIZE];
-	sacct_t sacct;
-	char *nodes = NULL;
-	uint32_t pos;
-
-	switch(type) {
-	case HEADLINE:
-		printf("%-34s", "MaxRSS/Node:Task - Ave");
-		break;
-	case UNDERSCORE:
-		printf("%-34s", "----------------------------------");
-		break;
-	case JOB:
-		sacct = job->sacct;
-		nodes = job->nodes;
-		pos = sacct.min_cpu_id.nodeid;				 
-		convert_num_unit((float)sacct.max_rss, buf1, sizeof(buf1),
-				 UNIT_KILO);
-
-		if(job->track_steps)
-			snprintf(outbuf, FORMAT_STRING_SIZE, "%s/- - -", buf1);
-		else {
-			convert_num_unit((float)sacct.ave_rss, 
-					 buf2, sizeof(buf2), UNIT_KILO);
-			find_hostname(pos, nodes, buf3);
-			snprintf(outbuf, FORMAT_STRING_SIZE, "%s/%s:%u - %s", 
-				 buf1,
-				 buf3, 
-				 sacct.max_rss_id.taskid, 
-				 buf2);
+			tmp_char = _find_qos_name_from_list(qos_list,
+							    tmp_int);
+			field->print_routine(field,
+					     tmp_char,
+					     (curr_inx == field_count));
+			break;
+		case PRINT_QOSRAW:
+			switch(type) {
+			case JOB:
+				tmp_int = job->qos;
+				break;
+			case JOBSTEP:
+
+				break;
+			case JOBCOMP:
+
+				break;
+			default:
+
+				break;
+			}
+			field->print_routine(field,
+					     tmp_int,
+					     (curr_inx == field_count));
+			break;
+		case PRINT_REQ_CPUS:
+			switch(type) {
+			case JOB:
+				tmp_int = job->req_cpus;
+				break;
+			case JOBSTEP:
+				tmp_int = step->ncpus;
+				break;
+			case JOBCOMP:
+
+				break;
+			default:
+
+				break;
+			}
+			field->print_routine(field,
+					     tmp_int,
+					     (curr_inx == field_count));
+			break;
+		case PRINT_RESV:
+			switch(type) {
+			case JOB:
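+				/* Reserved time: how long the job
+				   waited after becoming eligible, up
+				   to its start (or until now if it
+				   has not started). */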
+				if(job->start)
+					tmp_int = job->start - job->eligible;
+				else
+					tmp_int = time(NULL) - job->eligible;
+				break;
+			case JOBSTEP:
+				break;
+			case JOBCOMP:
+
+				break;
+			default:
+
+				break;
+			}
+			field->print_routine(field,
+					     tmp_int,
+					     (curr_inx == field_count));
+			break;
+		case PRINT_RESV_CPU:
+			switch(type) {
+			case JOB:
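+				/* Reserved CPU time: the wait before
+				   start scaled by the CPUs requested. */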
+				if(job->start)
+					tmp_int = (job->start - job->eligible)
+						* job->req_cpus;
+				else
+					tmp_int = (time(NULL) - job->eligible)
+						* job->req_cpus;
+				break;
+			case JOBSTEP:
+				break;
+			case JOBCOMP:
+
+				break;
+			default:
+
+				break;
+			}
+			field->print_routine(field,
+					     tmp_int,
+					     (curr_inx == field_count));
+			break;
+		case PRINT_RESV_CPU_RAW:
+			switch(type) {
+			case JOB:
+				if(job->start)
+					tmp_int = (job->start - job->eligible)
+						* job->req_cpus;
+				else
+					tmp_int = (time(NULL) - job->eligible)
+						* job->req_cpus;
+				break;
+			case JOBSTEP:
+				break;
+			case JOBCOMP:
+
+				break;
+			default:
+
+				break;
+			}
+			field->print_routine(field,
+					     tmp_int,
+					     (curr_inx == field_count));
+			break;
+		case PRINT_START:
+			switch(type) {
+			case JOB:
+				tmp_int = job->start;
+				break;
+			case JOBSTEP:
+				tmp_int = step->start;
+				break;
+			case JOBCOMP:
+				tmp_int = parse_time(job_comp->start_time, 1);
+				break;
+			default:
+
+				break;
+			}
+			field->print_routine(field,
+					     tmp_int,
+					     (curr_inx == field_count));
+			break;
+		case PRINT_STATE:
+			switch(type) {
+			case JOB:
+				tmp_int = job->state;
+				tmp_int2 = job->requid;
+				break;
+			case JOBSTEP:
+				tmp_int = step->state;
+				tmp_int2 = step->requid;
+				break;
+			case JOBCOMP:
+				tmp_char = job_comp->state;
+				break;
+			default:
+
+				break;
+			}
+			
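+			/* Cancelled records also note who requested
+			   the cancellation. */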
+			if ((tmp_int == JOB_CANCELLED) && (tmp_int2 != NO_VAL)) 
+				snprintf(outbuf, FORMAT_STRING_SIZE,
+					 "%s by %d",
+					 job_state_string(tmp_int),
+					 tmp_int2);
+			else if(tmp_int != NO_VAL)
+				snprintf(outbuf, FORMAT_STRING_SIZE,
+					 "%s",
+					 job_state_string(tmp_int));
+			else if(tmp_char)
+				snprintf(outbuf, FORMAT_STRING_SIZE,
+					 "%s",
+					 tmp_char);
+			
+			field->print_routine(field,
+					     outbuf,
+					     (curr_inx == field_count));
+			break;
+		case PRINT_SUBMIT:
+			switch(type) {
+			case JOB:
+				tmp_int = job->submit;
+				break;
+			case JOBSTEP:
+				tmp_int = step->start;
+				break;
+			case JOBCOMP:
+				tmp_int = parse_time(job_comp->start_time, 1);
+				break;
+			default:
+
+				break;
+			}
+			field->print_routine(field,
+					     tmp_int,
+					     (curr_inx == field_count));
+			break;
+		case PRINT_SUSPENDED:
+			switch(type) {
+			case JOB:
+				tmp_int = job->suspended;
+				break;
+			case JOBSTEP:
+				tmp_int = step->suspended;
+				break;
+			case JOBCOMP:
+
+				break;
+			default:
+
+				break;
+			}
+			field->print_routine(field,
+					     tmp_int,
+					     (curr_inx == field_count));
+			break;
+		case PRINT_SYSTEMCPU:
+			switch(type) {
+			case JOB:
+				tmp_int = job->sys_cpu_sec;
+				tmp_int2 = job->sys_cpu_usec;
+				break;
+			case JOBSTEP:
+				tmp_int = step->sys_cpu_sec;
+				tmp_int2 = step->sys_cpu_usec;
+
+				break;
+			case JOBCOMP:
+
+				break;
+			default:
+
+				break;
+			}
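+			/* _elapsed_time() returns an allocated
+			   string; it is xfree'd after printing. */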
+			tmp_char = _elapsed_time(tmp_int, tmp_int2);
+
+			field->print_routine(field,
+					     tmp_char,
+					     (curr_inx == field_count));
+			xfree(tmp_char);
+			break;
+		case PRINT_TIMELIMIT:
+			switch(type) {
+			case JOB:
+				
+				break;
+			case JOBSTEP:
+
+				break;
+			case JOBCOMP:
+				tmp_char = job_comp->timelimit;
+				break;
+			default:
+
+				break;
+			}
+			field->print_routine(field,
+					     tmp_char,
+					     (curr_inx == field_count));
+			break;
+		case PRINT_TOTALCPU:
+			switch(type) {
+			case JOB:
+				tmp_int = job->tot_cpu_sec;
+				tmp_int2 = job->tot_cpu_usec;
+				break;
+			case JOBSTEP:
+				tmp_int = step->tot_cpu_sec;
+				tmp_int2 = step->tot_cpu_usec;
+				break;
+			case JOBCOMP:
+
+				break;
+			default:
+
+				break;
+			}
+			tmp_char = _elapsed_time(tmp_int, tmp_int2);
+
+			field->print_routine(field,
+					     tmp_char,
+					     (curr_inx == field_count));
+			xfree(tmp_char);
+			break;
+		case PRINT_UID:
+			switch(type) {
+			case JOB:
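+				/* Resolve the stored user name to a
+				   uid when possible; otherwise fall
+				   back to the recorded uid. */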
+				if(job->user) {
+					if ((pw=getpwnam(job->user)))
+						tmp_int = pw->pw_uid;
+				} else 
+					tmp_int = job->uid;
+				break;
+			case JOBSTEP:
+				break;
+			case JOBCOMP:
+				tmp_int = job_comp->uid;
+				break;
+			default:
+
+				break;
+			}
+			
+			field->print_routine(field,
+					     tmp_int,
+					     (curr_inx == field_count));
+			break;
+		case PRINT_USER:
+			switch(type) {
+			case JOB:
+				if(job->user)
+					tmp_char = job->user;
+				else if(job->uid != -1) {
+					if ((pw=getpwuid(job->uid)))
+						tmp_char = pw->pw_name;	
+				}				
+				break;
+			case JOBSTEP:
+
+				break;
+			case JOBCOMP:
+				tmp_char = job_comp->uid_name;
+				break;
+			default:
+
+				break;
+			}
+
+			field->print_routine(field,
+					     tmp_char,
+					     (curr_inx == field_count));
+			break;
+		case PRINT_USERCPU:
+			switch(type) {
+			case JOB:
+				tmp_int = job->user_cpu_sec;
+				tmp_int2 = job->user_cpu_usec;
+				break;
+			case JOBSTEP:
+				tmp_int = step->user_cpu_sec;
+				tmp_int2 = step->user_cpu_usec;
+
+				break;
+			case JOBCOMP:
+
+				break;
+			default:
+
+				break;
+			}
+			tmp_char = _elapsed_time(tmp_int, tmp_int2);
+
+			field->print_routine(field,
+					     tmp_char,
+					     (curr_inx == field_count));
+			xfree(tmp_char);
+			break;
+		case PRINT_WCKEY:
+			switch(type) {
+			case JOB:
+				tmp_char = job->wckey;
+				break;
+			case JOBSTEP:
+
+				break;
+			case JOBCOMP:
+
+				break;
+			default:
+
+				break;
+			}
+			field->print_routine(field,
+					     tmp_char,
+					     (curr_inx == field_count));
+			break;
+		case PRINT_WCKEYID:
+			switch(type) {
+			case JOB:
+				tmp_int = job->wckeyid;
+				break;
+			case JOBSTEP:
+
+				break;
+			case JOBCOMP:
+
+				break;
+			default:
+
+				break;
+			}
+			field->print_routine(field,
+					     tmp_int,
+					     (curr_inx == field_count));
+			break;
+		default:
+			break;
 		}
-		printf("%-34s", outbuf);
-		break;
-	case JOBSTEP:
-		sacct = step->sacct;
-		nodes = step->nodes;
-		pos = sacct.min_cpu_id.nodeid;				 
-		convert_num_unit((float)sacct.max_rss, buf1, sizeof(buf1),
-				 UNIT_KILO);
-		convert_num_unit((float)sacct.ave_rss, buf2, sizeof(buf2),
-				 UNIT_KILO);
-		find_hostname(pos, nodes, buf3);
-		snprintf(outbuf, FORMAT_STRING_SIZE, "%s/%s:%u - %s", 
-			 buf1,
-			 buf3, 
-			 sacct.max_rss_id.taskid, 
-			 buf2);
-		printf("%-34s", outbuf);
-		break;
-	default:
-		printf("%-34s", "n/a");
-		break;
-	} 
+		curr_inx++;
+	}
+	printf("\n");
 }
-
-void print_state(type_t type, void *object)
-{ 
-	jobacct_job_rec_t *job = (jobacct_job_rec_t *)object;
-	jobcomp_job_rec_t *jobcomp = (jobcomp_job_rec_t *)object;
-	jobacct_step_rec_t *step = (jobacct_step_rec_t *)object;
-
-	switch(type) {
-	case HEADLINE:
-		printf("%-20s", "State");
-		break;
-	case UNDERSCORE:
-		printf("%-20s", "--------------------");
-		break;
-	case JOB:
-		if ( job->state == JOB_CANCELLED) {
-			printf ("%-10s by %6d",
-				job_state_string(job->state), job->requid);
-		}
-		else {
-			printf("%-20s", job_state_string(job->state));
-		}
-		break;
-	case JOBCOMP:
-		printf("%-20s", jobcomp->state);
-		break;
-	case JOBSTEP:
-		if ( step->state == JOB_CANCELLED) {
-			printf ("%-10s by %6d",
-				job_state_string(step->state), step->requid);
-		}
-		else {
-			printf("%-20s", job_state_string(step->state));
-		}
-		break;
-	default:
-		printf("%-20s", "n/a");
-		break;
-	} 
-}
-
-void print_submit(type_t type, void *object)
-{ 
-	jobacct_job_rec_t *job = (jobacct_job_rec_t *)object;
-	jobacct_step_rec_t *step = (jobacct_step_rec_t *)object;
-	char time_str[32];
-		
-	switch(type) {
-	case HEADLINE:
-		printf("%-14s", "Submit Time");
-		break;
-	case UNDERSCORE:
-		printf("%-14.14s", "--------------");
-		break;
-	case JOB:
-		slurm_make_time_str(&job->submit, 
-				    time_str, 
-				    sizeof(time_str));
-		printf("%-14s", time_str);
-		break;
-	case JOBSTEP:
-		slurm_make_time_str(&step->start, 
-				    time_str, 
-				    sizeof(time_str));
-		printf("%-14s", time_str);
-		break;
-	default:
-		printf("%-14s", "n/a");
-		break;
-	} 
-}
-
-void print_eligible(type_t type, void *object)
-{ 
-	jobacct_job_rec_t *job = (jobacct_job_rec_t *)object;
-	jobacct_step_rec_t *step = (jobacct_step_rec_t *)object;
-	char time_str[32];
-		
-	switch(type) {
-	case HEADLINE:
-		printf("%-14s", "Eligible Time");
-		break;
-	case UNDERSCORE:
-		printf("%-14.14s", "--------------");
-		break;
-	case JOB:
-		slurm_make_time_str(&job->eligible, 
-				    time_str, 
-				    sizeof(time_str));
-		printf("%-14s", time_str);
-		break;
-	case JOBSTEP:
-		slurm_make_time_str(&step->start, 
-				    time_str, 
-				    sizeof(time_str));
-		printf("%-14s", time_str);
-		break;
-	default:
-		printf("%-14s", "n/a");
-		break;
-	} 
-}
-
-void print_start(type_t type, void *object)
-{ 
-	jobacct_job_rec_t *job = (jobacct_job_rec_t *)object;
-	jobcomp_job_rec_t *jobcomp = (jobcomp_job_rec_t *)object;
-	jobacct_step_rec_t *step = (jobacct_step_rec_t *)object;
-	char time_str[19];
-	
-	switch(type) {
-	case HEADLINE:
-		printf("%-14s", "Start Time");
-		break;
-	case UNDERSCORE:
-		printf("%-14.14s", "-------------------");
-		break;
-	case JOB:
-		slurm_make_time_str(&job->start, 
-				    time_str, 
-				    sizeof(time_str));
-		printf("%-14s", time_str);
-		break;
-	case JOBCOMP:
-		printf("%-14s", jobcomp->start_time);
-		break;
-	case JOBSTEP:
-		slurm_make_time_str(&step->start, 
-				    time_str, 
-				    sizeof(time_str));
-		printf("%-14s", time_str);
-		break;
-	default:
-		printf("%-14s", "n/a");
-		break;
-	} 
-}
-
-void print_timelimit(type_t type, void *object)
-{ 
-	jobcomp_job_rec_t *jobcomp = (jobcomp_job_rec_t *)object;
-	
-	switch(type) {
-	case HEADLINE:
-		printf("%-10s", "Time Limit");
-		break;
-	case UNDERSCORE:
-		printf("%-10s", "----------");
-		break;
-	case JOBCOMP:
-		printf("%-10s", jobcomp->timelimit);
-		break;
-	default:
-		printf("%-10s", "n/a");
-		break;
-	} 
-}
-
-void print_end(type_t type, void *object)
-{ 
-	jobacct_job_rec_t *job = (jobacct_job_rec_t *)object;
-	jobcomp_job_rec_t *jobcomp = (jobcomp_job_rec_t *)object;
-	jobacct_step_rec_t *step = (jobacct_step_rec_t *)object;
-	char time_str[32];
-	
-	switch(type) {
-	case HEADLINE:
-		printf("%-14s", "End Time");
-		break;
-	case UNDERSCORE:
-		printf("%-14.14s", "--------------------");
-		break;
-	case JOB:
-		slurm_make_time_str(&job->end, 
-				    time_str, 
-				    sizeof(time_str));
-		printf("%-14s", time_str);
-		break;
-	case JOBCOMP:
-		printf("%-14s", jobcomp->end_time);
-		break;
-	case JOBSTEP:
-		slurm_make_time_str(&step->end, 
-				    time_str, 
-				    sizeof(time_str));
-		printf("%-14s", time_str);
-		break;
-	default:
-		printf("%-14s", "n/a");
-		break;
-	} 
-}
-
-void print_systemcpu(type_t type, void *object)
-{
-	jobacct_job_rec_t *job = (jobacct_job_rec_t *)object;
-	jobacct_step_rec_t *step = (jobacct_step_rec_t *)object;
-	char str[FORMAT_STRING_SIZE];
-
-	switch(type) {
-	case HEADLINE:
-		printf("%-15s", "SystemCpu");
-		break;
-	case UNDERSCORE:
-		printf("%-15s", "---------------");
-		break;
-	case JOB:
-		_elapsed_time(job->sys_cpu_sec,
-			      job->sys_cpu_usec, str);
-		printf("%-15s", str);
-		break;
-	case JOBSTEP:
-		_elapsed_time(step->sys_cpu_sec,
-			      step->sys_cpu_usec, str);
-		printf("%-15s", str);
-		break;
-	default:
-		printf("%-15s", "n/a");
-		break;
-	} 
-}
-
-void print_uid(type_t type, void *object)
-{ 
-	jobacct_job_rec_t *job = (jobacct_job_rec_t *)object;
-	jobcomp_job_rec_t *jobcomp = (jobcomp_job_rec_t *)object;
-	int32_t uid = -1;
-	struct passwd *pw = NULL;
-	
-	switch(type) {
-	case HEADLINE:
-		printf("%-5s", "Uid");
-		break;
-	case UNDERSCORE:
-		printf("%-5s", "-----");
-		break;
-	case JOB:
-		if(job->user) {
-			if ((pw=getpwnam(job->user)))
-				uid = pw->pw_uid;
-		} else 
-			uid = job->uid;
-		break;
-	case JOBCOMP:
-		printf("%-5u", jobcomp->uid);
-		break;
-	case JOBSTEP:
-		printf("%-5s", " ");
-		break;
-	} 
-
-	if(uid != -1) 
-		printf("%-5d", uid);
-}
-
-void print_user(type_t type, void *object)
-{ 
-	jobacct_job_rec_t *job = (jobacct_job_rec_t *)object;
-	jobcomp_job_rec_t *jobcomp = (jobcomp_job_rec_t *)object;
-	int uid = -1;
-	char	*tmp="(unknown)";
-	struct	passwd *pw = NULL;		 
-
-	switch(type) {
-	case HEADLINE:
-		printf("%-9s", "User");
-		break;
-	case UNDERSCORE:
-		printf("%-9s", "---------");
-		break;
-	case JOB:
-		if(job->user) 
-			printf("%-9s", job->user);
-		else
-			uid = job->uid;
-		break;
-	case JOBCOMP:
-		printf("%-9s", jobcomp->uid_name);
-		break;
-	case JOBSTEP:
-		printf("%-9s", " ");
-		break;
-	default:
-		printf("%-9s", "n/a");
-		break;
-	} 
-	if(uid != -1) {
-		if ((pw=getpwuid(uid)))
-			tmp=pw->pw_name;
-		printf("%-9s", tmp);
-	}
-}
-
-void print_usercpu(type_t type, void *object)
-{
-	jobacct_job_rec_t *job = (jobacct_job_rec_t *)object;
-	jobacct_step_rec_t *step = (jobacct_step_rec_t *)object;
-	char str[FORMAT_STRING_SIZE];
-	
-	switch(type) {
-	case HEADLINE:
-		printf("%-15s", "UserCpu");
-		break;
-	case UNDERSCORE:
-		printf("%-15s", "---------------");
-		break;
-	case JOB:
-		_elapsed_time(job->user_cpu_sec,
-			      job->user_cpu_usec, str);
-		printf("%-15s", str);
-		break;
-	case JOBSTEP:
-		_elapsed_time(step->user_cpu_sec,
-			      step->user_cpu_usec, str);
-		printf("%-15s", str);
-		break;
-	default:
-		printf("%-15s", "n/a");
-		break;
-	} 
-
-}
-
-void print_vsize(type_t type, void *object)
-{ 
-	jobacct_job_rec_t *job = (jobacct_job_rec_t *)object;
-	jobacct_step_rec_t *step = (jobacct_step_rec_t *)object;
-	char outbuf[FORMAT_STRING_SIZE];
-	char buf1[FORMAT_STRING_SIZE];
-	char buf2[FORMAT_STRING_SIZE];
-	char buf3[FORMAT_STRING_SIZE];
-	sacct_t sacct;
-	char *nodes = NULL;
-	uint32_t pos;
-
-	switch(type) {
-	case HEADLINE:
-		printf("%-34s", "MaxVSIZE/Node:Task - Ave");
-		break;
-	case UNDERSCORE:
-		printf("%-34s", "----------------------------------");
-		break;
-	case JOB:
-		sacct = job->sacct;
-		nodes = job->nodes;
-		pos = sacct.min_cpu_id.nodeid;
-		convert_num_unit((float)sacct.max_vsize, 
-				 buf1, sizeof(buf1), UNIT_KILO);
-		if(job->track_steps)
-			snprintf(outbuf, FORMAT_STRING_SIZE, "%s/- - -", buf1);
-		else {
-			convert_num_unit((float)sacct.ave_vsize,
-					 buf2, sizeof(buf2), UNIT_KILO);
-			find_hostname(pos, nodes, buf3);
-			snprintf(outbuf, FORMAT_STRING_SIZE, "%s/%s:%u - %s", 
-				 buf1,
-				 buf3, 
-				 sacct.max_vsize_id.taskid, 
-				 buf2);
-		}
-		printf("%-34s", outbuf);
-		break;
-	case JOBSTEP:
-		sacct = step->sacct;
-		nodes = step->nodes;
-		pos = sacct.min_cpu_id.nodeid;				 
-		convert_num_unit((float)sacct.max_vsize, buf1, sizeof(buf1), 
-				 UNIT_KILO);
-		convert_num_unit((float)sacct.ave_vsize, buf2, sizeof(buf2),
-				 UNIT_KILO);
-		find_hostname(pos, nodes, buf3);
-		snprintf(outbuf, FORMAT_STRING_SIZE, "%s/%s:%u - %s", 
-			 buf1,
-			 buf3, 
-			 sacct.max_vsize_id.taskid, 
-			 buf2);
-		printf("%-34s", outbuf);
-		break;
-	default:
-		printf("%-34s", "n/a");
-		break;
-	} 
-}
-
-void print_cputime(type_t type, void *object)
-{ 
-	jobacct_job_rec_t *job = (jobacct_job_rec_t *)object;
-	jobacct_step_rec_t *step = (jobacct_step_rec_t *)object;
-	char outbuf[FORMAT_STRING_SIZE];
-	char buf1[FORMAT_STRING_SIZE];
-	char buf2[FORMAT_STRING_SIZE];
-	char buf3[FORMAT_STRING_SIZE];
-	sacct_t sacct;
-	char *nodes = NULL;
-	uint32_t pos;
-
-	switch(type) {
-	case HEADLINE:
-		printf("%-37s", "MinCPUtime/Node:Task - Ave");
-		break;
-	case UNDERSCORE:
-		printf("%-37s", "-------------------------------------");
-		break;
-	case JOB:
-		sacct = job->sacct;
-		nodes = job->nodes;
-		pos = sacct.min_cpu_id.nodeid;				 
-		_elapsed_time((int)sacct.min_cpu, 0, buf1);
-		if(job->track_steps)
-			snprintf(outbuf, FORMAT_STRING_SIZE, 
-				 "%s/- - -", buf1);
-		else {
-			_elapsed_time((int)sacct.ave_cpu, 0, buf2);
-			find_hostname(pos, nodes, buf3);
-			snprintf(outbuf, FORMAT_STRING_SIZE, 
-				 "%s/%s:%u - %s", 
-				 buf1,
-				 buf3, 
-				 sacct.min_cpu_id.taskid, 
-				 buf2);
-		}
-		printf("%-37s", outbuf);
-		break;
-	case JOBSTEP:
-		sacct = step->sacct;
-		nodes = step->nodes;
-		pos = sacct.min_cpu_id.nodeid;				 
-		_elapsed_time((int)sacct.min_cpu, 0, buf1);
-		_elapsed_time((int)sacct.ave_cpu, 0, buf2);
-		find_hostname(pos, nodes, buf3);
-		snprintf(outbuf, FORMAT_STRING_SIZE, 
-			 "%s/%s:%u - %s", 
-			 buf1,
-			 buf3, 
-			 sacct.min_cpu_id.taskid, 
-			 buf2);
-		printf("%-37s", outbuf);
-		break;
-	default:
-		printf("%-37s", "n/a");
-		break;
-	} 
-}
-
-void print_account(type_t type, void *object)
-{
-	jobacct_job_rec_t *job = (jobacct_job_rec_t *)object;
-	jobacct_step_rec_t *step = (jobacct_step_rec_t *)object;
-
-	switch(type) {
-	case HEADLINE:
-		printf("%-16s", "Account");
-		break;
-	case UNDERSCORE:
-		printf("%-16s", "----------------");
-		break;
-	case JOB:
-		if(!job->account)
-			printf("%-16s", "unknown");
-		else if(strlen(job->account)<17)
-			printf("%-16s", job->account);
-		else
-			printf("%-13.13s...", job->account);
-		break;
-	case JOBSTEP:
-		if(!step->account)
-			printf("%-16s", "unknown");
-		else if(strlen(step->account)<17)
-			printf("%-16s", step->account);
-		else
-			printf("%-13.13s...", step->account);
-		break;
-	default:
-		printf("%-16s", "n/a");
-		break;
-		break;
-	}
-}
-
-void print_assoc(type_t type, void *object)
-{
-	jobacct_job_rec_t *job = (jobacct_job_rec_t *)object;
-	jobacct_step_rec_t *step = (jobacct_step_rec_t *)object;
-
-	switch(type) {
-	case HEADLINE:
-		printf("%-16s", "AssociationID");
-		break;
-	case UNDERSCORE:
-		printf("%-16s", "----------------");
-		break;
-	case JOB:
-		if(!job->associd)
-			printf("%-16s", "unknown");
-		else 
-			printf("%-16u", job->associd);
-		break;
-	case JOBSTEP:
-		if(!step->associd)
-			printf("%-16s", "unknown");
-		else 
-			printf("%-16u", step->associd);
-		break;
-	default:
-		printf("%-16s", "n/a");
-		break;
-		break;
-	}
-}
-
-void print_cluster(type_t type, void *object)
-{
-	jobacct_job_rec_t *job = (jobacct_job_rec_t *)object;
-	jobacct_step_rec_t *step = (jobacct_step_rec_t *)object;
-
-	switch(type) {
-	case HEADLINE:
-		printf("%-16s", "Cluster");
-		break;
-	case UNDERSCORE:
-		printf("%-16s", "----------------");
-		break;
-	case JOB:
-		if(!job->cluster)
-			printf("%-16s", "unknown");
-		else if(strlen(job->cluster)<17)
-			printf("%-16s", job->cluster);
-		else
-			printf("%-13.13s...", job->cluster);
-		break;
-	case JOBSTEP:
-		if(!step->cluster)
-			printf("%-16s", "unknown");
-		else if(strlen(step->cluster)<17)
-			printf("%-16s", step->cluster);
-		else
-			printf("%-13.13s...", step->cluster);
-		break;
-	default:
-		printf("%-16s", "n/a");
-		break;
-		break;
-	}
-}
-
-void print_connection(type_t type, void *object)
-{
-	jobcomp_job_rec_t *job = (jobcomp_job_rec_t *)object;
-
-	switch(type) {
-	case HEADLINE:
-		printf("%-10s", "Connection");
-		break;
-	case UNDERSCORE:
-		printf("%-10s", "----------");
-		break;
-	case JOBCOMP:
-		printf("%-10s", job->connection);
-		break;
-	default:
-		printf("%-10s", "n/a");
-		break;
-	}
-}
-void print_geo(type_t type, void *object)
-{
-	jobcomp_job_rec_t *job = (jobcomp_job_rec_t *)object;
-
-	switch(type) {
-	case HEADLINE:
-		printf("%-8s", "Geometry");
-		break;
-	case UNDERSCORE:
-		printf("%-8s", "--------");
-		break;
-	case JOBCOMP:
-		printf("%-8s", job->geo);
-		break;
-	default:
-		printf("%-8s", "n/a");
-		break;
-	}
-}
-void print_max_procs(type_t type, void *object)
-{
-	jobcomp_job_rec_t *job = (jobcomp_job_rec_t *)object;
-
-	switch(type) {
-	case HEADLINE:
-		printf("%-9s", "Max Procs");
-		break;
-	case UNDERSCORE:
-		printf("%-9s", "---------");
-		break;
-	case JOBCOMP:
-		printf("%-9d", job->max_procs);
-		break;
-	default:
-		printf("%-9s", "n/a");
-		break;
-	}
-}
-void print_reboot(type_t type, void *object)
-{
-	jobcomp_job_rec_t *job = (jobcomp_job_rec_t *)object;
-
-	switch(type) {
-	case HEADLINE:
-		printf("%-6s", "Reboot");
-		break;
-	case UNDERSCORE:
-		printf("%-6s", "------");
-		break;
-	case JOBCOMP:
-		printf("%-6s", job->reboot);
-		break;
-	default:
-		printf("%-6s", "n/a");
-		break;
-	}
-}
-void print_rotate(type_t type, void *object)
-{
-	jobcomp_job_rec_t *job = (jobcomp_job_rec_t *)object;
-
-	switch(type) {
-	case HEADLINE:
-		printf("%-6s", "Rotate");
-		break;
-	case UNDERSCORE:
-		printf("%-6s", "------");
-		break;
-	case JOBCOMP:
-		printf("%-6s", job->rotate);
-		break;
-	default:
-		printf("%-6s", "n/a");
-		break;
-	}
-}
-
-void print_bg_start_point(type_t type, void *object)
-{
-	jobcomp_job_rec_t *job = (jobcomp_job_rec_t *)object;
-
-	switch(type) {
-	case HEADLINE:
-		printf("%-14s", "BG Start Point");
-		break;
-	case UNDERSCORE:
-		printf("%-14s", "--------------");
-		break;
-	case JOBCOMP:
-		printf("%-14s", job->bg_start_point);
-		break;
-	default:
-		printf("%-14s", "n/a");
-		break;
-	}
-}
-
-void print_wckey(type_t type, void *object)
-{
-	jobacct_job_rec_t *job = (jobacct_job_rec_t *)object;
-
-	switch(type) {
-	case HEADLINE:
-		printf("%-16s", "WCKey");
-		break;
-	case UNDERSCORE:
-		printf("%-16s", "----------------");
-		break;
-	case JOB:
-		if(!job->wckey)
-			printf("%-16s", "");
-		else if(strlen(job->wckey)<17)
-			printf("%-16s", job->wckey);
-		else
-			printf("%-13.13s...", job->wckey);
-		break;
-	case JOBSTEP:
-		printf("%-16s", "");
-		break;
-	default:
-		printf("%-16s", "n/a");
-		break;
-		break;
-	}
-}
-
diff --git a/src/sacct/process.c b/src/sacct/process.c
index 52808e35ecb4264e54eaf4808385dc28f8f91a91..701a62d61b366bad5c5f470687c631f0e96df4ac 100644
--- a/src/sacct/process.c
+++ b/src/sacct/process.c
@@ -6,10 +6,11 @@
  *  Copyright (C) 2006 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Danny Auble <da@llnl.gov>.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -40,25 +41,22 @@
 #include "sacct.h"
 
 
-void find_hostname(uint32_t pos, char *hosts, char *host)
+char *find_hostname(uint32_t pos, char *hosts)
 {
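+	/* Return the pos'th host from the hosts list as an xstrdup'd
+	 * string the caller must xfree(), or NULL if unknown. */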
 	hostlist_t hostlist = NULL;
-	char *temp = NULL;
+	char *temp = NULL, *host = NULL;
 
-	if(pos == (uint32_t)NO_VAL) {
-		snprintf(host, 50, "'N/A'");
-		return;
-	}
+	if(!hosts || (pos == (uint32_t)NO_VAL))
+		return NULL;
+	
 	hostlist = hostlist_create(hosts);
 	temp = hostlist_nth(hostlist, pos);
 	if(temp) {
-		snprintf(host, 50, "%s", temp);
+		host = xstrdup(temp);
 		free(temp);
-	} else {
-		snprintf(host, 50, "'N/A'");
-	}
+	} 
 	hostlist_destroy(hostlist);
-	return;
+	return host;
 }
 
 void aggregate_sacct(sacct_t *dest, sacct_t *from)
diff --git a/src/sacct/sacct.c b/src/sacct/sacct.c
index 5f8a560620e46f0dec5c6197e7348db55ac366ec..6445cbd7cad169e5c39f96a0c40efa22418001b0 100644
--- a/src/sacct/sacct.c
+++ b/src/sacct/sacct.c
@@ -2,10 +2,12 @@
  *  sacct.c - job accounting reports for SLURM's jobacct/log plugin
  *****************************************************************************
  *
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
  *  Copyright (C) 2005 Hewlett-Packard Development Company, L.P.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -33,182 +35,81 @@
  *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
 \*****************************************************************************/
 
-/*
- * HISTORY
- * $Log$
- * Revision 1.8  2006/06/09   ciclouston
- * Added new account entry to begining of fields[].
- *
- * Revision 1.7  2005/06/29 20:41:23  da
- * New Tag HP's patch applied for mutex issue in jobacct.
- *
- * Revision 1.6  2005/06/24 01:19:52  jette
- * Additional documenation for job accounting. Some bug fixes too. All from
- * Andy Riebs/HP.
- *
- * Revision 1.5  2005/06/11 00:49:43  jette
- * Get all the latest accounting software patches.
- *
- * Revision 1.1  2005/06/01 17:26:11  jette
- * Extensive mods checked it for HP work, see NEWS for details.
- *
- * Revision 1.4  2005/05/31 20:28:20  riebs
- * Include "errors" in the default sacct display.
- *
- * Revision 1.3  2005/05/27 17:37:43  riebs
- * Don't discard JOB_START and JOB_END records when selecting on job
- * steps. ("sacct -J 246.1" would report "Error: No JOB_START record for
- * job 246"). This was not a problem when --dump was specified.
- *
- * Revision 1.2  2005/05/19 20:42:11  riebs
- * 1. Fix problem of double-flush of .expired records when scontrol is
- *    unavailable
- * 2. Handle "--expire=1d" as "expire everything through yesterday,"
- *    rather than "expire everything up to  exactly 24 hours ago."
- *
- * Revision 1.1  2005/05/13 20:11:14  riebs
- * Add the jobacct plugins and the sacct utility, and upgrade to
- * slurm-0.4.22-1.
- *
- * Revision 1.9  2005/05/03 12:38:35  riebs
- * Implement "sacct --expire" to facilitate logfile rotation.
- *
- * Revision 1.8  2005/04/15 23:01:39  riebs
- * Check in the changes for dynamic SLURM job accounting (that is, the
- * code to capture runtime data for psize and vsize).
- *
- * Revision 1.7  2005/04/11 21:05:44  riebs
- * Check in a work-around for a getopt_long() bug.
- *
- * Revision 1.6  2005/04/07 18:43:46  riebs
- * Fix a hand full of off-by-one problems, and add --version
- *
- * Revision 1.2  2005/04/07 18:41:42  riebs
- * updat the rev
- *
- * Revision 1.1  2005/04/07 18:33:08  riebs
- * Initial revision
- *
- * Revision 1.5  2005/04/06 19:37:40  riebs
- * Clean up sacct output.
- *
- * Revision 1.4  2005/04/05 15:28:01  riebs
- * - Implement --all
- * - Clean up output formatting for elapsed time
- * - Expand output field for jobname
- *
- * Revision 1.3  2005/04/02 19:46:44  riebs
- * Remove the setuid-related code, initialize job[].cstatus properly, fix
- * formatting of the JOB_STEP record, and fix printing of elapsed time.
- *
- * Revision 1.2  2005/04/01 17:10:43  riebs
- * Replace the Perl version of sacct with sacct.c
- *
- * Revision 1.1  2005/03/31 21:57:45  riebs
- * Initial revision
- *
- * Revision 1.1  2005/03/31 21:19:28  riebs
- * Add the .c version of sacct to CVS in anticipation of retiring the
- * .pl version.
- *
- * Revision 1.8  2005/03/31 19:25:19  riebs
- * Solid version of sacct with all functionality!
- *
- * Revision 1.7  2005/03/31 13:24:41  riebs
- * Good version of formatted_dump implemented.
- *
- * Revision 1.6  2005/03/31 00:33:45  riebs
- * Pretty good implementation of fdump now.
- *
- * Revision 1.5  2005/03/30 23:57:31  riebs
- * Version that handles all print fields.
- *
- * Revision 1.4  2005/03/30 20:51:13  riebs
- * A precautionary version before I radically change
- * the fields struct.
- *
- * Revision 1.3  2005/03/30 18:26:24  riebs
- * Pretty solid version of --dump
- *
- * Revision 1.2  2005/03/29 14:43:20  riebs
- * All data are aggregated; just need to print it now!
- *
- * Revision 1.1  2005/03/28 18:21:26  andy
- * Initial revision
- *
- * Revision 1.1  2005/03/28 16:18:38  riebs
- * Initial revision
- *
- *
- * $EndLog$
- */
-
 #include "sacct.h"
 
 void invalidSwitchCombo(char *good, char *bad);
-void _print_header(void);
 
 /*
  * Globals
  */
 sacct_parameters_t params;
-fields_t fields[] = {{"account", print_account},
-		     {"associd", print_assoc},
-		     {"cluster", print_cluster},
-		     {"cpu", print_cpu},
-		     {"cputime", print_cputime}, 
-		     {"elapsed", print_elapsed},
-		     {"eligible", print_eligible},
-		     {"end", print_end}, 
-		     {"exitcode", print_exitcode},
-		     {"finished", print_end},		/* Defunct name */ 
-		     {"gid", print_gid}, 
-		     {"group", print_group}, 
-		     {"job", print_job},
-		     {"jobid", print_jobid}, 
-		     {"jobname", print_name}, 
-		     {"ncpus", print_ncpus}, 
-		     {"nodes", print_nodes}, 
-		     {"nnodes", print_nnodes}, 
-		     {"nprocs", print_ntasks},
-		     {"ntasks", print_ntasks}, 
-		     {"pages", print_pages}, 
-		     {"partition", print_partition}, 
-		     {"rss", print_rss},
-		     {"start", print_start}, 
-		     {"state", print_state}, 
-		     {"status", print_state}, 
-		     {"submit", print_submit}, 
-		     {"timelimit", print_timelimit}, 
-		     {"submitted", print_submit},	/* Defunct name */
-		     {"systemcpu", print_systemcpu}, 
-		     {"uid", print_uid}, 
-		     {"user", print_user}, 
-		     {"usercpu", print_usercpu}, 
-		     {"vsize", print_vsize}, 
-		     {"blockid", print_blockid}, 
-		     {"connection", print_connection}, 
-		     {"geo", print_geo}, 
-		     {"max_procs", print_max_procs}, 
-		     {"reboot", print_reboot}, 
-		     {"rotate", print_rotate}, 
-		     {"bg_start_point", print_bg_start_point}, 		     
-		     {"wckey", print_wckey}, 		     
-		     {NULL, NULL}};
+print_field_t fields[] = {
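+	/* { display width, header, print routine, field id } */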
+	{10, "AllocCPUS", print_fields_int, PRINT_ALLOC_CPUS},
+	{10, "Account", print_fields_str, PRINT_ACCOUNT},
+	{7, "AssocID", print_fields_int, PRINT_ASSOCID},
+	{10, "AveCPU", print_fields_str, PRINT_AVECPU}, 
+	{10, "AvePages", print_fields_str, PRINT_AVEPAGES}, 
+	{10, "AveRSS", print_fields_str, PRINT_AVERSS}, 
+	{10, "AveVMSize", print_fields_str, PRINT_AVEVSIZE}, 
+	{16, "BlockID", print_fields_str, PRINT_BLOCKID}, 
+	{10, "Cluster", print_fields_str, PRINT_CLUSTER},
+	{10, "CPUTime", print_fields_time_from_secs, PRINT_CPU_TIME},
+	{10, "CPUTimeRAW", print_fields_int, PRINT_CPU_TIME_RAW},
+	{10, "Elapsed", print_fields_time_from_secs, PRINT_ELAPSED},
+	{19, "Eligible", print_fields_date, PRINT_ELIGIBLE},
+	{19, "End", print_fields_date, PRINT_END},
+	{8, "ExitCode", print_fields_str, PRINT_EXITCODE},
+	{6, "GID", print_fields_int, PRINT_GID}, 
+	{9, "Group", print_fields_str, PRINT_GROUP}, 
+	{10, "JobID", print_fields_str, PRINT_JOBID}, 
+	{10, "JobName", print_fields_str, PRINT_JOBNAME},
+	{9,  "Layout", print_fields_str, PRINT_LAYOUT},
+	{8, "MaxPages", print_fields_str, PRINT_MAXPAGES}, 
+	{12, "MaxPagesNode", print_fields_str, PRINT_MAXPAGESNODE}, 
+	{14, "MaxPagesTask", print_fields_int, PRINT_MAXPAGESTASK}, 
+	{10, "MaxRSS", print_fields_str, PRINT_MAXRSS},
+	{10, "MaxRSSNode", print_fields_str, PRINT_MAXRSSNODE},
+	{10, "MaxRSSTask", print_fields_int, PRINT_MAXRSSTASK},
+	{10, "MaxVMSize", print_fields_str, PRINT_MAXVSIZE}, 
+	{14, "MaxVMSizeNode", print_fields_str, PRINT_MAXVSIZENODE}, 
+	{14, "MaxVMSizeTask", print_fields_int, PRINT_MAXVSIZETASK}, 
+	{10, "MinCPU", print_fields_str, PRINT_MINCPU}, 
+	{10, "MinCPUNode", print_fields_str, PRINT_MINCPUNODE}, 
+	{10, "MinCPUTask", print_fields_int, PRINT_MINCPUTASK}, 
+	{10, "NCPUS", print_fields_int, PRINT_ALLOC_CPUS},
+	{15, "NodeList", print_fields_str, PRINT_NODELIST}, 
+	{8, "NNodes", print_fields_str, PRINT_NNODES}, 
+	{8, "NTasks", print_fields_int, PRINT_NTASKS},
+	{10, "Priority", print_fields_int, PRINT_PRIO}, 
+	{10, "Partition", print_fields_str, PRINT_PARTITION}, 
+	{10, "QOS", print_fields_str, PRINT_QOS},
+	{6, "QOSRAW", print_fields_int, PRINT_QOSRAW},
+	{8, "ReqCPUS", print_fields_int, PRINT_REQ_CPUS},
+	{10, "Reserved", print_fields_time_from_secs, PRINT_RESV},
+	{10, "ResvCPU", print_fields_time_from_secs, PRINT_RESV_CPU},
+	{10, "ResvCPURAW", print_fields_int, PRINT_RESV_CPU},
+	{19, "Start", print_fields_date, PRINT_START}, 
+	{10, "State", print_fields_str, PRINT_STATE}, 
+	{19, "Submit", print_fields_date, PRINT_SUBMIT}, 
+	{10, "Suspended", print_fields_time_from_secs, PRINT_SUSPENDED}, 
+	{10, "SystemCPU", print_fields_str, PRINT_SYSTEMCPU}, 
+	{10, "Timelimit", print_fields_time_from_secs, PRINT_TIMELIMIT},
+	{10, "TotalCPU", print_fields_str, PRINT_TOTALCPU}, 
+	{6, "UID", print_fields_int, PRINT_UID}, 
+	{9, "User", print_fields_str, PRINT_USER}, 
+	{10, "UserCPU", print_fields_str, PRINT_USERCPU}, 
+	{10, "WCKey", print_fields_str, PRINT_WCKEY}, 		     
+	{10, "WCKeyID", print_fields_int, PRINT_WCKEYID}, 		     
+	{0, NULL, NULL, 0}};
 
 List jobs = NULL;
 
-int printfields[MAX_PRINTFIELDS],	/* Indexed into fields[] */
-	nprintfields = 0;
-
 int main(int argc, char **argv)
 {
 	enum {
 		SACCT_DUMP,
-		SACCT_EXPIRE,
 		SACCT_FDUMP,
 		SACCT_LIST,
-		SACCT_STAT,
 		SACCT_HELP,
 		SACCT_USAGE
 	} op;
@@ -225,54 +126,8 @@ int main(int argc, char **argv)
 		op = SACCT_HELP;
 	else if (params.opt_dump) {
 		op = SACCT_DUMP;
-		if (params.opt_long || params.opt_total 
-		    || params.opt_field_list || params.opt_expire) {
-			if (params.opt_verbose)
-				fprintf(stderr,
-					"Switch conflict,\n"
-					"\topt_long=%d\n"
-					"\topt_total=%d\n"
-					"\topt_field_list=%s\n",
-					params.opt_long, 
-					params.opt_total, 
-					params.opt_field_list);
-			invalidSwitchCombo("--dump",
-					   "--brief, --long, "
-					   "--fields, --total");
-			rc = 1;
-			goto finished;
-		}
 	} else if (params.opt_fdump) {
 		op = SACCT_FDUMP;
-	} else if (params.opt_stat) {
-		op = SACCT_STAT;
-	} else if (params.opt_expire) {
-		op = SACCT_EXPIRE;
-		if (params.opt_long || params.opt_total 
-		    || params.opt_field_list || 
-		    //(params.opt_gid != 0) || (params.opt_uid != 0) ||
-		    params.arch_cond->job_cond->step_list
-		    || params.arch_cond->job_cond->state_list ) {
-			if (params.opt_verbose)
-				fprintf(stderr,
-					"Switch conflict,\n"
-					"\topt_long=%d\n"
-					"\topt_total=%d\n"
-					"\topt_field_list=%s\n"
-					"\topt_gid=%d\n"
-					"\topt_uid=%d\n",
-					params.opt_long, 
-					params.opt_total, 
-					params.opt_field_list,
-					params.opt_gid, 
-					params.opt_uid);
-			invalidSwitchCombo("--expire",
-					   "--brief, --long, --fields, "
-					   "--total, --gid, --uid, --jobs, "
-					   "--state");
-			rc = 1;
-			goto finished;
-		}
 	} else
 		op = SACCT_LIST;
 
@@ -286,16 +141,12 @@ int main(int argc, char **argv)
 		else 
 			do_dump();
 		break;
-	case SACCT_EXPIRE:
-		do_expire();
-		break;
 	case SACCT_FDUMP:
 		if(get_data() == SLURM_ERROR)
 			exit(errno);
 		break;
 	case SACCT_LIST:
-		if (!params.opt_noheader)/* give them something to look */
-			_print_header();/* at while we think...        */
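+		/* print the header row for the selected fields */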
+		print_fields_header(print_fields_list);
 		if(get_data() == SLURM_ERROR)
 			exit(errno);
 		if(params.opt_completion) 
@@ -303,16 +154,6 @@ int main(int argc, char **argv)
 		else 
 			do_list();
 		break;
-	case SACCT_STAT:
-		fprintf(stderr,
-			"This functionality has been replaced with 'sstat' "
-			"in the future please make note this will "
-			"not be supported.\n");
-		
-		if (!params.opt_noheader)/* give them something to look */
-			_print_header();/* at while we think...        */
-		do_stat();
-		break;
 	case SACCT_HELP:
 		do_help();
 		break;
@@ -321,7 +162,7 @@ int main(int argc, char **argv)
 		sacct_fini();
 		exit(2);
 	}
-finished:
+
 	sacct_fini();
 	return (rc);
 }
@@ -332,22 +173,3 @@ void invalidSwitchCombo(char *good, char *bad)
 	fprintf(stderr, "\"%s\" may not be used with %s\n", good, bad);
 	return;
 }
-
-void _print_header(void)
-{
-	int	i,j;
-	for (i=0; i<nprintfields; i++) {
-		if (i)
-			printf(" ");
-		j=printfields[i];
-		(fields[j].print_routine)(HEADLINE, 0);
-	}
-	printf("\n");
-	for (i=0; i<nprintfields; i++) {
-		if (i)
-			printf(" ");
-		j=printfields[i];
-		(fields[j].print_routine)(UNDERSCORE, 0);
-	}
-	printf("\n");
-}
diff --git a/src/sacct/sacct.h b/src/sacct/sacct.h
index 27db34813f0d58290e1b2b045c8a13a848cec15c..f9a4014e59b1009f622e6935b116255986666b52 100644
--- a/src/sacct/sacct.h
+++ b/src/sacct/sacct.h
@@ -6,10 +6,11 @@
  *  Copyright (C) 2006 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Danny Auble <da@llnl.gov>.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -60,26 +61,23 @@
 #include "src/common/slurm_jobacct_gather.h"
 #include "src/common/slurm_accounting_storage.h"
 #include "src/common/slurm_jobcomp.h"
+#include "src/common/print_fields.h"
 
 #define ERROR 2
 
 #define BRIEF_FIELDS "jobid,state,exitcode"
 #define BRIEF_COMP_FIELDS "jobid,uid,state"
-#define DEFAULT_FIELDS "jobid,jobname,partition,ncpus,state,exitcode"
+#define DEFAULT_FIELDS "jobid,jobname,partition,account,alloccpus,state,exitcode"
 #define DEFAULT_COMP_FIELDS "jobid,uid,jobname,partition,nnodes,nodes,state,end"
-#define STAT_FIELDS "jobid,vsize,rss,pages,cputime,ntasks,state"
-#define LONG_FIELDS "jobid,jobname,partition,vsize,rss,pages,cputime,ntasks,ncpus,elapsed,state,exitcode"
+#define LONG_FIELDS "jobid,jobname,partition,maxvmsize,maxvmsizenode,maxvmsizetask,avevmsize,maxrss,maxrssnode,maxrsstask,averss,maxpages,maxpagesnode,maxpagestask,avepages,mincpu,mincpunode,mincputask,avecpu,ntasks,alloccpus,elapsed,state,exitcode"
 
-#ifdef HAVE_BG
-#define LONG_COMP_FIELDS "jobid,uid,jobname,partition,blockid,nnodes,nodes,state,start,end,timelimit,connection,reboot,rotate,max_procs,geo,bg_start_point"
-#else
 #define LONG_COMP_FIELDS "jobid,uid,jobname,partition,nnodes,nodes,state,start,end,timelimit"
-#endif
 
 #define BUFFER_SIZE 4096
 #define STATE_COUNT 10
 
 #define MAX_PRINTFIELDS 100
+#define FORMAT_STRING_SIZE 34
 
 #define SECONDS_IN_MINUTE 60
 #define SECONDS_IN_HOUR (60*SECONDS_IN_MINUTE)
@@ -94,84 +92,95 @@ typedef enum {	HEADLINE,
 		JOBCOMP
 } type_t;
 
+typedef enum {
+		PRINT_ALLOC_CPUS,
+		PRINT_ACCOUNT,
+		PRINT_ASSOCID,
+		PRINT_AVECPU,
+		PRINT_AVEPAGES,
+		PRINT_AVERSS,
+		PRINT_AVEVSIZE,
+		PRINT_BLOCKID,
+		PRINT_CLUSTER,
+		PRINT_CPU_TIME,
+		PRINT_CPU_TIME_RAW,
+		PRINT_ELAPSED,
+		PRINT_ELIGIBLE,
+		PRINT_END,
+		PRINT_EXITCODE,
+		PRINT_GID,
+		PRINT_GROUP,
+		PRINT_JOBID,
+		PRINT_JOBNAME,
+		PRINT_LAYOUT,
+		PRINT_MAXPAGES,
+		PRINT_MAXPAGESNODE,
+		PRINT_MAXPAGESTASK,
+		PRINT_MAXRSS,
+		PRINT_MAXRSSNODE,
+		PRINT_MAXRSSTASK,
+		PRINT_MAXVSIZE,
+		PRINT_MAXVSIZENODE,
+		PRINT_MAXVSIZETASK,
+		PRINT_MINCPU,
+		PRINT_MINCPUNODE,
+		PRINT_MINCPUTASK,
+		PRINT_NODELIST,
+		PRINT_NNODES,
+		PRINT_NTASKS,
+		PRINT_PRIO,
+		PRINT_PARTITION,
+		PRINT_QOS,
+		PRINT_QOSRAW,
+		PRINT_REQ_CPUS,
+		PRINT_RESV,
+		PRINT_RESV_CPU,
+		PRINT_RESV_CPU_RAW,
+		PRINT_START,
+		PRINT_STATE,
+		PRINT_SUBMIT,
+		PRINT_SUSPENDED,
+		PRINT_SYSTEMCPU,
+		PRINT_TIMELIMIT,
+		PRINT_TOTALCPU,
+		PRINT_UID,
+		PRINT_USER,
+		PRINT_USERCPU,
+		PRINT_WCKEY,
+		PRINT_WCKEYID,
+} sacct_print_types_t;
+
 typedef struct {
-	acct_archive_cond_t *arch_cond;
+	acct_job_cond_t *job_cond;
 	int opt_completion;	/* --completion */
 	int opt_dump;		/* --dump */
 	int opt_dup;		/* --duplicates; +1 = explicitly set */
	int opt_fdump;		/* --formatted_dump */
-	int opt_expire;		/* --expire */
 	char *opt_field_list;	/* --fields= */
 	int opt_gid;		/* running persons gid */
 	int opt_help;		/* --help */
-	int opt_long;		/* --long */
-	int opt_lowmem;		/* --low_memory */
+	char *opt_filein;
 	int opt_noheader;	/* can only be cleared */
-	int opt_raw;		/* --raw */
-	int opt_stat;		/* --stat */
-	int opt_total;		/* --total */
+	int opt_allocs;		/* --total */
 	int opt_uid;		/* running persons uid */
-	int opt_verbose;	/* --verbose */
 } sacct_parameters_t;
 
-typedef struct fields {
-	char *name;		/* Specified in --fields= */
-	void (*print_routine) ();	/* Who gets to print it? */
-} fields_t;
-
-extern fields_t fields[];
+extern print_field_t fields[];
 extern sacct_parameters_t params;
 
 extern List jobs;
 
-extern int printfields[MAX_PRINTFIELDS],	/* Indexed into fields[] */
-	nprintfields;
+extern List print_fields_list;
+extern ListIterator print_fields_itr;
+extern int field_count;
+extern List qos_list;
 
 /* process.c */
-void find_hostname(uint32_t pos, char *hosts, char *host);
+char *find_hostname(uint32_t pos, char *hosts);
 void aggregate_sacct(sacct_t *dest, sacct_t *from);
 
 /* print.c */
 void print_fields(type_t type, void *object);
-void print_cpu(type_t type, void *object);
-void print_elapsed(type_t type, void *object);
-void print_exitcode(type_t type, void *object);
-void print_gid(type_t type, void *object);
-void print_group(type_t type, void *object);
-void print_job(type_t type, void *object);
-void print_name(type_t type, void *object);
-void print_jobid(type_t type, void *object);
-void print_ncpus(type_t type, void *object);
-void print_nodes(type_t type, void *object);
-void print_nnodes(type_t type, void *object);
-void print_ntasks(type_t type, void *object);
-void print_partition(type_t type, void *object);
-void print_blockid(type_t type, void *object);
-void print_pages(type_t type, void *object);
-void print_rss(type_t type, void *object);
-void print_state(type_t type, void *object);
-void print_submit(type_t type, void *object);
-void print_eligible(type_t type, void *object);
-void print_start(type_t type, void *object);
-void print_end(type_t type, void *object);
-void print_systemcpu(type_t type, void *object);
-void print_timelimit(type_t type, void *object);
-void print_uid(type_t type, void *object);
-void print_user(type_t type, void *object);
-void print_usercpu(type_t type, void *object);
-void print_vsize(type_t type, void *object);
-void print_cputime(type_t type, void *object);
-void print_account(type_t type, void *object);
-void print_assoc(type_t type, void *object);
-void print_cluster(type_t type, void *object);
-
-void print_connection(type_t type, void *object);
-void print_geo(type_t type, void *object);
-void print_max_procs(type_t type, void *object);
-void print_reboot(type_t type, void *object);
-void print_rotate(type_t type, void *object);
-void print_bg_start_point(type_t type, void *object);
-void print_wckey(type_t type, void *object);
 
 /* options.c */
 int decode_state_char(char *state);
@@ -180,15 +189,10 @@ int get_data(void);
 void parse_command_line(int argc, char **argv);
 void do_dump(void);
 void do_dump_completion(void);
-void do_expire();
 void do_help(void);
 void do_list(void);
 void do_list_completion(void);
-void do_stat(void);
 void sacct_init();
 void sacct_fini();
 
-/* sacct_stat.c */
-extern int sacct_stat(uint32_t jobid, uint32_t stepid);
-
 #endif /* !_SACCT_H */
diff --git a/src/sacct/sacct_stat.c b/src/sacct/sacct_stat.c
deleted file mode 100644
index 0b8b67622a7343460c5f3ee99e2db23d05b06c3e..0000000000000000000000000000000000000000
--- a/src/sacct/sacct_stat.c
+++ /dev/null
@@ -1,201 +0,0 @@
-/*****************************************************************************\
- *  sacct_stat.c - stat slurmd for percise job information
- *
- *  $Id: options.c 7541 2006-03-18 01:44:58Z da $
- *****************************************************************************
- *  Copyright (C) 2006 The Regents of the University of California.
- *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
- *  Written by Danny Auble <da@llnl.gov>.
- *  LLNL-CODE-402394.
- *  
- *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
- *  
- *  SLURM is free software; you can redistribute it and/or modify it under
- *  the terms of the GNU General Public License as published by the Free
- *  Software Foundation; either version 2 of the License, or (at your option)
- *  any later version.
- *
- *  In addition, as a special exception, the copyright holders give permission 
- *  to link the code of portions of this program with the OpenSSL library under
- *  certain conditions as described in each individual source file, and 
- *  distribute linked combinations including the two. You must obey the GNU 
- *  General Public License in all respects for all of the code used other than 
- *  OpenSSL. If you modify file(s) with this exception, you may extend this 
- *  exception to your version of the file(s), but you are not obligated to do 
- *  so. If you do not wish to do so, delete this exception statement from your
- *  version.  If you delete this exception statement from all source files in 
- *  the program, then also delete it here.
- *  
- *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
- *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
- *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
- *  details.
- *  
- *  You should have received a copy of the GNU General Public License along
- *  with SLURM; if not, write to the Free Software Foundation, Inc.,
- *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
-\*****************************************************************************/
-
-#include "sacct.h"
-#include <pthread.h>
-#include "src/common/forward.h"
-#include "src/common/slurm_auth.h"
-
-jobacct_step_rec_t step;
-	
-int thr_finished = 0;
-	
-void *_stat_thread(void *args);
-int _sacct_query(slurm_step_layout_t *step_layout, uint32_t job_id, 
-		 uint32_t step_id);
-int _process_results();
-
-
-int _sacct_query(slurm_step_layout_t *step_layout, uint32_t job_id,
-		 uint32_t step_id)
-{
-	slurm_msg_t msg;
-	stat_jobacct_msg_t r;
-	stat_jobacct_msg_t *jobacct_msg = NULL;
-	ListIterator itr;
-	List ret_list = NULL;
-	sacct_t temp_sacct;
-	ret_data_info_t *ret_data_info = NULL;
-	int rc = SLURM_SUCCESS;
-	int ntasks = 0;
-	int tot_tasks = 0;
-	debug("getting the stat of job %d on %d nodes", 
-	      job_id, step_layout->node_cnt);
-
-	memset(&temp_sacct, 0, sizeof(sacct_t));
-	temp_sacct.min_cpu = (float)NO_VAL;
-	memset(&step.sacct, 0, sizeof(sacct_t));
-	step.sacct.min_cpu = (float)NO_VAL;
-
-	step.stepid = step_id;
-	step.nodes = step_layout->node_list;
-	step.stepname = NULL;
-	step.state = JOB_RUNNING;
-	slurm_msg_t_init(&msg);
-	/* Common message contents */
-	r.job_id      = job_id;
-	r.step_id     = step_id;
-	r.jobacct     = jobacct_gather_g_create(NULL);
-	msg.msg_type        = MESSAGE_STAT_JOBACCT;
-	msg.data            = &r;
-	
-	
-	ret_list = slurm_send_recv_msgs(step_layout->node_list, &msg, 0, false);
-	if (!ret_list) {
-		error("got an error no list returned");
-		goto cleanup;
-	}
-	
-	itr = list_iterator_create(ret_list);		
-	while((ret_data_info = list_next(itr))) {
-		switch (ret_data_info->type) {
-		case MESSAGE_STAT_JOBACCT:
-			jobacct_msg = (stat_jobacct_msg_t *)
-				ret_data_info->data;
-			if(jobacct_msg) {
-				debug2("got it back for job %d", 
-				       jobacct_msg->job_id);
-				jobacct_gather_g_2_sacct(
-					&temp_sacct, 
-					jobacct_msg->jobacct);
-				ntasks += jobacct_msg->num_tasks;
-				aggregate_sacct(&step.sacct, &temp_sacct);
-			}
-			break;
-		case RESPONSE_SLURM_RC:
-			rc = slurm_get_return_code(ret_data_info->type, 
-						   ret_data_info->data);
-			error("there was an error with the request rc = %s", 
-			      slurm_strerror(rc));
-			break;
-		default:
-			rc = slurm_get_return_code(ret_data_info->type, 
-						   ret_data_info->data);
-			error("unknown return given %d rc = %s", 
-			      ret_data_info->type, slurm_strerror(rc));
-			break;
-		}
-	}
-	list_iterator_destroy(itr);
-	list_destroy(ret_list);
-
-	tot_tasks += ntasks;		
-cleanup:
-	
-	if(tot_tasks) {
-		step.sacct.ave_rss *= 1024;
-		step.sacct.max_rss *= 1024;
-		step.sacct.ave_vsize *= 1024;
-		step.sacct.max_vsize *= 1024;
-
-		step.sacct.ave_cpu /= tot_tasks;
-		step.sacct.ave_cpu /= 100;
-		step.sacct.min_cpu /= 100;
-		step.sacct.ave_rss /= tot_tasks;
-		step.sacct.ave_vsize /= tot_tasks;
-		step.sacct.ave_pages /= tot_tasks;
-	}
-	jobacct_gather_g_destroy(r.jobacct);	
-	return SLURM_SUCCESS;
-}
-
-int _process_results()
-{
-	print_fields(JOBSTEP, &step);
-	return SLURM_SUCCESS;
-}
-
-int sacct_stat(uint32_t jobid, uint32_t stepid)
-{
-	slurm_msg_t req_msg;
-	slurm_msg_t resp_msg;
-	job_step_id_msg_t req;
-	slurm_step_layout_t *step_layout = NULL;
-	int rc = SLURM_SUCCESS;
-
-	slurm_msg_t_init(&req_msg);
-	slurm_msg_t_init(&resp_msg);
-	debug("requesting info for job %u.%u", jobid, stepid);
-	req.job_id = jobid;
-	req.step_id = stepid;
-	req_msg.msg_type = REQUEST_STEP_LAYOUT;
-	req_msg.data     = &req;
-	
-	if (slurm_send_recv_controller_msg(&req_msg, &resp_msg) < 0) {
-		return SLURM_ERROR;
-	}
-		
-	switch (resp_msg.msg_type) {
-	case RESPONSE_STEP_LAYOUT:
-		step_layout = (slurm_step_layout_t *)resp_msg.data;
-		break;
-	case RESPONSE_SLURM_RC:
-		rc = ((return_code_msg_t *) resp_msg.data)->return_code;
-		slurm_free_return_code_msg(resp_msg.data);	
-		printf("problem getting job: %s\n", slurm_strerror(rc));
-		slurm_seterrno_ret(rc);
-		break;
-	default:
-		slurm_seterrno_ret(SLURM_UNEXPECTED_MSG_ERROR);
-		break;
-	}
-		
-	if(!step_layout) {
-		error("didn't get the job record rc = %s", slurm_strerror(rc));
-		return rc;
-	}
-
-	_sacct_query(step_layout, jobid, stepid);
-	
-	_process_results();
-	
-	slurm_step_layout_destroy(step_layout);	
-	
-	return rc;
-}
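
The file removed above implemented the sacct stat path: ask the controller
for the step's node layout, fan a MESSAGE_STAT_JOBACCT out to every node in
that layout, and aggregate the per-node accounting into a single step
record. A condensed restatement of that control flow, kept only as a
reference (it reuses the slurm internals exactly as they appear in the
deleted file and is a sketch, not buildable on its own):

    slurm_msg_t req_msg, resp_msg;
    job_step_id_msg_t req;

    slurm_msg_t_init(&req_msg);
    slurm_msg_t_init(&resp_msg);
    req.job_id  = jobid;
    req.step_id = stepid;
    req_msg.msg_type = REQUEST_STEP_LAYOUT;  /* step 1: get the node list */
    req_msg.data     = &req;
    if (slurm_send_recv_controller_msg(&req_msg, &resp_msg) < 0)
            return SLURM_ERROR;

    /* step 2: broadcast the accounting request to the step's nodes and
     * fold each node's reply into one sacct_t via aggregate_sacct() */
    ret_list = slurm_send_recv_msgs(step_layout->node_list, &msg, 0, false);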
diff --git a/src/sacctmgr/Makefile.am b/src/sacctmgr/Makefile.am
index 2ae61cf8e4fba7388c5b6c36da56c9b024198a81..ef447494a6da0044ae268b7da83fc6d8a4249dfa 100644
--- a/src/sacctmgr/Makefile.am
+++ b/src/sacctmgr/Makefile.am
@@ -15,6 +15,7 @@ sacctmgr_SOURCES =	\
 	account_functions.c	\
 	archive_functions.c	\
 	association_functions.c	\
+	config_functions.c	\
 	cluster_functions.c	\
 	common.c                \
 	file_functions.c	\
diff --git a/src/sacctmgr/Makefile.in b/src/sacctmgr/Makefile.in
index 2a6e6c101b4ad94f71e4e7b3438531c1d2399764..a83a12a89357f53824442678c5d4f58590e76e93 100644
--- a/src/sacctmgr/Makefile.in
+++ b/src/sacctmgr/Makefile.in
@@ -44,14 +44,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -73,8 +77,8 @@ binPROGRAMS_INSTALL = $(INSTALL_PROGRAM)
 PROGRAMS = $(bin_PROGRAMS)
 am_sacctmgr_OBJECTS = account_functions.$(OBJEXT) \
 	archive_functions.$(OBJEXT) association_functions.$(OBJEXT) \
-	cluster_functions.$(OBJEXT) common.$(OBJEXT) \
-	file_functions.$(OBJEXT) sacctmgr.$(OBJEXT) \
+	config_functions.$(OBJEXT) cluster_functions.$(OBJEXT) \
+	common.$(OBJEXT) file_functions.$(OBJEXT) sacctmgr.$(OBJEXT) \
 	qos_functions.$(OBJEXT) txn_functions.$(OBJEXT) \
 	user_functions.$(OBJEXT) wckey_functions.$(OBJEXT)
 sacctmgr_OBJECTS = $(am_sacctmgr_OBJECTS)
@@ -111,6 +115,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
@@ -281,6 +289,7 @@ sacctmgr_SOURCES = \
 	account_functions.c	\
 	archive_functions.c	\
 	association_functions.c	\
+	config_functions.c	\
 	cluster_functions.c	\
 	common.c                \
 	file_functions.c	\
@@ -367,6 +376,7 @@ distclean-compile:
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/association_functions.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/cluster_functions.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/common.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/config_functions.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/file_functions.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/qos_functions.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sacctmgr.Po@am__quote@
diff --git a/src/sacctmgr/account_functions.c b/src/sacctmgr/account_functions.c
index 40fc5f7113e4f172e06ab7380cccfd5066718b5a..427bf1e36abfd2c8e868f9f7d1f9d3e7d947c852 100644
--- a/src/sacctmgr/account_functions.c
+++ b/src/sacctmgr/account_functions.c
@@ -5,10 +5,11 @@
  *  Copyright (C) 2002-2008 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Danny Auble <da@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -364,7 +365,7 @@ static int _set_rec(int *start, int argc, char *argv[],
 					 MAX(command_len, 1))) {
 			if(!assoc)
 				continue;
-			if (get_uint(argv[i]+end, &assoc->fairshare, 
+			if (get_uint(argv[i]+end, &assoc->shares_raw, 
 				     "FairShare") == SLURM_SUCCESS)
 				a_set = 1;
 		} else if (!strncasecmp (argv[i], "GrpCPUMins", 
@@ -575,10 +576,14 @@ extern int sacctmgr_add_account(int argc, char *argv[])
 	
 	init_acct_association_rec(start_assoc);
 
-	for (i=0; i<argc; i++) 
-		limit_set = _set_rec(&i, argc, argv, name_list, cluster_list,
+	for (i=0; i<argc; i++) {
+		int command_len = strlen(argv[i]);
+		if (!strncasecmp (argv[i], "Where", MAX(command_len, 5))
+		    || !strncasecmp (argv[i], "Set", MAX(command_len, 3))) 
+			i++;		
+		limit_set += _set_rec(&i, argc, argv, name_list, cluster_list,
 				     start_acct, start_assoc);
-
+	}
 	if(exit_code) 
 		return SLURM_ERROR;
 
@@ -780,7 +785,7 @@ extern int sacctmgr_add_account(int argc, char *argv[])
 			assoc->acct = xstrdup(name);
 			assoc->cluster = xstrdup(cluster);
 			assoc->parent_acct = xstrdup(start_assoc->parent_acct);
-			assoc->fairshare = start_assoc->fairshare;
+			assoc->shares_raw = start_assoc->shares_raw;
 
 			assoc->grp_cpu_mins = start_assoc->grp_cpu_mins;
 			assoc->grp_cpus = start_assoc->grp_cpus;
@@ -949,7 +954,13 @@ extern int sacctmgr_list_account(int argc, char *argv[])
 
 	acct_cond->with_assocs = with_assoc_flag;
 
-	set = _set_cond(&i, argc, argv, acct_cond, format_list);
+	for (i=0; i<argc; i++) {
+		int command_len = strlen(argv[i]);
+		if (!strncasecmp (argv[i], "Where", MAX(command_len, 5))
+		    || !strncasecmp (argv[i], "Set", MAX(command_len, 3))) 
+			i++;		
+		set += _set_cond(&i, argc, argv, acct_cond, format_list);
+	}
 
 	if(exit_code) {
 		destroy_acct_account_cond(acct_cond);
@@ -995,6 +1006,7 @@ extern int sacctmgr_list_account(int argc, char *argv[])
 
 		field = xmalloc(sizeof(print_field_t));
 		if(!strncasecmp("Account", object, MAX(command_len, 1))
+		   || !strncasecmp("Acct", object, MAX(command_len, 4))
 		   || !strncasecmp("Name", object, MAX(command_len, 2))) {
 			field->type = PRINT_ACCOUNT;
 			field->name = xstrdup("Account");
@@ -1149,7 +1161,7 @@ extern int sacctmgr_list_account(int argc, char *argv[])
 			continue;
 		}
 		
-		if(newlen > 0) 
+		if(newlen) 
 			field->len = newlen;
 		
 		list_append(print_fields_list, field);		
@@ -1216,7 +1228,7 @@ extern int sacctmgr_list_account(int argc, char *argv[])
 					case PRINT_FAIRSHARE:
 						field->print_routine(
 							field, 
-							assoc->fairshare,
+							assoc->shares_raw,
 							(curr_inx == 
 							 field_count));
 						break;
@@ -1480,13 +1492,13 @@ extern int sacctmgr_modify_account(int argc, char *argv[])
 		int command_len = strlen(argv[i]);
 		if (!strncasecmp (argv[i], "Where", MAX(command_len, 5))) {
 			i++;
-			cond_set = _set_cond(&i, argc, argv, acct_cond, NULL);
+			cond_set += _set_cond(&i, argc, argv, acct_cond, NULL);
 		} else if (!strncasecmp (argv[i], "Set", MAX(command_len, 3))) {
 			i++;
-			rec_set = _set_rec(&i, argc, argv, NULL, NULL, 
+			rec_set += _set_rec(&i, argc, argv, NULL, NULL, 
 					   acct, assoc);
 		} else {
-			cond_set = _set_cond(&i, argc, argv, acct_cond, NULL);
+			cond_set += _set_cond(&i, argc, argv, acct_cond, NULL);
 		}
 	}
 
@@ -1610,7 +1622,15 @@ extern int sacctmgr_delete_account(int argc, char *argv[])
 	ListIterator itr = NULL;
 	int set = 0;
 	
-	if(!(set = _set_cond(&i, argc, argv, acct_cond, NULL))) {
+	for (i=0; i<argc; i++) {
+		int command_len = strlen(argv[i]);
+		if (!strncasecmp (argv[i], "Where", MAX(command_len, 5))
+		    || !strncasecmp (argv[i], "Set", MAX(command_len, 3))) 
+			i++;		
+		set += _set_cond(&i, argc, argv, acct_cond, NULL);
+	}
+
+	if(!set) {
 		exit_code=1;
 		fprintf(stderr, 
 			" No conditions given to remove, not executing.\n");
diff --git a/src/sacctmgr/archive_functions.c b/src/sacctmgr/archive_functions.c
index b4aad579907bbd0c9b2f27d2caa106fb4a3f0d4f..134cd96d1b8c441d8c4f6de4d9e1131a76c01427 100644
--- a/src/sacctmgr/archive_functions.c
+++ b/src/sacctmgr/archive_functions.c
@@ -6,10 +6,11 @@
  *  Copyright (C) 2002-2007 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Danny Auble <da@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -38,6 +39,8 @@
 \*****************************************************************************/
 
 #include "src/sacctmgr/sacctmgr.h"
+#include <sys/param.h>		/* MAXPATHLEN */
+#include "src/common/proc_args.h"
 
 /* returns number of objects added to list */
 extern int _addto_uid_char_list(List char_list, char *names)
@@ -175,6 +178,10 @@ static int _set_cond(int *start, int argc, char *argv[],
 		if(!end && !strncasecmp(argv[i], "where",
 					MAX(command_len, 5))) {
 			continue;
+		} else if(!end && !strncasecmp(argv[i], "events",
+					  MAX(command_len, 1))) {
+			arch_cond->archive_events = 1;
+			set = 1;
 		} else if(!end && !strncasecmp(argv[i], "jobs",
 					  MAX(command_len, 1))) {
 			arch_cond->archive_jobs = 1;
@@ -183,6 +190,10 @@ static int _set_cond(int *start, int argc, char *argv[],
 					  MAX(command_len, 1))) {
 			arch_cond->archive_steps = 1;
 			set = 1;
+		} else if(!end && !strncasecmp(argv[i], "suspend",
+					  MAX(command_len, 1))) {
+			arch_cond->archive_suspend = 1;
+			set = 1;
 		} else if(!end 
 			  || !strncasecmp (argv[i], "Clusters",
 					   MAX(command_len, 1))) {
@@ -261,18 +272,34 @@ static int _set_cond(int *start, int argc, char *argv[],
 			slurm_addto_char_list(job_cond->partition_list,
 					      argv[i]+end);
 			set = 1;
-		} else if (!strncasecmp (argv[i], "PurgeJobsBefore",
+		} else if (!strncasecmp (argv[i], "PurgeEventMonths",
 					 MAX(command_len, 6))) {
-			if (get_uint16(argv[i]+end, &arch_cond->job_purge,
-				       "PurgeJobsBefore")
+			if (get_uint16(argv[i]+end, &arch_cond->purge_event,
+				       "PurgeEventMonths")
 			    != SLURM_SUCCESS) {
 				exit_code = 1;
 			} else
 				set = 1;
-		} else if (!strncasecmp (argv[i], "PurgeStepsBefore",
+		} else if (!strncasecmp (argv[i], "PurgeJobMonths",
 					 MAX(command_len, 6))) {
-			if (get_uint16(argv[i]+end, &arch_cond->step_purge,
-				       "PurgeStepsBefore")
+			if (get_uint16(argv[i]+end, &arch_cond->purge_job,
+				       "PurgeJobMonths")
+			    != SLURM_SUCCESS) {
+				exit_code = 1;
+			} else
+				set = 1;
+		} else if (!strncasecmp (argv[i], "PurgeStepMonths",
+					 MAX(command_len, 7))) {
+			if (get_uint16(argv[i]+end, &arch_cond->purge_step,
+				       "PurgeStepMonths")
+			    != SLURM_SUCCESS) {
+				exit_code = 1;
+			} else
+				set = 1;
+		} else if (!strncasecmp (argv[i], "PurgeSuspendMonths",
+					 MAX(command_len, 7))) {
+			if (get_uint16(argv[i]+end, &arch_cond->purge_suspend,
+				       "PurgeSuspendMonths")
 			    != SLURM_SUCCESS) {
 				exit_code = 1;
 			} else
@@ -309,15 +336,26 @@ extern int sacctmgr_archive_dump(int argc, char *argv[])
 {
 	int rc = SLURM_SUCCESS;
 	acct_archive_cond_t *arch_cond = xmalloc(sizeof(acct_archive_cond_t));
-	int i=0, set=0;
+	int i=0;
 	struct stat st;
 
+	arch_cond->archive_events = (uint16_t)NO_VAL;
 	arch_cond->archive_jobs = (uint16_t)NO_VAL;
 	arch_cond->archive_steps = (uint16_t)NO_VAL;
-	arch_cond->job_purge = (uint16_t)NO_VAL;
-	arch_cond->step_purge = (uint16_t)NO_VAL;
+	arch_cond->archive_suspend = (uint16_t)NO_VAL;
+	arch_cond->purge_event = (uint16_t)NO_VAL;
+	arch_cond->purge_job = (uint16_t)NO_VAL;
+	arch_cond->purge_step = (uint16_t)NO_VAL;
+	arch_cond->purge_suspend = (uint16_t)NO_VAL;
+
+	for (i=0; i<argc; i++) {
+		int command_len = strlen(argv[i]);
+		if (!strncasecmp (argv[i], "Where", MAX(command_len, 5))
+		    || !strncasecmp (argv[i], "Set", MAX(command_len, 3))) 
+			i++;		
+		_set_cond(&i, argc, argv, arch_cond);
+	}
 
-	set = _set_cond(&i, argc, argv, arch_cond);
 	if(exit_code) {
 		destroy_acct_archive_cond(arch_cond);
 		return SLURM_ERROR;
@@ -426,20 +464,34 @@ extern int sacctmgr_archive_load(int argc, char *argv[])
 			fprintf(stderr, " Unknown option: %s\n", argv[i]);
 		}		
 	}
-
+	
 	if(exit_code) {
 		destroy_acct_archive_rec(arch_rec);
 		return SLURM_ERROR;
 	} 
+	
+	if (arch_rec->archive_file) {
+		char *fullpath;
+		char cwd[MAXPATHLEN + 1];
+		int  mode = R_OK;
 
-	if (arch_rec->archive_file 
-	    && (stat(arch_rec->archive_file, &st) < 0)) {
-		exit_code = errno;
-		fprintf(stderr, " load: Failed to stat %s: %m\n "
-			"Note: For archive load, the file must be on "
-			"the calling host.\n",
-			arch_rec->archive_file);
-		return SLURM_ERROR;
+		if ((getcwd(cwd, MAXPATHLEN)) == NULL) 
+			fatal("getcwd failed: %m");		
+		
+		if ((fullpath = search_path(cwd, arch_rec->archive_file,
+					    true, mode))) {
+			xfree(arch_rec->archive_file);
+			arch_rec->archive_file = fullpath;
+		} 
+		
+		if(stat(arch_rec->archive_file, &st) < 0) {
+			exit_code = errno;
+			fprintf(stderr, " load: Failed to stat %s: %m\n "
+				"Note: For archive load, the file must be on "
+				"the calling host.\n",
+				arch_rec->archive_file);
+			return SLURM_ERROR;
+		}
 	}
 
 	rc = jobacct_storage_g_archive_load(db_conn, arch_rec);
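
The archive-load change above resolves a relative archive file name to an
absolute path before stat()ing it, so a name given relative to the current
working directory is accepted (the option itself is parsed earlier in this
function). search_path() and the R_OK mode come from src/common/proc_args.h,
included at the top of this file. Minimal sketch of the resolution step:

    char  cwd[MAXPATHLEN + 1];
    char *fullpath;

    if (getcwd(cwd, MAXPATHLEN) == NULL)
            fatal("getcwd failed: %m");
    /* look the name up relative to cwd; on a hit, take ownership of
     * the absolute path returned by search_path() */
    if ((fullpath = search_path(cwd, arch_rec->archive_file, true, R_OK))) {
            xfree(arch_rec->archive_file);
            arch_rec->archive_file = fullpath;
    }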
diff --git a/src/sacctmgr/association_functions.c b/src/sacctmgr/association_functions.c
index 9f6c0ba08824685293c2f73d696d0530ff1f78d2..12d8c937e0a58314b47e7cbe3b66390f6ae08a31 100644
--- a/src/sacctmgr/association_functions.c
+++ b/src/sacctmgr/association_functions.c
@@ -5,10 +5,11 @@
  *  Copyright (C) 2002-2008 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Danny Auble <da@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -349,7 +350,13 @@ extern int sacctmgr_list_association(int argc, char *argv[])
 		PRINT_USER
 	};
 
-	_set_cond(&i, argc, argv, assoc_cond, format_list);
+	for (i=0; i<argc; i++) {
+		int command_len = strlen(argv[i]);
+		if (!strncasecmp (argv[i], "Where", MAX(command_len, 5))
+		    || !strncasecmp (argv[i], "Set", MAX(command_len, 3))) 
+			i++;		
+		_set_cond(&i, argc, argv, assoc_cond, format_list);
+	}
 
 	if(exit_code) {
 		destroy_acct_association_cond(assoc_cond);
@@ -357,7 +364,8 @@ extern int sacctmgr_list_association(int argc, char *argv[])
 		return SLURM_ERROR;
 	} else if(!list_count(format_list)) 
 		slurm_addto_char_list(format_list,
-				      "C,A,U,Part,F,GrpJ,GrpN,GrpS,"
+				      "C,A,U,Part,F,"
+				      "GrpCPUMins,GrpJ,GrpN,GrpS,GrpWall,"
 				      "MaxJ,MaxN,MaxS,MaxW,QOS");
 
 	print_fields_list = list_create(destroy_print_field);
@@ -382,7 +390,7 @@ extern int sacctmgr_list_association(int argc, char *argv[])
 			field->type = PRINT_ACCOUNT;
 			field->name = xstrdup("Account");
 			if(tree_display)
-				field->len = 20;
+				field->len = -20;
 			else
 				field->len = 10;
 			field->print_routine = print_fields_str;
@@ -546,7 +554,7 @@ extern int sacctmgr_list_association(int argc, char *argv[])
 			continue;
 		}
 
-		if(newlen > 0) 
+		if(newlen) 
 			field->len = newlen;
 		
 		list_append(print_fields_list, field);		
@@ -608,8 +616,7 @@ extern int sacctmgr_list_association(int argc, char *argv[])
 					}
 					print_acct = get_tree_acct_name(
 						local_acct,
-						parent_acct,
-						assoc->cluster, tree_list);
+						parent_acct, tree_list);
 					xfree(local_acct);
 				} else {
 					print_acct = assoc->acct;
@@ -628,7 +635,7 @@ extern int sacctmgr_list_association(int argc, char *argv[])
 			case PRINT_FAIRSHARE:
 				field->print_routine(
 					field,
-					assoc->fairshare,
+					assoc->shares_raw,
 					(curr_inx == field_count));
 				break;
 			case PRINT_GRPCM:
diff --git a/src/sacctmgr/cluster_functions.c b/src/sacctmgr/cluster_functions.c
index 23a9d5a547dc52358babe74b6eed3508ae28a3ec..18c8c1d3e7ff23bb68c670e709517b0c3263a329 100644
--- a/src/sacctmgr/cluster_functions.c
+++ b/src/sacctmgr/cluster_functions.c
@@ -6,10 +6,11 @@
  *  Copyright (C) 2002-2007 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Danny Auble <da@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -42,7 +43,8 @@
 
 static int _set_cond(int *start, int argc, char *argv[],
 		     List cluster_list,
-		     List format_list)
+		     List format_list,
+		     uint16_t *classification)
 {
 	int i;
 	int set = 0;
@@ -71,12 +73,20 @@ static int _set_cond(int *start, int argc, char *argv[],
 		} else if(!end || !strncasecmp (argv[i], "Names",
 						MAX(command_len, 1))
 			  || !strncasecmp (argv[i], "Clusters",
-					   MAX(command_len, 1))) {
+					   MAX(command_len, 3))) {
 			if(cluster_list) {
 				if(slurm_addto_char_list(cluster_list,
 							 argv[i]+end))
 					set = 1;
 			}
+		} else if (!strncasecmp (argv[i], "Classification", 
+					 MAX(command_len, 3))) {
+			if(classification) {
+				*classification = 
+					str_2_classification(argv[i]+end);
+				if(*classification)
+					set = 1;
+			}
 		} else if (!strncasecmp (argv[i], "Format",
 					 MAX(command_len, 1))) {
 			if(format_list)
@@ -95,7 +105,8 @@ static int _set_cond(int *start, int argc, char *argv[],
 
 static int _set_rec(int *start, int argc, char *argv[],
 		    List name_list,
-		    acct_association_rec_t *assoc)
+		    acct_association_rec_t *assoc,
+		    uint16_t *classification)
 {
 	int i, mins;
 	int set = 0;
@@ -126,22 +137,31 @@ static int _set_rec(int *start, int argc, char *argv[],
 			  || !strncasecmp (argv[i], "Names",
 					   MAX(command_len, 1)) 
 			  || !strncasecmp (argv[i], "Clusters", 
-					   MAX(command_len, 1))) {
+					   MAX(command_len, 3))) {
 			if(name_list)
-				slurm_addto_char_list(name_list, argv[i]+end);
+				slurm_addto_char_list(name_list,
+						      argv[i]+end);
+		} else if (!strncasecmp (argv[i], "Classification", 
+					 MAX(command_len, 3))) {
+			if(classification) {
+				*classification = 
+					str_2_classification(argv[i]+end);
+				if(*classification)
+					set = 1;
+			}
 		} else if (!strncasecmp (argv[i], "FairShare", 
 					 MAX(command_len, 1))
 			   || !strncasecmp (argv[i], "Shares",
 					 MAX(command_len, 1))) {
-			if (get_uint(argv[i]+end, &assoc->fairshare, 
+			if (get_uint(argv[i]+end, &assoc->shares_raw, 
 			    "FairShare") == SLURM_SUCCESS)
 				set = 1;
 		} else if (!strncasecmp (argv[i], "GrpCPUMins",
 					 MAX(command_len, 7))) {
-			if (get_uint64(argv[i]+end, 
-				       &assoc->grp_cpu_mins, 
-				       "GrpCPUMins") == SLURM_SUCCESS)
-				set = 1;
+			exit_code=1;
+			fprintf(stderr, "GrpCPUMins is not a valid option "
+				"for the root association of a cluster.\n");
+			break;			
 		} else if (!strncasecmp (argv[i], "GrpCpus",
 					 MAX(command_len, 7))) {
 			if (get_uint(argv[i]+end, &assoc->grp_cpus,
@@ -164,16 +184,9 @@ static int _set_rec(int *start, int argc, char *argv[],
 				set = 1;
 		} else if (!strncasecmp (argv[i], "GrpWall",
 					 MAX(command_len, 4))) {
-			mins = time_str2mins(argv[i]+end);
-			if (mins != NO_VAL) {
-				assoc->grp_wall	= (uint32_t) mins;
-				set = 1;
-			} else {
-				exit_code=1;
-				fprintf(stderr, 
-					" Bad GrpWall time format: %s\n", 
-					argv[i]);
-			}
+			exit_code=1;
+			fprintf(stderr, "GrpWall is not a valid option "
+				"for the root association of a cluster.\n");
 		} else if (!strncasecmp (argv[i], "MaxCPUMinsPerJob",
 					 MAX(command_len, 7))) {
 			if (get_uint64(argv[i]+end, 
@@ -257,12 +270,18 @@ extern int sacctmgr_add_cluster(int argc, char *argv[])
 	int limit_set = 0;
 	ListIterator itr = NULL, itr_c = NULL;
 	char *name = NULL;
+	uint16_t class = 0;
 
 	init_acct_association_rec(&start_assoc);
 
-	for (i=0; i<argc; i++) 
-		limit_set = _set_rec(&i, argc, argv, name_list, &start_assoc);
-
+	for (i=0; i<argc; i++) {
+		int command_len = strlen(argv[i]);
+		if (!strncasecmp (argv[i], "Where", MAX(command_len, 5))
+		    || !strncasecmp (argv[i], "Set", MAX(command_len, 3))) 
+			i++;		
+		limit_set += _set_rec(&i, argc, argv,
+				     name_list, &start_assoc, &class);
+	}
 	if(exit_code) {
 		list_destroy(name_list);
 		return SLURM_ERROR;
@@ -278,6 +297,7 @@ extern int sacctmgr_add_cluster(int argc, char *argv[])
 
 		memset(&cluster_cond, 0, sizeof(acct_cluster_cond_t));
 		cluster_cond.cluster_list = name_list;
+		cluster_cond.classification = class;
 
 		temp_list = acct_storage_g_get_clusters(db_conn, my_uid,
 							&cluster_cond);
@@ -322,19 +342,21 @@ extern int sacctmgr_add_cluster(int argc, char *argv[])
 		
 		list_append(cluster_list, cluster);
 		cluster->name = xstrdup(name);
+		cluster->classification = class;
 		cluster->root_assoc = xmalloc(sizeof(acct_association_rec_t));
 		init_acct_association_rec(cluster->root_assoc);
 		printf("  Name          = %s\n", cluster->name);
+		if(cluster->classification)
+			printf("  Classification= %s\n",
+			       get_classification_str(cluster->classification));
 
-		cluster->root_assoc->fairshare = start_assoc.fairshare;		
+		cluster->root_assoc->shares_raw = start_assoc.shares_raw;
 		
-		cluster->root_assoc->grp_cpu_mins = start_assoc.grp_cpu_mins;
 		cluster->root_assoc->grp_cpus = start_assoc.grp_cpus;
 		cluster->root_assoc->grp_jobs = start_assoc.grp_jobs;
 		cluster->root_assoc->grp_nodes = start_assoc.grp_nodes;
 		cluster->root_assoc->grp_submit_jobs =
 			start_assoc.grp_submit_jobs;
-		cluster->root_assoc->grp_wall = start_assoc.grp_wall;
 
 		cluster->root_assoc->max_cpu_mins_pj = 
 			start_assoc.max_cpu_mins_pj;
@@ -407,6 +429,8 @@ extern int sacctmgr_list_cluster(int argc, char *argv[])
 		PRINT_CLUSTER,
 		PRINT_CHOST,
 		PRINT_CPORT,
+		PRINT_CLASS,
+		PRINT_CPUS,
 		PRINT_FAIRSHARE,
 		PRINT_GRPCM,
 		PRINT_GRPC,
@@ -420,6 +444,8 @@ extern int sacctmgr_list_cluster(int argc, char *argv[])
 		PRINT_MAXN,
 		PRINT_MAXS,
 		PRINT_MAXW,
+		PRINT_NODECNT,
+		PRINT_NODES,
 		PRINT_QOS,
 		PRINT_QOS_RAW,
 		PRINT_RPC_VERSION		
@@ -427,7 +453,16 @@ extern int sacctmgr_list_cluster(int argc, char *argv[])
 
 
 	cluster_cond->cluster_list = list_create(slurm_destroy_char);
-	_set_cond(&i, argc, argv, cluster_cond->cluster_list, format_list);
+	for (i=0; i<argc; i++) {
+		int command_len = strlen(argv[i]);
+		if (!strncasecmp (argv[i], "Where", MAX(command_len, 5))
+		    || !strncasecmp (argv[i], "Set", MAX(command_len, 3))) 
+			i++;
+		_set_cond(&i, argc, argv, cluster_cond->cluster_list, 
+			  format_list,
+			  &cluster_cond->classification);
+	}
+
 	if(exit_code) {
 		destroy_acct_cluster_cond(cluster_cond);
 		list_destroy(format_list);
@@ -465,27 +500,33 @@ extern int sacctmgr_list_cluster(int argc, char *argv[])
 		} else if(!strncasecmp("ControlHost", object,
 				       MAX(command_len, 8))) {
 			field->type = PRINT_CHOST;
-			field->name = xstrdup("Control Host");
+			field->name = xstrdup("ControlHost");
 			field->len = 15;
 			field->print_routine = print_fields_str;
 		} else if(!strncasecmp("ControlPort", object,
 				       MAX(command_len, 8))) {
 			field->type = PRINT_CPORT;
-			field->name = xstrdup("Control Port");
+			field->name = xstrdup("ControlPort");
 			field->len = 12;
 			field->print_routine = print_fields_uint;
+		} else if(!strncasecmp("Classification", object,
+				       MAX(command_len, 2))) {
+			field->type = PRINT_CLASS;
+			field->name = xstrdup("Class");
+			field->len = 9;
+			field->print_routine = print_fields_str;
+		} else if(!strncasecmp("CPUCount", object,
+				       MAX(command_len, 2))) {
+			field->type = PRINT_CPUS;
+			field->name = xstrdup("CPUCount");
+			field->len = 9;
+			field->print_routine = print_fields_str;
 		} else if(!strncasecmp("FairShare", object, 
 				       MAX(command_len, 1))) {
 			field->type = PRINT_FAIRSHARE;
 			field->name = xstrdup("FairShare");
 			field->len = 9;
 			field->print_routine = print_fields_uint;
-		} else if(!strncasecmp("GrpCPUMins", object, 
-				       MAX(command_len, 8))) {
-			field->type = PRINT_GRPCM;
-			field->name = xstrdup("GrpCPUMins");
-			field->len = 11;
-			field->print_routine = print_fields_uint64;
 		} else if(!strncasecmp("GrpCPUs", object, 
 				       MAX(command_len, 8))) {
 			field->type = PRINT_GRPC;
@@ -510,12 +551,6 @@ extern int sacctmgr_list_cluster(int argc, char *argv[])
 			field->name = xstrdup("GrpSubmit");
 			field->len = 9;
 			field->print_routine = print_fields_uint;
-		} else if(!strncasecmp("GrpWall", object,
-				       MAX(command_len, 4))) {
-			field->type = PRINT_GRPW;
-			field->name = xstrdup("GrpWall");
-			field->len = 11;
-			field->print_routine = print_fields_time;
 		} else if(!strncasecmp("MaxCPUMinsPerJob", object,
 				       MAX(command_len, 7))) {
 			field->type = PRINT_MAXCM;
@@ -552,6 +587,18 @@ extern int sacctmgr_list_cluster(int argc, char *argv[])
 			field->name = xstrdup("MaxWall");
 			field->len = 11;
 			field->print_routine = print_fields_time;
+		} else if(!strncasecmp("NodeCount", object,
+				       MAX(command_len, 5))) {
+			field->type = PRINT_NODECNT;
+			field->name = xstrdup("NodeCount");
+			field->len = 9;
+			field->print_routine = print_fields_uint;
+		} else if(!strncasecmp("NodeNames", object,
+				       MAX(command_len, 5))) {
+			field->type = PRINT_NODES;
+			field->name = xstrdup("NodeNames");
+			field->len = 20;
+			field->print_routine = print_fields_str;
 		} else if(!strncasecmp("QOSRAWLevel", object, 
 				       MAX(command_len, 4))) {
 			field->type = PRINT_QOS_RAW;
@@ -582,7 +629,7 @@ extern int sacctmgr_list_cluster(int argc, char *argv[])
 			continue;
 		}
 		
-		if(newlen > 0) 
+		if(newlen) 
 			field->len = newlen;
 		
 		list_append(print_fields_list, field);		
@@ -633,16 +680,28 @@ extern int sacctmgr_list_cluster(int argc, char *argv[])
 						     cluster->control_port,
 						     (curr_inx == field_count));
 				break;
-			case PRINT_FAIRSHARE:
-				field->print_routine(
-					field,
-					cluster->root_assoc->fairshare,
-					(curr_inx == field_count));
+			case PRINT_CLASS:
+				field->print_routine(field,
+						     get_classification_str(
+							     cluster->
+							     classification),
+						     (curr_inx == field_count));
+				break;
+			case PRINT_CPUS:
+			{
+				char tmp_char[9];
+				convert_num_unit((float)cluster->cpu_count,
+						 tmp_char, sizeof(tmp_char),
+						 UNIT_NONE);
+				field->print_routine(field,
+						     tmp_char,
+						     (curr_inx == field_count));
 				break;
-			case PRINT_GRPCM:
+			}
+			case PRINT_FAIRSHARE:
 				field->print_routine(
 					field,
-					assoc->grp_cpu_mins,
+					assoc->shares_raw,
 					(curr_inx == field_count));
 				break;
 			case PRINT_GRPC:
@@ -665,12 +724,6 @@ extern int sacctmgr_list_cluster(int argc, char *argv[])
 						     assoc->grp_submit_jobs,
 						     (curr_inx == field_count));
 				break;
-			case PRINT_GRPW:
-				field->print_routine(
-					field,
-					assoc->grp_wall,
-					(curr_inx == field_count));
-				break;
 			case PRINT_MAXCM:
 				field->print_routine(
 					field,
@@ -703,6 +756,28 @@ extern int sacctmgr_list_cluster(int argc, char *argv[])
 					assoc->max_wall_pj,
 					(curr_inx == field_count));
 				break;
+				
+			case PRINT_NODECNT:
+			{
+				hostlist_t hl = hostlist_create(cluster->nodes);
+				int cnt = 0;
+				if(hl) {
+					cnt = hostlist_count(hl);
+					hostlist_destroy(hl);
+				}
+				field->print_routine(
+					field,
+					cnt,
+					(curr_inx == field_count));
+				break;
+			}				
+			case PRINT_NODES:
+				field->print_routine(
+					field,
+					cluster->nodes,
+					(curr_inx == field_count));
+				break;
+				
 			case PRINT_QOS:
 				if(!qos_list) 
 					qos_list = acct_storage_g_get_qos(
@@ -756,7 +831,7 @@ extern int sacctmgr_modify_cluster(int argc, char *argv[])
 		xmalloc(sizeof(acct_association_cond_t));
 	int cond_set = 0, rec_set = 0, set = 0;
 	List ret_list = NULL;
-
+	uint16_t class_cond = 0, class_rec = 0;
 
 	init_acct_association_rec(assoc);
 
@@ -768,15 +843,17 @@ extern int sacctmgr_modify_cluster(int argc, char *argv[])
 		if (!strncasecmp (argv[i], "Where", MAX(command_len, 5))) {
 			i++;
 			if(_set_cond(&i, argc, argv,
-				     assoc_cond->cluster_list, NULL))
+				     assoc_cond->cluster_list,
+				     NULL, &class_cond))
 				cond_set = 1;
 		} else if (!strncasecmp (argv[i], "Set", MAX(command_len, 3))) {
 			i++;
-			if(_set_rec(&i, argc, argv, NULL, assoc))
+			if(_set_rec(&i, argc, argv, NULL, assoc, &class_rec))
 				rec_set = 1;
 		} else {
 			if(_set_cond(&i, argc, argv,
-				     assoc_cond->cluster_list, NULL))
+				     assoc_cond->cluster_list,
+				     NULL, &class_cond))
 				cond_set = 1;
 		}
 	}
@@ -800,11 +877,50 @@ extern int sacctmgr_modify_cluster(int argc, char *argv[])
 		destroy_acct_association_cond(assoc_cond);
 		return SLURM_ERROR;		
 	}
+	
+	if(class_cond) {
+		List temp_list = NULL;
+		acct_cluster_cond_t cluster_cond;
 
+		memset(&cluster_cond, 0, sizeof(acct_cluster_cond_t));
+		cluster_cond.cluster_list = assoc_cond->cluster_list;
+		cluster_cond.classification = class_cond;
+		
+		temp_list = acct_storage_g_get_clusters(db_conn, my_uid,
+							&cluster_cond);
+		if(!temp_list) {
+			exit_code=1;
+			fprintf(stderr,
+				" Problem getting clusters from database.  "
+				"Contact your admin.\n");
+			destroy_acct_association_rec(assoc);
+			destroy_acct_association_cond(assoc_cond);
+			return SLURM_ERROR;
+		} else if(!list_count(temp_list)) {
+			fprintf(stderr,
+				" The class you gave %s didn't "
+				" The classification %s did not "
+				"match any clusters.\n", 
+			destroy_acct_association_rec(assoc);
+			destroy_acct_association_cond(assoc_cond);
+			list_destroy(temp_list);
+			return SLURM_ERROR;
+		}
+		/* we are only looking for the clusters returned from
+		   this query, so we free the cluster_list and replace
+		   it */
+		if(assoc_cond->cluster_list)
+			list_destroy(assoc_cond->cluster_list);
+		assoc_cond->cluster_list = temp_list;
+	}
+	
 	printf(" Setting\n");
 	if(rec_set) {
 		printf(" Default Limits =\n");
 		sacctmgr_print_assoc_limits(assoc);
+		if(class_rec) 
+			printf(" Cluster Classification = %s\n", 
+			       get_classification_str(class_rec));
 	}
 
 	list_append(assoc_cond->acct_list, "root");
@@ -831,6 +947,43 @@ extern int sacctmgr_modify_cluster(int argc, char *argv[])
 
 	if(ret_list)
 		list_destroy(ret_list);
+
+	if(class_rec) {
+		acct_cluster_cond_t cluster_cond;
+		acct_cluster_rec_t cluster_rec;
+		
+		memset(&cluster_cond, 0, sizeof(acct_cluster_cond_t));
+		memset(&cluster_rec, 0, sizeof(acct_cluster_rec_t));
+		/* the classification query above already selected
+		   these clusters, so reuse the list directly */
+		cluster_cond.cluster_list = assoc_cond->cluster_list;
+
+		cluster_rec.classification = class_rec;
+
+		ret_list = acct_storage_g_modify_clusters(
+			db_conn, my_uid, &cluster_cond, &cluster_rec);
+	
+		if(ret_list && list_count(ret_list)) {
+			char *object = NULL;
+			ListIterator itr = list_iterator_create(ret_list);
+			printf(" Modified cluster classifications...\n");
+			while((object = list_next(itr))) {
+				printf("  %s\n", object);
+			}
+			list_iterator_destroy(itr);
+			set = 1;
+		} else if(ret_list) {
+			printf(" Nothing modified\n");
+		} else {
+			exit_code=1;
+			fprintf(stderr, " Error with request\n");
+			rc = SLURM_ERROR;
+		}
+		
+		if(ret_list)
+			list_destroy(ret_list);
+	}
+
 	notice_thread_fini();
 
 	if(set) {
@@ -854,10 +1007,25 @@ extern int sacctmgr_delete_cluster(int argc, char *argv[])
 		xmalloc(sizeof(acct_cluster_cond_t));
 	int i=0;
 	List ret_list = NULL;
+	int cond_set = 0;
 
 	cluster_cond->cluster_list = list_create(slurm_destroy_char);
 	
-	if(!_set_cond(&i, argc, argv, cluster_cond->cluster_list, NULL)) {
+	for (i=0; i<argc; i++) {
+		int command_len = strlen(argv[i]);
+		if (!strncasecmp (argv[i], "Where", MAX(command_len, 5))
+		    || !strncasecmp (argv[i], "Set", MAX(command_len, 3))) 
+			i++;
+		cond_set += _set_cond(&i, argc, argv,
+				      cluster_cond->cluster_list, 
+				      NULL,
+				      &cluster_cond->classification);
+	}
+
+	if(exit_code) {
+		destroy_acct_cluster_cond(cluster_cond);
+		return SLURM_ERROR;
+	} else if(!cond_set) {
 		exit_code=1;
 		fprintf(stderr, 
 			" No conditions given to remove, not executing.\n");
@@ -865,7 +1033,12 @@ extern int sacctmgr_delete_cluster(int argc, char *argv[])
 		return SLURM_ERROR;
 	}
 
-	if(!list_count(cluster_cond->cluster_list)) {
+	if(!list_count(cluster_cond->cluster_list) 
+	   && !cluster_cond->classification) {
+		exit_code=1;
+		fprintf(stderr, 
+			" Problem with delete request: "
+			"nothing given to delete.\n");
 		destroy_acct_cluster_cond(cluster_cond);
 		return SLURM_SUCCESS;
 	}
@@ -921,6 +1094,7 @@ extern int sacctmgr_dump_cluster (int argc, char *argv[])
 	char *line = NULL;
 	int i, command_len = 0;
 	FILE *fd = NULL;
+	char *class_str = NULL;
 
 	for (i=0; i<argc; i++) {
 		int end = parse_option_end(argv[i]);
@@ -965,6 +1139,38 @@ extern int sacctmgr_dump_cluster (int argc, char *argv[])
 		exit_code=1;
 		fprintf(stderr, " We need a cluster to dump.\n");
 		return SLURM_ERROR;
+	} else {
+		List temp_list = NULL;
+		acct_cluster_cond_t cluster_cond;
+		acct_cluster_rec_t *cluster_rec = NULL;
+
+		memset(&cluster_cond, 0, sizeof(acct_cluster_cond_t));
+		cluster_cond.cluster_list = list_create(NULL);
+		list_push(cluster_cond.cluster_list, cluster_name);
+
+		temp_list = acct_storage_g_get_clusters(db_conn, my_uid,
+							&cluster_cond);
+		list_destroy(cluster_cond.cluster_list);
+		if(!temp_list) {
+			exit_code=1;
+			fprintf(stderr,
+				" Problem getting clusters from database.  "
+				"Contact your admin.\n");
+			xfree(cluster_name);
+			return SLURM_ERROR;
+		}
+
+		cluster_rec = list_peek(temp_list);
+		if(!cluster_rec) {
+			exit_code=1;
+			fprintf(stderr, " Cluster %s doesn't exist.\n",
+				cluster_name);
+			xfree(cluster_name);
+			list_destroy(temp_list);
+			return SLURM_ERROR;
+		}
+		class_str = get_classification_str(cluster_rec->classification);
+		list_destroy(temp_list);
 	}
 
 	if(!file_name) {
@@ -991,6 +1197,7 @@ extern int sacctmgr_dump_cluster (int argc, char *argv[])
 		exit_code=1;
 		fprintf(stderr, " Your uid (%u) is not in the "
 			"accounting system, can't dump cluster.\n", my_uid);
+		xfree(cluster_name);
 		xfree(user_name);
 		if(user_list)
 			list_destroy(user_list);
@@ -1003,6 +1210,7 @@ extern int sacctmgr_dump_cluster (int argc, char *argv[])
 			exit_code=1;
 			fprintf(stderr, " Your user does not have sufficient "
 				"privileges to dump clusters.\n");
+			xfree(cluster_name);
 			if(user_list)
 				list_destroy(user_list);
 			xfree(user_name);
@@ -1064,11 +1272,15 @@ extern int sacctmgr_dump_cluster (int argc, char *argv[])
 		   "MaxWallDurationPerJob=1\n") < 0) {
 		exit_code=1;
 		fprintf(stderr, "Can't write to file");
+		xfree(cluster_name);
 		return SLURM_ERROR;
 	}
 
 	line = xstrdup_printf("Cluster - %s", cluster_name);
 
+	if(class_str) 
+		xstrfmtcat(line, ":Classification=%s", class_str);
+
 	acct_hierarchical_rec = list_peek(acct_hierarchical_rec_list);
 	assoc = acct_hierarchical_rec->assoc;
 	if(strcmp(assoc->acct, "root")) 
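
Two of the new sacctmgr_list_cluster() fields above are derived rather than
stored: CPUCount is pretty-printed through convert_num_unit(), and
NodeCount is computed by expanding the cluster's hostlist expression (node
ranges such as "tux[0-15]" are stored compressed, so counting requires
expansion). Sketch of the NodeCount derivation as used above:

    hostlist_t hl = hostlist_create(cluster->nodes); /* e.g. "tux[0-15]" */
    int cnt = 0;

    if (hl) {
            cnt = hostlist_count(hl);  /* 16 for the example above */
            hostlist_destroy(hl);
    }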
diff --git a/src/sacctmgr/common.c b/src/sacctmgr/common.c
index c3d1f6f781a4652065364b90ed8178ac07bcb838..99819ca56930795a29079fa6f5ba3fb3138c341d 100644
--- a/src/sacctmgr/common.c
+++ b/src/sacctmgr/common.c
@@ -5,10 +5,11 @@
  *  Copyright (C) 2002-2007 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Danny Auble <da@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -653,6 +654,30 @@ extern int get_uint64(char *in_value, uint64_t *out_value, char *type)
 	return SLURM_SUCCESS;
 }
 
+extern int get_double(char *in_value, double *out_value, char *type)
+{
+	char *ptr = NULL, *meat = NULL;
+	double num;
+	
+	if(!(meat = strip_quotes(in_value, NULL, 1))) {
+		error("Problem with strip_quotes");
+		return SLURM_ERROR;
+	}
+	num = strtod(meat, &ptr);
+	if ((num == 0) && ptr && ptr[0]) {
+		error("Invalid value for %s (%s)", type, meat);
+		xfree(meat);
+		return SLURM_ERROR;
+	}
+	xfree(meat);
+	
+	if (num < 0)
+		*out_value = (double) INFINITE;		/* flag to clear */
+	else
+		*out_value = (double) num;
+	return SLURM_SUCCESS;
+}
+
 extern int addto_qos_char_list(List char_list, List qos_list, char *names, 
 			       int option)
 {
@@ -970,6 +995,7 @@ extern List copy_char_list(List char_list)
 extern void sacctmgr_print_coord_list(
 	print_field_t *field, List value, int last)
 {
+	int abs_len = abs(field->len);
 	ListIterator itr = NULL;
 	char *print_this = NULL;
 	acct_coord_rec_t *object = NULL;
@@ -998,10 +1024,13 @@ extern void sacctmgr_print_coord_list(
 	else if(print_fields_parsable_print)
 		printf("%s|", print_this);
 	else {
-		if(strlen(print_this) > field->len) 
-			print_this[field->len-1] = '+';
+		if(strlen(print_this) > abs_len)
+			print_this[abs_len-1] = '+';
 		
-		printf("%-*.*s ", field->len, field->len, print_this);
+		if(field->len == abs_len)
+			printf("%*.*s ", abs_len, abs_len, print_this);
+		else
+			printf("%-*.*s ", abs_len, abs_len, print_this);
 	}
 	xfree(print_this);
 }
@@ -1009,6 +1038,7 @@ extern void sacctmgr_print_coord_list(
 extern void sacctmgr_print_qos_list(print_field_t *field, List qos_list,
 				    List value, int last)
 {
+	int abs_len = abs(field->len);
 	char *print_this = NULL;
 
 	print_this = get_qos_complete_str(qos_list, value);
@@ -1019,10 +1049,13 @@ extern void sacctmgr_print_qos_list(print_field_t *field, List qos_list,
 	else if(print_fields_parsable_print)
 		printf("%s|", print_this);
 	else {
-		if(strlen(print_this) > field->len) 
-			print_this[field->len-1] = '+';
+		if(strlen(print_this) > abs_len) 
+			print_this[abs_len-1] = '+';
 		
-		printf("%-*.*s ", field->len, field->len, print_this);
+		if(field->len == abs_len)
+			printf("%*.*s ", abs_len, abs_len, print_this);
+		else
+			printf("%-*.*s ", abs_len, abs_len, print_this);
 	}
 	xfree(print_this);
 }
@@ -1032,10 +1065,10 @@ extern void sacctmgr_print_assoc_limits(acct_association_rec_t *assoc)
 	if(!assoc)
 		return;
 
-	if(assoc->fairshare == INFINITE)
+	if(assoc->shares_raw == INFINITE)
 		printf("  Fairshare     = NONE\n");
-	else if(assoc->fairshare != NO_VAL) 
-		printf("  Fairshare     = %u\n", assoc->fairshare);
+	else if(assoc->shares_raw != NO_VAL) 
+		printf("  Fairshare     = %u\n", assoc->shares_raw);
 
 	if(assoc->grp_cpu_mins == INFINITE)
 		printf("  GrpCPUMins    = NONE\n");
diff --git a/src/sacctmgr/config_functions.c b/src/sacctmgr/config_functions.c
new file mode 100644
index 0000000000000000000000000000000000000000..642d993a0d0701e778281e04b4113fac992cd702
--- /dev/null
+++ b/src/sacctmgr/config_functions.c
@@ -0,0 +1,162 @@
+/*****************************************************************************\
+ *  config_functions.c - functions dealing with system configuration.
+ *****************************************************************************
+ *  Copyright (C) 2009 Lawrence Livermore National Security.
+ *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+ *  Written by Morris Jette <jette1@llnl.gov>
+ *  CODE-OCEC-09-009. All rights reserved.
+ *  
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
+ *  
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission 
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and 
+ *  distribute linked combinations including the two. You must obey the GNU 
+ *  General Public License in all respects for all of the code used other than 
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this 
+ *  exception to your version of the file(s), but you are not obligated to do 
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in 
+ *  the program, then also delete it here.
+ *  
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *  
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#include "src/common/list.h"
+#include "src/common/read_config.h"
+#include "src/common/slurmdbd_defs.h"
+#include "src/common/uid.h"
+#include "src/common/xstring.h"
+#include "src/sacctmgr/sacctmgr.h"
+
+static char    *acct_storage_backup_host = NULL;
+static char    *acct_storage_host = NULL;
+static char    *acct_storage_loc  = NULL;
+static char    *acct_storage_pass = NULL;
+static uint32_t acct_storage_port;
+static char    *acct_storage_type = NULL;
+static char    *acct_storage_user = NULL;
+static char    *auth_type = NULL;
+static uint16_t msg_timeout;
+static char    *plugin_dir = NULL;
+static uint16_t private_data;
+static uint32_t slurm_user_id;
+static uint16_t track_wckey;
+
+static List dbd_config_list = NULL;
+
+
+static void _load_dbd_config(void)
+{
+	dbd_config_list = acct_storage_g_get_config(db_conn);
+}
+
+static void _print_dbd_config(void)
+{
+	ListIterator iter = NULL;
+	config_key_pair_t *key_pair;
+
+	if (!dbd_config_list)
+		return;
+
+	printf("\nSlurmDBD configuration:\n");
+	iter = list_iterator_create(dbd_config_list);
+	while((key_pair = list_next(iter))) {
+		printf("%-22s = %s\n", key_pair->name, key_pair->value);
+	}
+	list_iterator_destroy(iter);
+}
+
+static void _free_dbd_config(void)
+{
+	if (!dbd_config_list)
+		return;
+
+	list_destroy(dbd_config_list);
+	dbd_config_list = NULL;
+}
+
+static void _load_slurm_config(void)
+{
+	acct_storage_backup_host = slurm_get_accounting_storage_backup_host();
+	acct_storage_host = slurm_get_accounting_storage_host();
+	acct_storage_loc  = slurm_get_accounting_storage_loc();
+	acct_storage_pass = slurm_get_accounting_storage_pass();
+	acct_storage_port = slurm_get_accounting_storage_port();
+	acct_storage_type = slurm_get_accounting_storage_type();
+	acct_storage_user = slurm_get_accounting_storage_user();
+	auth_type = slurm_get_auth_type();
+	msg_timeout = slurm_get_msg_timeout();
+	plugin_dir = slurm_get_plugin_dir();
+	private_data = slurm_get_private_data();
+	slurm_user_id = slurm_get_slurm_user_id();
+	track_wckey = slurm_get_track_wckey();
+}
+
+static void _free_slurm_config(void)
+{
+	xfree(acct_storage_backup_host);
+	xfree(acct_storage_host);
+	xfree(acct_storage_loc);
+	xfree(acct_storage_pass);
+	xfree(acct_storage_type);
+	xfree(acct_storage_user);
+	xfree(auth_type);
+	xfree(plugin_dir);
+}
+
+static void _print_slurm_config(void)
+{
+	time_t now = time(NULL);
+	char tmp_str[128], *user_name = NULL;
+
+	slurm_make_time_str(&now, tmp_str, sizeof(tmp_str));
+	printf("Configuration data as of %s\n", tmp_str);
+	printf("AccountingStorageBackupHost  = %s\n", acct_storage_backup_host);
+	printf("AccountingStorageHost  = %s\n", acct_storage_host);
+	printf("AccountingStorageLoc   = %s\n", acct_storage_loc);
+	printf("AccountingStoragePass  = %s\n", acct_storage_pass);
+	printf("AccountingStoragePort  = %u\n", acct_storage_port);
+	printf("AccountingStorageType  = %s\n", acct_storage_type);
+	printf("AccountingStorageUser  = %s\n", acct_storage_user);
+	printf("AuthType               = %s\n", auth_type);
+	printf("MessageTimeout         = %u sec\n", msg_timeout);
+	printf("PluginDir              = %s\n", plugin_dir);
+	private_data_string(private_data, tmp_str, sizeof(tmp_str));
+	printf("PrivateData            = %s\n", tmp_str);
+	user_name = uid_to_string(slurm_user_id);
+	printf("SlurmUserId            = %s(%u)\n", user_name, slurm_user_id);
+	xfree(user_name);
+	printf("SLURM_CONF             = %s\n", default_slurm_config_file);
+	printf("SLURM_VERSION          = %s\n", SLURM_VERSION);
+	printf("TrackWCKey             = %u\n", track_wckey);
+}
+
+extern int sacctmgr_list_config(bool have_db_conn)
+{
+	_load_slurm_config();
+	_print_slurm_config();
+	_free_slurm_config();
+
+	if (have_db_conn) {
+		_load_dbd_config();
+		_print_dbd_config();
+		_free_dbd_config();
+	}
+
+	return SLURM_SUCCESS;
+}
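
The new sacctmgr_list_config() prints the local slurm.conf accounting
settings and, when a database connection is available, whatever name/value
pairs the DBD reports via acct_storage_g_get_config(). The consumer side of
that list is just an iteration over config_key_pair_t entries, as in this
sketch (the values themselves are supplied by the DBD at runtime):

    List cfg = acct_storage_g_get_config(db_conn);

    if (cfg) {
            ListIterator iter = list_iterator_create(cfg);
            config_key_pair_t *key_pair;

            while ((key_pair = list_next(iter)))
                    printf("%-22s = %s\n", key_pair->name, key_pair->value);
            list_iterator_destroy(iter);
            list_destroy(cfg);
    }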
diff --git a/src/sacctmgr/file_functions.c b/src/sacctmgr/file_functions.c
index 61848f224eaf9f65c7f4648ac74462f3524f0150..8499020fb6138a4bf41904dcf4e14da321f55a6b 100644
--- a/src/sacctmgr/file_functions.c
+++ b/src/sacctmgr/file_functions.c
@@ -6,10 +6,11 @@
  *  Copyright (C) 2002-2007 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Danny Auble <da@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -42,6 +43,7 @@
 
 typedef struct {
 	acct_admin_level_t admin;
+	uint16_t classification;
 	List coord_list; /* char *list */
 	char *def_acct;
 	char *def_wckey;
@@ -71,6 +73,7 @@ typedef struct {
 
 enum {
 	PRINT_ACCOUNT,
+	PRINT_CLASSIFICATION,
 	PRINT_ADMIN,
 	PRINT_CLUSTER,
 	PRINT_COORDS,
@@ -322,6 +325,10 @@ static sacctmgr_file_opts_t *_parse_options(char *options)
 				file_opts->coord_list =
 					list_create(slurm_destroy_char);
 			slurm_addto_char_list(file_opts->coord_list, option);
+		} else if (!strncasecmp (sub, "Classification",
+					 MAX(command_len, 2))) {
+			file_opts->classification =
+				str_2_classification(option);
 		} else if (!strncasecmp (sub, "DefaultAccount",
 					 MAX(command_len, 8))) {
 			file_opts->def_acct = xstrdup(option);
@@ -556,11 +563,17 @@ static List _set_up_print_fields(List format_list)
 			field->len = 9;
 			field->print_routine = print_fields_str;
 		} else if(!strncasecmp("Cluster", object,
-				       MAX(command_len, 2))) {
+				       MAX(command_len, 3))) {
 			field->type = PRINT_CLUSTER;
 			field->name = xstrdup("Cluster");
 			field->len = 10;
 			field->print_routine = print_fields_str;
+		} else if(!strncasecmp("Classification", object,
+				       MAX(command_len, 3))) {
+			field->type = PRINT_CLASSIFICATION;
+			field->name = xstrdup("Classif");
+			field->len = 10;
+			field->print_routine = print_fields_str;
 		} else if(!strncasecmp("Coordinators", object,
 				       MAX(command_len, 2))) {
 			field->type = PRINT_COORDS;
@@ -767,7 +780,7 @@ static int _print_out_assoc(List assoc_list, bool user, bool add)
 				break;
 			case PRINT_FAIRSHARE:
 				field->print_routine(field,
-						     assoc->fairshare);
+						     assoc->shares_raw);
 				break;
 			case PRINT_GRPCM:
 				field->print_routine(
@@ -859,497 +872,163 @@ static int _print_out_assoc(List assoc_list, bool user, bool add)
 	return rc;
 }
 
-static int _mod_acct(sacctmgr_file_opts_t *file_opts,
-		     acct_account_rec_t *acct, char *parent)
+static int _mod_assoc(sacctmgr_file_opts_t *file_opts,
+		      acct_association_rec_t *assoc,
+		      sacctmgr_mod_type_t mod_type,
+		      char *parent)
 {
 	int changed = 0;
-	char *desc = NULL, *org = NULL, *my_info = NULL;
-	acct_account_rec_t mod_acct;
-	acct_account_cond_t acct_cond;
+	acct_association_rec_t mod_assoc;
 	acct_association_cond_t assoc_cond;
-	
-	memset(&mod_acct, 0, sizeof(acct_account_rec_t));
-	memset(&acct_cond, 0, sizeof(acct_account_cond_t));
-	memset(&assoc_cond, 0, sizeof(acct_association_cond_t));
+	char *type = NULL;
+	char *name = NULL;
+	char *my_info = NULL;
 
-	if(file_opts->desc) 
-		desc = xstrdup(file_opts->desc);
+	switch(mod_type) {
+	case MOD_CLUSTER:
+		type = "Cluster";
+		name = assoc->cluster;
+		break;
+	case MOD_ACCT:
+		type = "Account";
+		name = assoc->acct;
+		break;
+	case MOD_USER:
+		type = "User";
+		name = assoc->user;
+		break;
+	default:
+		return 0;
+		break;
+	}
+	init_acct_association_rec(&mod_assoc);
+	memset(&assoc_cond, 0, sizeof(acct_association_cond_t));
 
-	if(desc && strcmp(desc, acct->description)) {
-		xstrfmtcat(my_info, 
-			   "%-30.30s for %-7.7s %-10.10s %8s -> %s\n",
-			   " Changed description", "Account",
-			   acct->name,
-			   acct->description,
-			   desc);
-		mod_acct.description = desc;
+	if((file_opts->fairshare != NO_VAL)
+	   && (assoc->shares_raw != file_opts->fairshare)) {
+		mod_assoc.shares_raw = file_opts->fairshare;
 		changed = 1;
-	} else 
-		xfree(desc);
-				
-	if(file_opts->org)
-		org = xstrdup(file_opts->org);
-
-	if(org && strcmp(org, acct->organization)) {
 		xstrfmtcat(my_info, 
-			   "%-30.30s for %-7.7s %-10.10s %8s -> %s\n",
-			   " Changed organization", "Account",
-			   acct->name,
-			   acct->organization,
-			   org);
-		mod_acct.organization = org;
-		changed = 1;
-	} else
-		xfree(org);
-									
-	if(changed) {
-		List ret_list = NULL;
-					
-		assoc_cond.acct_list = list_create(NULL);
-		list_append(assoc_cond.acct_list, acct->name);
-		acct_cond.assoc_cond = &assoc_cond;
-
-		notice_thread_init();
-		ret_list = acct_storage_g_modify_accounts(db_conn, my_uid,
-							  &acct_cond, 
-							  &mod_acct);
-		notice_thread_fini();
-	
-		list_destroy(assoc_cond.acct_list);
+			   "%-30.30s for %-7.7s %-10.10s %8d -> %d\n",
+			   " Changed fairshare",
+			   type, name,
+			   assoc->shares_raw,
+			   file_opts->fairshare);
+	}
 
-/* 		if(ret_list && list_count(ret_list)) { */
-/* 			char *object = NULL; */
-/* 			ListIterator itr = list_iterator_create(ret_list); */
-/* 			printf(" Modified account defaults for " */
-/* 			       "associations...\n"); */
-/* 			while((object = list_next(itr)))  */
-/* 				printf("  %s\n", object); */
-/* 			list_iterator_destroy(itr); */
-/* 		} */
- 
-		if(ret_list) {
-			printf("%s", my_info);
-			list_destroy(ret_list);
-		} else
-			changed = 0;
-		xfree(my_info);
+	if((file_opts->grp_cpu_mins != NO_VAL)
+	   && (assoc->grp_cpu_mins != file_opts->grp_cpu_mins)) {
+		mod_assoc.grp_cpu_mins = file_opts->grp_cpu_mins;
+		changed = 1;
+		xstrfmtcat(my_info, 
+			   "%-30.30s for %-7.7s %-10.10s %8llu -> %llu\n",
+			   " Changed GrpCPUMins",
+			   type, name,
+			   assoc->grp_cpu_mins,
+			fprintf(stderr, " GrpCPUMins is not a valid option "
 	}
-	xfree(desc);
-	xfree(org);
-	return changed;
-}
 
-static int _mod_user(sacctmgr_file_opts_t *file_opts,
-		     acct_user_rec_t *user, char *cluster, char *parent)
-{
-	int rc;
-	int set = 0;
-	int changed = 0;
-	char *def_acct = NULL, *def_wckey = NULL, *my_info = NULL;
-	acct_user_rec_t mod_user;
-	acct_user_cond_t user_cond;
-	List ret_list = NULL;
-	acct_association_cond_t assoc_cond;
+	if((file_opts->grp_cpus != NO_VAL)
+	   && (assoc->grp_cpus != file_opts->grp_cpus)) {
+		mod_assoc.grp_cpus = file_opts->grp_cpus;
+			fprintf(stderr, " GrpWall is not a valid option "
+		xstrfmtcat(my_info, 
+			   "%-30.30s for %-7.7s %-10.10s %8d -> %d\n",
+			   " Changed GrpCpus",
+			   type, name,
+			   assoc->grp_cpus, 
+			   file_opts->grp_cpus);
+	}
 
-	if(!user || !user->name) {
-		fatal(" We need a user name in _mod_user");
+	if((file_opts->grp_jobs != NO_VAL)
+	   && (assoc->grp_jobs != file_opts->grp_jobs)) {
+		mod_assoc.grp_jobs = file_opts->grp_jobs;
+		changed = 1;
+		xstrfmtcat(my_info, 
+			   "%-30.30s for %-7.7s %-10.10s %8d -> %d\n",
+			   " Changed GrpJobs",
+			   type, name,
+			   assoc->grp_jobs,
+			   file_opts->grp_jobs);
 	}
 
-	memset(&mod_user, 0, sizeof(acct_user_rec_t));
-	memset(&user_cond, 0, sizeof(acct_user_cond_t));
-	memset(&assoc_cond, 0, sizeof(acct_association_cond_t));
-				
-	assoc_cond.user_list = list_create(NULL);
-	list_append(assoc_cond.user_list, user->name);
-	user_cond.assoc_cond = &assoc_cond;
+	if((file_opts->grp_nodes != NO_VAL)
+	   && (assoc->grp_nodes != file_opts->grp_nodes)) {
+		mod_assoc.grp_nodes = file_opts->grp_nodes;
+		changed = 1;
+		xstrfmtcat(my_info, 
+			   "%-30.30s for %-7.7s %-10.10s %8d -> %d\n",
+			   " Changed GrpNodes",
+			   type, name,
+			   assoc->grp_nodes, 
+			   file_opts->grp_nodes);
+	}
 
-	if(file_opts->def_acct)
-		def_acct = xstrdup(file_opts->def_acct);
+	if((file_opts->grp_submit_jobs != NO_VAL)
+	   && (assoc->grp_submit_jobs != file_opts->grp_submit_jobs)) {
+		mod_assoc.grp_submit_jobs = file_opts->grp_submit_jobs;
+		changed = 1;
+		xstrfmtcat(my_info, 
+			   "%-30.30s for %-7.7s %-10.10s %8d -> %d\n",
+			   " Changed GrpSubmitJobs",
+			   type, name,
+			   assoc->grp_submit_jobs,
+			   file_opts->grp_submit_jobs);
+	}
 
-	if(def_acct && 
-	   (!user->default_acct || strcmp(def_acct, user->default_acct))) {
+	if((file_opts->grp_wall != NO_VAL)
+	   && (assoc->grp_wall != file_opts->grp_wall)) {
+		mod_assoc.grp_wall = file_opts->grp_wall;
+		changed = 1;
 		xstrfmtcat(my_info, 
-			   "%-30.30s for %-7.7s %-10.10s %8s -> %s\n",
-			   " Changed Default Account", "User",
-			   user->name,
-			   user->default_acct,
-			   def_acct);
-		mod_user.default_acct = def_acct;
+			   "%-30.30s for %-7.7s %-10.10s %8d -> %d\n",
+			   " Changed GrpWallDuration",
+			   type, name,
+			   assoc->grp_wall,
+			   file_opts->grp_wall);
+	}
+	
+	if((file_opts->max_cpu_mins_pj != NO_VAL)
+	   && (assoc->max_cpu_mins_pj != file_opts->max_cpu_mins_pj)) {
+		mod_assoc.max_cpu_mins_pj =
+			file_opts->max_cpu_mins_pj;
 		changed = 1;
+		xstrfmtcat(my_info, 
+			   "%-30.30s for %-7.7s %-10.10s %8llu -> %llu\n",
+			   " Changed MaxCPUMinsPerJob",
+			   type, name,
+			   assoc->max_cpu_mins_pj,
+			   file_opts->max_cpu_mins_pj);
 	}
-									
-	if(file_opts->def_wckey)
-		def_wckey = xstrdup(file_opts->def_wckey);
 
-	if(def_wckey && 
-	   (!user->default_wckey || strcmp(def_wckey, user->default_wckey))) {
+	if((file_opts->max_cpus_pj != NO_VAL)
+	   && (assoc->max_cpus_pj != file_opts->max_cpus_pj)) {
+		mod_assoc.max_cpus_pj = file_opts->max_cpus_pj;
+		changed = 1;
 		xstrfmtcat(my_info, 
-			   "%-30.30s for %-7.7s %-10.10s %8s -> %s\n",
-			   " Changed Default WCKey", "User",
-			   user->name,
-			   user->default_wckey,
-			   def_wckey);
-		mod_user.default_wckey = def_wckey;
+			   "%-30.30s for %-7.7s %-10.10s %8d -> %d\n",
+			   " Changed MaxCpusPerJob",
+			   type, name,
+			   assoc->max_cpus_pj, 
+			   file_opts->max_cpus_pj);
+	}
+
+	if((file_opts->max_jobs != NO_VAL)
+	   && (assoc->max_jobs != file_opts->max_jobs)) {
+		mod_assoc.max_jobs = file_opts->max_jobs;
 		changed = 1;
+		xstrfmtcat(my_info, 
+			   "%-30.30s for %-7.7s %-10.10s %8d -> %d\n",
+			   " Changed MaxJobs",
+			   type, name,
+			   assoc->max_jobs,
+			   file_opts->max_jobs);
 	}
-								
-	if(user->admin_level != ACCT_ADMIN_NOTSET
-	   && file_opts->admin != ACCT_ADMIN_NOTSET
-	   && user->admin_level != file_opts->admin) {
-		xstrfmtcat(my_info, 
-			   "%-30.30s for %-7.7s %-10.10s %8s -> %s\n",
-			   " Changed Admin Level", "User",
-			   user->name,
-			   acct_admin_level_str(
-				   user->admin_level),
-			   acct_admin_level_str(
-				   file_opts->admin));
-		mod_user.admin_level = file_opts->admin;
-		changed = 1;
-	}
-									
-	if(changed) {
-		notice_thread_init();
-		ret_list = acct_storage_g_modify_users(
-			db_conn, my_uid,
-			&user_cond, 
-			&mod_user);
-		notice_thread_fini();
-					
-/* 		if(ret_list && list_count(ret_list)) { */
-/* 			char *object = NULL; */
-/* 			ListIterator itr = list_iterator_create(ret_list); */
-/* 			printf(" Modified user defaults for " */
-/* 			       "associations...\n"); */
-/* 			while((object = list_next(itr)))  */
-/* 				printf("  %s\n", object); */
-/* 			list_iterator_destroy(itr); */
-/* 		} */
- 
-		if(ret_list) {
-			printf("%s", my_info);
-			list_destroy(ret_list);
-			set = 1;
-		} 
-		xfree(my_info);
-	}
-	xfree(def_acct);
-	xfree(def_wckey);
-
-	if((!user->coord_accts || !list_count(user->coord_accts))
-		  && (file_opts->coord_list 
-		      && list_count(file_opts->coord_list))) {
-		ListIterator coord_itr = NULL;
-		char *temp_char = NULL;
-		acct_coord_rec_t *coord = NULL;
-		int first = 1;
-		notice_thread_init();
-		rc = acct_storage_g_add_coord(db_conn, my_uid, 
-					      file_opts->coord_list,
-					      &user_cond);
-		notice_thread_fini();
-
-		user->coord_accts = list_create(destroy_acct_coord_rec);
-		coord_itr = list_iterator_create(file_opts->coord_list);
-		printf(" Making User '%s' coordinator for account(s)",
-		       user->name);
-		while((temp_char = list_next(coord_itr))) {
-			coord = xmalloc(sizeof(acct_coord_rec_t));
-			coord->name = xstrdup(temp_char);
-			coord->direct = 1;
-			list_push(user->coord_accts, coord);
-
-			if(first) {
-				printf(" %s", temp_char);
-				first = 0;
-			} else
-				printf(", %s", temp_char);
-		}
-		list_iterator_destroy(coord_itr);
-		printf("\n");
-		set = 1;
-	} else if((user->coord_accts && list_count(user->coord_accts))
-		  && (file_opts->coord_list 
-		      && list_count(file_opts->coord_list))) {
-		ListIterator coord_itr = NULL;
-		ListIterator char_itr = NULL;
-		char *temp_char = NULL;
-		acct_coord_rec_t *coord = NULL;
-		List add_list = list_create(NULL);
-
-		coord_itr = list_iterator_create(user->coord_accts);
-		char_itr = list_iterator_create(file_opts->coord_list);
-
-		while((temp_char = list_next(char_itr))) {
-			while((coord = list_next(coord_itr))) {
-				if(!coord->direct)
-					continue;
-				if(!strcmp(coord->name, temp_char)) {
-					break;
-				}
-			}
-			if(!coord) {
-				printf(" Making User '%s' coordinator of "
-				       "account '%s'\n",
-				       user->name,
-				       temp_char);
-					
-				list_append(add_list, temp_char);
-			}
-			list_iterator_reset(coord_itr);
-		}
-
-		list_iterator_destroy(char_itr);
-		list_iterator_destroy(coord_itr);
-
-		if(list_count(add_list)) {
-			notice_thread_init();
-			rc = acct_storage_g_add_coord(db_conn, my_uid, 
-						      add_list,
-						      &user_cond);
-			notice_thread_fini();
-			set = 1;
-		}
-		list_destroy(add_list);
-	}
-
-	if((!user->wckey_list || !list_count(user->wckey_list))
-		  && (file_opts->wckey_list 
-		      && list_count(file_opts->wckey_list))) {
-		ListIterator wckey_itr = NULL;
-		char *temp_char = NULL;
-		acct_wckey_rec_t *wckey = NULL;
-		int first = 1;
-
-		user->wckey_list = list_create(destroy_acct_wckey_rec);
-		wckey_itr = list_iterator_create(file_opts->wckey_list);
-		printf(" Adding WCKey(s) ");
-		while((temp_char = list_next(wckey_itr))) {
-			wckey = xmalloc(sizeof(acct_wckey_rec_t));
-			wckey->name = xstrdup(temp_char);
-			wckey->cluster = xstrdup(cluster);
-			wckey->user = xstrdup(user->name);
-			list_push(user->wckey_list, wckey);
-
-			if(first) {
-				printf("'%s'", temp_char);
-				first = 0;
-			} else
-				printf(", '%s'", temp_char);
-		}
-		list_iterator_destroy(wckey_itr);
-		printf(" for user '%s'\n", user->name);
-		set = 1;
-		notice_thread_init();
-		rc = acct_storage_g_add_wckeys(db_conn, my_uid, 
-					       user->wckey_list);
-		notice_thread_fini();
-	} else if((user->wckey_list && list_count(user->wckey_list))
-		  && (file_opts->wckey_list 
-		      && list_count(file_opts->wckey_list))) {
-		ListIterator wckey_itr = NULL;
-		ListIterator char_itr = NULL;
-		char *temp_char = NULL;
-		acct_wckey_rec_t *wckey = NULL;
-		List add_list = list_create(destroy_acct_wckey_rec);
-
-		wckey_itr = list_iterator_create(user->wckey_list);
-		char_itr = list_iterator_create(file_opts->wckey_list);
-
-		while((temp_char = list_next(char_itr))) {
-			while((wckey = list_next(wckey_itr))) {
-				if(!strcmp(wckey->name, temp_char)) 
-					break;
-			}
-			if(!wckey) {
-				printf(" Adding WCKey '%s' to User '%s'\n",
-				       temp_char, user->name);
-				wckey = xmalloc(sizeof(acct_wckey_rec_t));
-				wckey->name = xstrdup(temp_char);
-				wckey->cluster = xstrdup(cluster);
-				wckey->user = xstrdup(user->name);
-					
-				list_append(add_list, wckey);
-			}
-			list_iterator_reset(wckey_itr);
-		}
-
-		list_iterator_destroy(char_itr);
-		list_iterator_destroy(wckey_itr);
-
-		if(list_count(add_list)) {
-			notice_thread_init();
-			rc = acct_storage_g_add_wckeys(db_conn, my_uid, 
-						       add_list);
-			notice_thread_fini();
-			set = 1;
-		}
-		list_transfer(user->wckey_list, add_list);
-		list_destroy(add_list);
-	}
-
-	list_destroy(assoc_cond.user_list);
-
-	return set;
-}
-
-static int _mod_assoc(sacctmgr_file_opts_t *file_opts,
-		      acct_association_rec_t *assoc,
-		      sacctmgr_mod_type_t mod_type,
-		      char *parent)
-{
-	int changed = 0;
-	acct_association_rec_t mod_assoc;
-	acct_association_cond_t assoc_cond;
-	char *type = NULL;
-	char *name = NULL;
-	char *my_info = NULL;
-
-	switch(mod_type) {
-	case MOD_CLUSTER:
-		type = "Cluster";
-		name = assoc->cluster;
-		break;
-	case MOD_ACCT:
-		type = "Account";
-		name = assoc->acct;
-		break;
-	case MOD_USER:
-		type = "User";
-		name = assoc->user;
-		break;
-	default:
-		return 0;
-		break;
-	}
-	init_acct_association_rec(&mod_assoc);
-	memset(&assoc_cond, 0, sizeof(acct_association_cond_t));
-
-	if((file_opts->fairshare != NO_VAL)
-	   && (assoc->fairshare != file_opts->fairshare)) {
-		mod_assoc.fairshare = file_opts->fairshare;
-		changed = 1;
-		xstrfmtcat(my_info, 
-			   "%-30.30s for %-7.7s %-10.10s %8d -> %d\n",
-			   " Changed fairshare",
-			   type, name,
-			   assoc->fairshare,
-			   file_opts->fairshare);
-	}
-
-	if((file_opts->grp_cpu_mins != NO_VAL)
-	   && (assoc->grp_cpu_mins != file_opts->grp_cpu_mins)) {
-		mod_assoc.grp_cpu_mins = file_opts->grp_cpu_mins;
-		changed = 1;
-		xstrfmtcat(my_info, 
-			   "%-30.30s for %-7.7s %-10.10s %8llu -> %llu\n",
-			   " Changed GrpCPUMins",
-			   type, name,
-			   assoc->grp_cpu_mins,
-			   file_opts->grp_cpu_mins);
-	}
-
-	if((file_opts->grp_cpus != NO_VAL)
-	   && (assoc->grp_cpus != file_opts->grp_cpus)) {
-		mod_assoc.grp_cpus = file_opts->grp_cpus;
-		changed = 1;
-		xstrfmtcat(my_info, 
-			   "%-30.30s for %-7.7s %-10.10s %8d -> %d\n",
-			   " Changed GrpCpus",
-			   type, name,
-			   assoc->grp_cpus, 
-			   file_opts->grp_cpus);
-	}
-
-	if((file_opts->grp_jobs != NO_VAL)
-	   && (assoc->grp_jobs != file_opts->grp_jobs)) {
-		mod_assoc.grp_jobs = file_opts->grp_jobs;
-		changed = 1;
-		xstrfmtcat(my_info, 
-			   "%-30.30s for %-7.7s %-10.10s %8d -> %d\n",
-			   " Changed GrpJobs",
-			   type, name,
-			   assoc->grp_jobs,
-			   file_opts->grp_jobs);
-	}
-
-	if((file_opts->grp_nodes != NO_VAL)
-	   && (assoc->grp_nodes != file_opts->grp_nodes)) {
-		mod_assoc.grp_nodes = file_opts->grp_nodes;
-		changed = 1;
-		xstrfmtcat(my_info, 
-			   "%-30.30s for %-7.7s %-10.10s %8d -> %d\n",
-			   " Changed GrpNodes",
-			   type, name,
-			   assoc->grp_nodes, 
-			   file_opts->grp_nodes);
-	}
-
-	if((file_opts->grp_submit_jobs != NO_VAL)
-	   && (assoc->grp_submit_jobs != file_opts->grp_submit_jobs)) {
-		mod_assoc.grp_submit_jobs = file_opts->grp_submit_jobs;
-		changed = 1;
-		xstrfmtcat(my_info, 
-			   "%-30.30s for %-7.7s %-10.10s %8d -> %d\n",
-			   " Changed GrpSubmitJobs",
-			   type, name,
-			   assoc->grp_submit_jobs,
-			   file_opts->grp_submit_jobs);
-	}
-
-	if((file_opts->grp_wall != NO_VAL)
-	   && (assoc->grp_wall != file_opts->grp_wall)) {
-		mod_assoc.grp_wall = file_opts->grp_wall;
-		changed = 1;
-		xstrfmtcat(my_info, 
-			   "%-30.30s for %-7.7s %-10.10s %8d -> %d\n",
-			   " Changed GrpWallDuration",
-			   type, name,
-			   assoc->grp_wall,
-			   file_opts->grp_wall);
-	}
-	
-	if((file_opts->max_cpu_mins_pj != NO_VAL)
-	   && (assoc->max_cpu_mins_pj != file_opts->max_cpu_mins_pj)) {
-		mod_assoc.max_cpu_mins_pj =
-			file_opts->max_cpu_mins_pj;
-		changed = 1;
-		xstrfmtcat(my_info, 
-			   "%-30.30s for %-7.7s %-10.10s %8llu -> %llu\n",
-			   " Changed MaxCPUMinsPerJob",
-			   type, name,
-			   assoc->max_cpu_mins_pj,
-			   file_opts->max_cpu_mins_pj);
-	}
-
-	if((file_opts->max_cpus_pj != NO_VAL)
-	   && (assoc->max_cpus_pj != file_opts->max_cpus_pj)) {
-		mod_assoc.max_cpus_pj = file_opts->max_cpus_pj;
-		changed = 1;
-		xstrfmtcat(my_info, 
-			   "%-30.30s for %-7.7s %-10.10s %8d -> %d\n",
-			   " Changed MaxCpusPerJob",
-			   type, name,
-			   assoc->max_cpus_pj, 
-			   file_opts->max_cpus_pj);
-	}
-
-	if((file_opts->max_jobs != NO_VAL)
-	   && (assoc->max_jobs != file_opts->max_jobs)) {
-		mod_assoc.max_jobs = file_opts->max_jobs;
-		changed = 1;
-		xstrfmtcat(my_info, 
-			   "%-30.30s for %-7.7s %-10.10s %8d -> %d\n",
-			   " Changed MaxJobs",
-			   type, name,
-			   assoc->max_jobs,
-			   file_opts->max_jobs);
-	}
-
-	if((file_opts->max_nodes_pj != NO_VAL)
-	   && (assoc->max_nodes_pj != file_opts->max_nodes_pj)) {
-		mod_assoc.max_nodes_pj = file_opts->max_nodes_pj;
-		changed = 1;
+
+	if((file_opts->max_nodes_pj != NO_VAL)
+	   && (assoc->max_nodes_pj != file_opts->max_nodes_pj)) {
+		mod_assoc.max_nodes_pj = file_opts->max_nodes_pj;
+		changed = 1;
 		xstrfmtcat(my_info, 
 			   "%-30.30s for %-7.7s %-10.10s %8d -> %d\n",
 			   " Changed MaxNodesPerJob",
@@ -1445,46 +1124,285 @@ static int _mod_assoc(sacctmgr_file_opts_t *file_opts,
 		}
 	}
 
-	if(changed) {
-		List ret_list = NULL;
-					
-		assoc_cond.cluster_list = list_create(NULL); 
-		list_push(assoc_cond.cluster_list, assoc->cluster);
+	if(changed) {
+		List ret_list = NULL;
+					
+		assoc_cond.cluster_list = list_create(NULL); 
+		list_push(assoc_cond.cluster_list, assoc->cluster);
+
+		assoc_cond.acct_list = list_create(NULL); 
+		list_push(assoc_cond.acct_list, assoc->acct);
+		
+		if(mod_type == MOD_USER) {
+			assoc_cond.user_list = list_create(NULL); 
+			list_push(assoc_cond.user_list, assoc->user);
+			if(assoc->partition) {
+				assoc_cond.partition_list = list_create(NULL); 
+				list_push(assoc_cond.partition_list,
+					  assoc->partition);
+			}
+		}
+			
+		notice_thread_init();
+		ret_list = acct_storage_g_modify_associations(
+			db_conn, my_uid,
+			&assoc_cond, 
+			&mod_assoc);
+		notice_thread_fini();
+					
+		if(mod_assoc.qos_list)
+			list_destroy(mod_assoc.qos_list);
+
+		list_destroy(assoc_cond.cluster_list);
+		list_destroy(assoc_cond.acct_list);
+		if(assoc_cond.user_list)
+			list_destroy(assoc_cond.user_list);
+		if(assoc_cond.partition_list)
+			list_destroy(assoc_cond.partition_list);
+
+/* 		if(ret_list && list_count(ret_list)) { */
+/* 			char *object = NULL; */
+/* 			ListIterator itr = list_iterator_create(ret_list); */
+/* 			printf(" Modified account defaults for " */
+/* 			       "associations...\n"); */
+/* 			while((object = list_next(itr)))  */
+/* 				printf("  %s\n", object); */
+/* 			list_iterator_destroy(itr); */
+/* 		} */
+ 
+		if(ret_list) {
+			printf("%s", my_info);
+			list_destroy(ret_list);
+		} else
+			changed = 0;
+		xfree(my_info);
+	}
+
+	return changed;
+}
+
+static int _mod_cluster(sacctmgr_file_opts_t *file_opts,
+			acct_cluster_rec_t *cluster, char *parent)
+{
+	int changed = 0;
+	char *my_info = NULL;
+	acct_cluster_rec_t mod_cluster;
+	acct_cluster_cond_t cluster_cond;
+	
+	memset(&mod_cluster, 0, sizeof(acct_cluster_rec_t));
+	memset(&cluster_cond, 0, sizeof(acct_cluster_cond_t));
+
+	if(file_opts->classification 
+	   && (file_opts->classification != cluster->classification)) {
+		xstrfmtcat(my_info, 
+			   "%-30.30s for %-7.7s %-10.10s %8s -> %s\n",
+			   " Changed Classification", "Cluster",
+			   cluster->name,
+			   get_classification_str(cluster->classification),
+			   get_classification_str(file_opts->classification));
+		mod_cluster.classification = file_opts->classification;
+		changed = 1;
+	}
+
+	if(changed) {
+		List ret_list = NULL;
+					
+		cluster_cond.cluster_list = list_create(NULL);
+		list_append(cluster_cond.cluster_list, cluster->name);
+
+		notice_thread_init();
+		ret_list = acct_storage_g_modify_clusters(db_conn, my_uid,
+							  &cluster_cond, 
+							  &mod_cluster);
+		notice_thread_fini();
+	
+		list_destroy(cluster_cond.cluster_list);
+
+/* 		if(ret_list && list_count(ret_list)) { */
+/* 			char *object = NULL; */
+/* 			ListIterator itr = list_iterator_create(ret_list); */
+/* 			printf(" Modified account defaults for " */
+/* 			       "associations...\n"); */
+/* 			while((object = list_next(itr)))  */
+/* 				printf("  %s\n", object); */
+/* 			list_iterator_destroy(itr); */
+/* 		} */
+ 
+		if(ret_list) {
+			printf("%s", my_info);
+			list_destroy(ret_list);
+		} else
+			changed = 0;
+		xfree(my_info);
+	}
+
+	changed += _mod_assoc(file_opts, cluster->root_assoc,
+			      MOD_CLUSTER, parent);
+
+	return changed;
+}
+
+static int _mod_acct(sacctmgr_file_opts_t *file_opts,
+		     acct_account_rec_t *acct, char *parent)
+{
+	int changed = 0;
+	char *desc = NULL, *org = NULL, *my_info = NULL;
+	acct_account_rec_t mod_acct;
+	acct_account_cond_t acct_cond;
+	acct_association_cond_t assoc_cond;
+	
+	memset(&mod_acct, 0, sizeof(acct_account_rec_t));
+	memset(&acct_cond, 0, sizeof(acct_account_cond_t));
+	memset(&assoc_cond, 0, sizeof(acct_association_cond_t));
+
+	if(file_opts->desc) 
+		desc = xstrdup(file_opts->desc);
+
+	if(desc && strcmp(desc, acct->description)) {
+		xstrfmtcat(my_info, 
+			   "%-30.30s for %-7.7s %-10.10s %8s -> %s\n",
+			   " Changed description", "Account",
+			   acct->name,
+			   acct->description,
+			   desc);
+		mod_acct.description = desc;
+		changed = 1;
+	} else 
+		xfree(desc);
+				
+	if(file_opts->org)
+		org = xstrdup(file_opts->org);
+
+	if(org && strcmp(org, acct->organization)) {
+		xstrfmtcat(my_info, 
+			   "%-30.30s for %-7.7s %-10.10s %8s -> %s\n",
+			   " Changed organization", "Account",
+			   acct->name,
+			   acct->organization,
+			   org);
+		mod_acct.organization = org;
+		changed = 1;
+	} else
+		xfree(org);
+									
+	if(changed) {
+		List ret_list = NULL;
+					
+		assoc_cond.acct_list = list_create(NULL);
+		list_append(assoc_cond.acct_list, acct->name);
+		acct_cond.assoc_cond = &assoc_cond;
+
+		notice_thread_init();
+		ret_list = acct_storage_g_modify_accounts(db_conn, my_uid,
+							  &acct_cond, 
+							  &mod_acct);
+		notice_thread_fini();
+	
+		list_destroy(assoc_cond.acct_list);
+
+/* 		if(ret_list && list_count(ret_list)) { */
+/* 			char *object = NULL; */
+/* 			ListIterator itr = list_iterator_create(ret_list); */
+/* 			printf(" Modified account defaults for " */
+/* 			       "associations...\n"); */
+/* 			while((object = list_next(itr)))  */
+/* 				printf("  %s\n", object); */
+/* 			list_iterator_destroy(itr); */
+/* 		} */
+ 
+		if(ret_list) {
+			printf("%s", my_info);
+			list_destroy(ret_list);
+		} else
+			changed = 0;
+		xfree(my_info);
+	}
+	xfree(desc);
+	xfree(org);
+	return changed;
+}
+
+static int _mod_user(sacctmgr_file_opts_t *file_opts,
+		     acct_user_rec_t *user, char *cluster, char *parent)
+{
+	int rc;
+	int set = 0;
+	int changed = 0;
+	char *def_acct = NULL, *def_wckey = NULL, *my_info = NULL;
+	acct_user_rec_t mod_user;
+	acct_user_cond_t user_cond;
+	List ret_list = NULL;
+	acct_association_cond_t assoc_cond;
+
+	if(!user || !user->name) {
+		fatal(" We need a user name in _mod_user");
+	}
+
+	memset(&mod_user, 0, sizeof(acct_user_rec_t));
+	memset(&user_cond, 0, sizeof(acct_user_cond_t));
+	memset(&assoc_cond, 0, sizeof(acct_association_cond_t));
+				
+	assoc_cond.user_list = list_create(NULL);
+	list_append(assoc_cond.user_list, user->name);
+	user_cond.assoc_cond = &assoc_cond;
 
-		assoc_cond.acct_list = list_create(NULL); 
-		list_push(assoc_cond.acct_list, assoc->acct);
-		
-		if(mod_type == MOD_USER) {
-			assoc_cond.user_list = list_create(NULL); 
-			list_push(assoc_cond.user_list, assoc->user);
-			if(assoc->partition) {
-				assoc_cond.partition_list = list_create(NULL); 
-				list_push(assoc_cond.partition_list,
-					  assoc->partition);
-			}
-		}
-			
+	if(file_opts->def_acct)
+		def_acct = xstrdup(file_opts->def_acct);
+
+	if(def_acct && 
+	   (!user->default_acct || strcmp(def_acct, user->default_acct))) {
+		xstrfmtcat(my_info, 
+			   "%-30.30s for %-7.7s %-10.10s %8s -> %s\n",
+			   " Changed Default Account", "User",
+			   user->name,
+			   user->default_acct,
+			   def_acct);
+		mod_user.default_acct = def_acct;
+		changed = 1;
+	}
+									
+	if(file_opts->def_wckey)
+		def_wckey = xstrdup(file_opts->def_wckey);
+
+	if(def_wckey && 
+	   (!user->default_wckey || strcmp(def_wckey, user->default_wckey))) {
+		xstrfmtcat(my_info, 
+			   "%-30.30s for %-7.7s %-10.10s %8s -> %s\n",
+			   " Changed Default WCKey", "User",
+			   user->name,
+			   user->default_wckey,
+			   def_wckey);
+		mod_user.default_wckey = def_wckey;
+		changed = 1;
+	}
+								
+	if(user->admin_level != ACCT_ADMIN_NOTSET
+	   && file_opts->admin != ACCT_ADMIN_NOTSET
+	   && user->admin_level != file_opts->admin) {
+		xstrfmtcat(my_info, 
+			   "%-30.30s for %-7.7s %-10.10s %8s -> %s\n",
+			   " Changed Admin Level", "User",
+			   user->name,
+			   acct_admin_level_str(
+				   user->admin_level),
+			   acct_admin_level_str(
+				   file_opts->admin));
+		mod_user.admin_level = file_opts->admin;
+		changed = 1;
+	}
+									
+	if(changed) {
 		notice_thread_init();
-		ret_list = acct_storage_g_modify_associations(
+		ret_list = acct_storage_g_modify_users(
 			db_conn, my_uid,
-			&assoc_cond, 
-			&mod_assoc);
+			&user_cond, 
+			&mod_user);
 		notice_thread_fini();
 					
-		if(mod_assoc.qos_list)
-			list_destroy(mod_assoc.qos_list);
-
-		list_destroy(assoc_cond.cluster_list);
-		list_destroy(assoc_cond.acct_list);
-		if(assoc_cond.user_list)
-			list_destroy(assoc_cond.user_list);
-		if(assoc_cond.partition_list)
-			list_destroy(assoc_cond.partition_list);
-
 /* 		if(ret_list && list_count(ret_list)) { */
 /* 			char *object = NULL; */
 /* 			ListIterator itr = list_iterator_create(ret_list); */
-/* 			printf(" Modified account defaults for " */
+/* 			printf(" Modified user defaults for " */
 /* 			       "associations...\n"); */
 /* 			while((object = list_next(itr)))  */
 /* 				printf("  %s\n", object); */
@@ -1494,12 +1412,168 @@ static int _mod_assoc(sacctmgr_file_opts_t *file_opts,
 		if(ret_list) {
 			printf("%s", my_info);
 			list_destroy(ret_list);
-		} else
-			changed = 0;
+			set = 1;
+		} 
 		xfree(my_info);
 	}
+	xfree(def_acct);
+	xfree(def_wckey);
 
-	return changed;
+	if((!user->coord_accts || !list_count(user->coord_accts))
+		  && (file_opts->coord_list 
+		      && list_count(file_opts->coord_list))) {
+		ListIterator coord_itr = NULL;
+		char *temp_char = NULL;
+		acct_coord_rec_t *coord = NULL;
+		int first = 1;
+		notice_thread_init();
+		rc = acct_storage_g_add_coord(db_conn, my_uid, 
+					      file_opts->coord_list,
+					      &user_cond);
+		notice_thread_fini();
+
+		user->coord_accts = list_create(destroy_acct_coord_rec);
+		coord_itr = list_iterator_create(file_opts->coord_list);
+		printf(" Making User '%s' coordinator for account(s)",
+		       user->name);
+		while((temp_char = list_next(coord_itr))) {
+			coord = xmalloc(sizeof(acct_coord_rec_t));
+			coord->name = xstrdup(temp_char);
+			coord->direct = 1;
+			list_push(user->coord_accts, coord);
+
+			if(first) {
+				printf(" %s", temp_char);
+				first = 0;
+			} else
+				printf(", %s", temp_char);
+		}
+		list_iterator_destroy(coord_itr);
+		printf("\n");
+		set = 1;
+	} else if((user->coord_accts && list_count(user->coord_accts))
+		  && (file_opts->coord_list 
+		      && list_count(file_opts->coord_list))) {
+		ListIterator coord_itr = NULL;
+		ListIterator char_itr = NULL;
+		char *temp_char = NULL;
+		acct_coord_rec_t *coord = NULL;
+		List add_list = list_create(NULL);
+
+		coord_itr = list_iterator_create(user->coord_accts);
+		char_itr = list_iterator_create(file_opts->coord_list);
+
+		while((temp_char = list_next(char_itr))) {
+			while((coord = list_next(coord_itr))) {
+				if(!coord->direct)
+					continue;
+				if(!strcmp(coord->name, temp_char)) {
+					break;
+				}
+			}
+			if(!coord) {
+				printf(" Making User '%s' coordinator of "
+				       "account '%s'\n",
+				       user->name,
+				       temp_char);
+					
+				list_append(add_list, temp_char);
+			}
+			list_iterator_reset(coord_itr);
+		}
+
+		list_iterator_destroy(char_itr);
+		list_iterator_destroy(coord_itr);
+
+		if(list_count(add_list)) {
+			notice_thread_init();
+			rc = acct_storage_g_add_coord(db_conn, my_uid, 
+						      add_list,
+						      &user_cond);
+			notice_thread_fini();
+			set = 1;
+		}
+		list_destroy(add_list);
+	}
+
+	if((!user->wckey_list || !list_count(user->wckey_list))
+		  && (file_opts->wckey_list 
+		      && list_count(file_opts->wckey_list))) {
+		ListIterator wckey_itr = NULL;
+		char *temp_char = NULL;
+		acct_wckey_rec_t *wckey = NULL;
+		int first = 1;
+
+		user->wckey_list = list_create(destroy_acct_wckey_rec);
+		wckey_itr = list_iterator_create(file_opts->wckey_list);
+		printf(" Adding WCKey(s) ");
+		while((temp_char = list_next(wckey_itr))) {
+			wckey = xmalloc(sizeof(acct_wckey_rec_t));
+			wckey->name = xstrdup(temp_char);
+			wckey->cluster = xstrdup(cluster);
+			wckey->user = xstrdup(user->name);
+			list_push(user->wckey_list, wckey);
+
+			if(first) {
+				printf("'%s'", temp_char);
+				first = 0;
+			} else
+				printf(", '%s'", temp_char);
+		}
+		list_iterator_destroy(wckey_itr);
+		printf(" for user '%s'\n", user->name);
+		set = 1;
+		notice_thread_init();
+		rc = acct_storage_g_add_wckeys(db_conn, my_uid, 
+					       user->wckey_list);
+		notice_thread_fini();
+	} else if((user->wckey_list && list_count(user->wckey_list))
+		  && (file_opts->wckey_list 
+		      && list_count(file_opts->wckey_list))) {
+		ListIterator wckey_itr = NULL;
+		ListIterator char_itr = NULL;
+		char *temp_char = NULL;
+		acct_wckey_rec_t *wckey = NULL;
+		List add_list = list_create(destroy_acct_wckey_rec);
+
+		wckey_itr = list_iterator_create(user->wckey_list);
+		char_itr = list_iterator_create(file_opts->wckey_list);
+
+		while((temp_char = list_next(char_itr))) {
+			while((wckey = list_next(wckey_itr))) {
+				if(!strcmp(wckey->name, temp_char)) 
+					break;
+			}
+			if(!wckey) {
+				printf(" Adding WCKey '%s' to User '%s'\n",
+				       temp_char, user->name);
+				wckey = xmalloc(sizeof(acct_wckey_rec_t));
+				wckey->name = xstrdup(temp_char);
+				wckey->cluster = xstrdup(cluster);
+				wckey->user = xstrdup(user->name);
+					
+				list_append(add_list, wckey);
+			}
+			list_iterator_reset(wckey_itr);
+		}
+
+		list_iterator_destroy(char_itr);
+		list_iterator_destroy(wckey_itr);
+
+		if(list_count(add_list)) {
+			notice_thread_init();
+			rc = acct_storage_g_add_wckeys(db_conn, my_uid, 
+						       add_list);
+			notice_thread_fini();
+			set = 1;
+		}
+		list_transfer(user->wckey_list, add_list);
+		list_destroy(add_list);
+	}
+
+	list_destroy(assoc_cond.user_list);
+
+	return set;
 }
 
 static acct_user_rec_t *_set_user_up(sacctmgr_file_opts_t *file_opts,
@@ -1641,7 +1715,7 @@ static acct_association_rec_t *_set_assoc_up(sacctmgr_file_opts_t *file_opts,
 	}
 
 	
-	assoc->fairshare = file_opts->fairshare;
+	assoc->shares_raw = file_opts->fairshare;
 	
 	assoc->grp_cpu_mins = file_opts->grp_cpu_mins;
 	assoc->grp_cpus = file_opts->grp_cpus;
@@ -1801,8 +1875,8 @@ extern int print_file_add_limits_to_line(char **line,
 	if(!assoc)
 		return SLURM_ERROR;
 
-	if(assoc->fairshare != INFINITE)
-		xstrfmtcat(*line, ":Fairshare=%u", assoc->fairshare);
+	if(assoc->shares_raw != INFINITE)
+		xstrfmtcat(*line, ":Fairshare=%u", assoc->shares_raw);
 		
 	if(assoc->grp_cpu_mins != INFINITE)
 		xstrfmtcat(*line, ":GrpCPUMins=%llu", assoc->grp_cpu_mins);
@@ -2169,7 +2243,7 @@ extern void load_sacctmgr_cfg_file (int argc, char *argv[])
 				db_conn, my_uid, NULL);
 
 			if(cluster_name) 
-				info("For cluster %s", cluster_name);
+				printf("For cluster %s\n", cluster_name);
 			
 			if(!(cluster = sacctmgr_find_cluster_from_list(
 				     curr_cluster_list, cluster_name))) {
@@ -2180,6 +2254,15 @@ extern void load_sacctmgr_cfg_file (int argc, char *argv[])
 				cluster = xmalloc(sizeof(acct_cluster_rec_t));
 				list_append(cluster_list, cluster);
 				cluster->name = xstrdup(cluster_name);
+				if(file_opts->classification) {
+					cluster->classification =
+						file_opts->classification;
+					printf("Classification: %s\n", 
+					       get_classification_str(
+						       cluster->
+						       classification));
+				}
+
 				cluster->root_assoc = _set_assoc_up(
 					file_opts, MOD_CLUSTER,
 					cluster_name, "root");
@@ -2205,8 +2288,8 @@ extern void load_sacctmgr_cfg_file (int argc, char *argv[])
 				}
 				set = 1;
 			} else {
-				set = _mod_assoc(file_opts, cluster->root_assoc,
-						 MOD_CLUSTER, parent);
+				set = _mod_cluster(file_opts,
+						   cluster, parent);
 			}
 				     
 			_destroy_sacctmgr_file_opts(file_opts);
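
The _mod_assoc() rework above repeats one compare-and-report idiom per limit. The following is a minimal standalone sketch of that idiom; the struct, field, and helper names are illustrative, only the pattern is taken from the patch.

    /*
     * NO_VAL compare-and-report idiom from _mod_assoc(): a limit is staged
     * for modification only when the input explicitly set it (value !=
     * NO_VAL) and it differs from the stored association.
     */
    #include <stdint.h>
    #include <stdio.h>

    #define NO_VAL 0xfffffffe   /* "not specified" sentinel, as in slurm.h */

    struct limits { uint32_t max_jobs; };

    static int mod_max_jobs(uint32_t wanted, const struct limits *stored,
                            struct limits *mod)
    {
            if ((wanted == NO_VAL) || (stored->max_jobs == wanted))
                    return 0;               /* unset, or nothing to change */
            mod->max_jobs = wanted;         /* stage the change */
            printf("%-30.30s %8u -> %u\n", " Changed MaxJobs",
                   stored->max_jobs, wanted);
            return 1;                       /* caller folds into 'changed' */
    }

    int main(void)
    {
            struct limits stored = { 10 }, mod = { NO_VAL };
            int changed = mod_max_jobs(20, &stored, &mod);  /* 10 -> 20 */
            changed += mod_max_jobs(NO_VAL, &stored, &mod); /* no-op */
            return changed ? 0 : 1;
    }
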
diff --git a/src/sacctmgr/qos_functions.c b/src/sacctmgr/qos_functions.c
index e1c36fec225e0bb4631f7813657fd937b135aa8d..a936b2dbf510c62d81215ce95989105b460f8d38 100644
--- a/src/sacctmgr/qos_functions.c
+++ b/src/sacctmgr/qos_functions.c
@@ -5,10 +5,11 @@
  *  Copyright (C) 2002-2008 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Danny Auble <da@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -324,6 +325,14 @@ static int _set_rec(int *start, int argc, char *argv[],
 			if (get_uint(argv[i]+end, &qos->priority,
 			    "Priority") == SLURM_SUCCESS)
 				set = 1;
+		} else if (!strncasecmp (argv[i], "UsageFactor", 
+					 MAX(command_len, 3))) {
+			if(!qos)
+				continue;
+			
+			if (get_double(argv[i]+end, &qos->usage_factor,
+			    "UsageFactor") == SLURM_SUCCESS)
+				set = 1;
 		} else {
 			printf(" Unknown option: %s\n"
 			       " Use keyword 'where' to modify condition\n",
@@ -352,8 +361,14 @@ extern int sacctmgr_add_qos(int argc, char *argv[])
 
 	init_acct_qos_rec(start_qos);
 
-	for (i=0; i<argc; i++) 
-		limit_set = _set_rec(&i, argc, argv, name_list, start_qos);
+	for (i=0; i<argc; i++) {
+		int command_len = strlen(argv[i]);
+		if (!strncasecmp (argv[i], "Where", MAX(command_len, 5))
+		    || !strncasecmp (argv[i], "Set", MAX(command_len, 3))) 
+			i++;		
+
+		limit_set += _set_rec(&i, argc, argv, name_list, start_qos);
+	}
 
 	if(exit_code) {
 		list_destroy(name_list);
@@ -412,6 +427,8 @@ extern int sacctmgr_add_qos(int argc, char *argv[])
 
 			qos->priority = start_qos->priority;
 
+			qos->usage_factor = start_qos->usage_factor;
+
 			xstrfmtcat(qos_str, "  %s\n", name);
 			list_append(qos_list, qos);
 		}
@@ -501,9 +518,16 @@ extern int sacctmgr_list_qos(int argc, char *argv[])
 		PRINT_MAXN,
 		PRINT_MAXS,
 		PRINT_MAXW,
+		PRINT_UF,
 	};
 
-	_set_cond(&i, argc, argv, qos_cond, format_list);
+	for (i=0; i<argc; i++) {
+		int command_len = strlen(argv[i]);
+		if (!strncasecmp (argv[i], "Where", MAX(command_len, 5))
+		    || !strncasecmp (argv[i], "Set", MAX(command_len, 3))) 
+			i++;		
+		_set_cond(&i, argc, argv, qos_cond, format_list);
+	}
 
 	if(exit_code) {
 		destroy_acct_qos_cond(qos_cond);
@@ -614,7 +638,7 @@ extern int sacctmgr_list_qos(int argc, char *argv[])
 			field->print_routine = print_fields_time;
 		} else if(!strncasecmp("Name", object, MAX(command_len, 1))) {
 			field->type = PRINT_NAME;
-			field->name = xstrdup("NAME");
+			field->name = xstrdup("Name");
 			field->len = 10;
 			field->print_routine = print_fields_str;
 		} else if(!strncasecmp("Priority", object,
@@ -623,6 +647,12 @@ extern int sacctmgr_list_qos(int argc, char *argv[])
 			field->name = xstrdup("Priority");
 			field->len = 10;
 			field->print_routine = print_fields_int;
+		} else if(!strncasecmp("UsageFactor", object,
+				       MAX(command_len, 1))) {
+			field->type = PRINT_UF;
+			field->name = xstrdup("UsageFactor");
+			field->len = 11;
+			field->print_routine = print_fields_double;
 		} else {
 			exit_code=1;
 			fprintf(stderr, "Unknown field '%s'\n", object);
@@ -630,7 +660,7 @@ extern int sacctmgr_list_qos(int argc, char *argv[])
 			continue;
 		}
 
-		if(newlen > 0) 
+		if(newlen) 
 			field->len = newlen;
 		
 		list_append(print_fields_list, field);		
@@ -750,6 +780,11 @@ extern int sacctmgr_list_qos(int argc, char *argv[])
 					field, qos->priority,
 					(curr_inx == field_count));
 				break;
+			case PRINT_UF:
+				field->print_routine(
+					field, qos->usage_factor,
+					(curr_inx == field_count));
+				break;
 			default:
 				field->print_routine(
 					field, NULL,
@@ -784,13 +819,13 @@ extern int sacctmgr_modify_qos(int argc, char *argv[])
 		int command_len = strlen(argv[i]);
 		if (!strncasecmp (argv[i], "Where", MAX(command_len, 5))) {
 			i++;
-			cond_set = _set_cond(&i, argc, argv, qos_cond, NULL);
+			cond_set += _set_cond(&i, argc, argv, qos_cond, NULL);
 			      
 		} else if (!strncasecmp (argv[i], "Set", MAX(command_len, 3))) {
 			i++;
-			rec_set = _set_rec(&i, argc, argv, NULL, qos);
+			rec_set += _set_rec(&i, argc, argv, NULL, qos);
 		} else {
-			cond_set = _set_cond(&i, argc, argv, qos_cond, NULL);
+			cond_set += _set_cond(&i, argc, argv, qos_cond, NULL);
 		}
 	}
 
@@ -863,7 +898,15 @@ extern int sacctmgr_delete_qos(int argc, char *argv[])
 	List ret_list = NULL;
 	int set = 0;
 	
-	if(!(set = _set_cond(&i, argc, argv, qos_cond, NULL))) {
+	for (i=0; i<argc; i++) {
+		int command_len = strlen(argv[i]);
+		if (!strncasecmp (argv[i], "Where", MAX(command_len, 5))
+		    || !strncasecmp (argv[i], "Set", MAX(command_len, 3))) 
+			i++;		
+		set += _set_cond(&i, argc, argv, qos_cond, NULL);
+	}
+
+	if(!set) {
 		exit_code=1;
 		fprintf(stderr, 
 			" No conditions given to remove, not executing.\n");
diff --git a/src/sacctmgr/sacctmgr.c b/src/sacctmgr/sacctmgr.c
index 45187b98fdce263e2e19c2a0610c0eb5880bc2d8..1e456d29349843f257ba400fa09c1afc8c427739 100644
--- a/src/sacctmgr/sacctmgr.c
+++ b/src/sacctmgr/sacctmgr.c
@@ -3,14 +3,15 @@
  *	         provides interface to read, write, update, and configure
  *               accounting.
  *****************************************************************************
- *  Copyright (C) 2008 Lawrence Livermore National Security.
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
  *  Copyright (C) 2002-2007 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Danny Auble <da@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -77,15 +78,15 @@ main (int argc, char *argv[])
 	int option_index;
 	static struct option long_options[] = {
 		{"help",     0, 0, 'h'},
+		{"usage",    0, 0, 'h'},
 		{"immediate",0, 0, 'i'},
+		{"noheader",0, 0, 'n'},
 		{"oneliner", 0, 0, 'o'},
-		{"no_header", 0, 0, 'n'},
 		{"parsable", 0, 0, 'p'},
 		{"parsable2", 0, 0, 'P'},
 		{"quiet",    0, 0, 'q'},
 		{"readonly", 0, 0, 'r'},
 		{"associations", 0, 0, 's'},
-		{"usage",    0, 0, 'h'},
 		{"verbose",  0, 0, 'v'},
 		{"version",  0, 0, 'V'},
 		{NULL,       0, 0, 0}
@@ -185,13 +186,18 @@ main (int argc, char *argv[])
 		exit(1);
 	}
 	xfree(temp);
+
 	/* always do a rollback.  If you don't then if there is an
 	 * error you can not rollback ;)
 	 */
 	errno = 0;
 	db_conn = acct_storage_g_get_connection(false, 0, 1);
 	if(errno != SLURM_SUCCESS) {
-		error("sacctmgr: %m");
+		if((input_field_count == 2) &&
+		   (!strncasecmp(argv[2], "Configuration", strlen(argv[2]))) &&
+		   ((!strncasecmp(argv[1], "list", strlen(argv[1]))) ||
+		    (!strncasecmp(argv[1], "show", strlen(argv[1])))))
+			sacctmgr_list_config(false);
 		exit(1);
 	}
 	my_uid = getuid();
@@ -405,7 +411,8 @@ _process_command (int argc, char *argv[])
 	} else if ((strncasecmp (argv[0], "show", MAX(command_len, 3)) == 0) ||
 		   (strncasecmp (argv[0], "list", MAX(command_len, 3)) == 0)) {
 		_show_it((argc - 1), &argv[1]);
-	} else if (strncasecmp (argv[0], "modify", MAX(command_len, 1)) == 0) {
+	} else if (!strncasecmp (argv[0], "modify", MAX(command_len, 1))
+		   || !strncasecmp (argv[0], "update", MAX(command_len, 1))) {
 		_modify_it((argc - 1), &argv[1]);
 	} else if ((strncasecmp (argv[0], "delete",
 				 MAX(command_len, 3)) == 0) ||
@@ -430,8 +437,10 @@ _process_command (int argc, char *argv[])
 		}		
 		readonly_flag = 1;
 	} else if (strncasecmp (argv[0], "rollup", MAX(command_len, 2)) == 0) {
-		time_t my_time = 0;
-		if (argc > 2) {
+		time_t my_start = 0;
+		time_t my_end = 0;
+		uint16_t archive_data = 0;
+		if (argc > 4) {
 			exit_code = 1;
 			fprintf (stderr,
 				 "too many arguments for %s keyword\n",
@@ -439,8 +448,13 @@ _process_command (int argc, char *argv[])
 		}
 
 		if(argc > 1)
-			my_time = parse_time(argv[1], 1);
-		if(acct_storage_g_roll_usage(db_conn, my_time)
+			my_start = parse_time(argv[1], 1);
+		if(argc > 2)
+			my_end = parse_time(argv[2], 1);
+		if(argc > 3)
+			archive_data = atoi(argv[3]);
+		if(acct_storage_g_roll_usage(db_conn, my_start, 
+					     my_end, archive_data)
 		   == SLURM_SUCCESS) {
 			if(commit_check("Would you like to commit rollup?")) {
 				acct_storage_g_commit(db_conn, 1);
@@ -586,6 +600,9 @@ static void _show_it (int argc, char *argv[])
 	} else if (strncasecmp (argv[0], "Clusters", 
 				MAX(command_len, 1)) == 0) {
 		error_code = sacctmgr_list_cluster((argc - 1), &argv[1]);
+	} else if (strncasecmp (argv[0], "Configuration", 
+				MAX(command_len, 1)) == 0) {
+		error_code = sacctmgr_list_config(true);
 	} else if (strncasecmp (argv[0], "QOS", MAX(command_len, 1)) == 0) {
 		error_code = sacctmgr_list_qos((argc - 1), &argv[1]);
 	} else if (strncasecmp (argv[0], "Transactions", 
@@ -714,7 +731,7 @@ sacctmgr [<OPTION>] [<COMMAND>]                                            \n\
     Valid <OPTION> values are:                                             \n\
      -h or --help: equivalent to \"help\" command                          \n\
      -i or --immediate: commit changes immediately                         \n\
-     -n or --no_header: no header will be added to the beginning of output \n\
+     -n or --noheader: no header will be added to the beginning of output  \n\
      -o or --oneliner: equivalent to \"oneliner\" command                  \n\
      -p or --parsable: output will be '|' delimited with a '|' at the end  \n\
      -P or --parsable2: output will be '|' delimited without a '|' at the end\n\
@@ -758,17 +775,17 @@ sacctmgr [<OPTION>] [<COMMAND>]                                            \n\
      oneliner                 report output one record per line.           \n\
      parsable                 output will be | delimited with an ending '|'\n\
      parsable2                output will be | delimited without an ending '|'\n\
-     readonly                 makes it so no modification can happen.      \n\
      quiet                    print no messages other than error messages. \n\
      quit                     terminate this command.                      \n\
+     readonly                 makes it so no modification can happen.      \n\
      show                     same as list                                 \n\
      verbose                  enable detailed logging.                     \n\
      version                  display tool version number.                 \n\
      !!                       Repeat the last command entered.             \n\
                                                                            \n\
   <ENTITY> may be \"account\", \"association\", \"cluster\",               \n\
-                  \"coordinator\", \"qos\", \"transaction\", \"user\",     \n\
-                  or \"wckey\"                                             \n\
+                  \"configuration\", \"coordinator\", \"qos\",             \n\
+                  \"transaction\", \"user\", or \"wckey\"                  \n\
                                                                            \n\
   <SPECS> are different for each command entity pair.                      \n\
        list account       - Clusters=, Descriptions=, Format=, Names=,     \n\
@@ -789,19 +806,18 @@ sacctmgr [<OPTION>] [<COMMAND>]                                            \n\
        delete account     - Clusters=, Descriptions=, Names=,              \n\
                             Organizations=, and Parents=                   \n\
                                                                            \n\
-       list associations  - Accounts=, Clusters=, Format=, IDs=,            \n\
+       list associations  - Accounts=, Clusters=, Format=, IDs=,           \n\
                             Partitions=, Parent=, Tree, Users=,            \n\
                             WithSubAccounts, WithDeleted, WOPInfo,         \n\
                             and WOPLimits                                  \n\
                                                                            \n\
        list cluster       - Format=, Names=                                \n\
-       add cluster        - Fairshare=, GrpCPUMins=, GrpCPUs=, GrpJobs=,   \n\
-                            GrpNodes=, GrpSubmitJob=, GrpWall=, MaxCPUMins=\n\
+       add cluster        - Fairshare=, GrpCPUs=, GrpJobs=,                \n\
+                            GrpNodes=, GrpSubmitJob=, MaxCPUMins=          \n\
                             MaxJobs=, MaxNodes=, MaxWall=, and Name=       \n\
-       modify cluster     - (set options) Fairshare=, GrpCPUMins=,         \n\
+       modify cluster     - (set options) Fairshare=,                      \n\
                             GrpCPUs=, GrpJobs=, GrpNodes=, GrpSubmitJob=,  \n\
-                            GrpWall=, MaxCPUMins=, MaxJobs=, MaxNodes=,    \n\
-                            and MaxWall=                                   \n\
+                            MaxCPUMins=, MaxJobs=, MaxNodes=, and MaxWall= \n\
                             (where options) Names=                         \n\
        delete cluster     - Names=                                         \n\
                                                                            \n\
@@ -841,8 +857,9 @@ sacctmgr [<OPTION>] [<COMMAND>]                                            \n\
        list wckey         - Clusters=, End=, Format=, IDs=, Names=,        \n\
                             Start=, User=, and WCKeys=                     \n\
                                                                            \n\
-       archive dump       - Directory=, Jobs, PurgeJobsBefore=,            \n\
-                            PurgeStepsBefore=, Script=, and Steps          \n\
+       archive dump       - Directory=, Events, Jobs, PurgeEventMonths=,   \n\
+                            PurgeJobMonths=, PurgeStepMonths=,             \n\
+                            PurgeSuspendMonths=, Script=, Steps and Suspend\n\
                                                                            \n\
        archive load       - File=, or Insert=                              \n\
                                                                            \n\
@@ -858,10 +875,11 @@ sacctmgr [<OPTION>] [<COMMAND>]                                            \n\
                             ParentID, ParentName, Partition, RawQOS, RGT,  \n\
                             User                                           \n\
                                                                            \n\
-       Cluster            - Cluster, ControlHost, ControlPort, Fairshare   \n\
-                            GrpCPUMins, GrpCPUs, GrpJobs, GrpNodes,        \n\
-                            GrpSubmitJob, GrpWall, MaxCPUs, MaxCPUMins,    \n\
-                            MaxJobs, MaxNodes, MaxSubmitJobs, MaxWall      \n\
+       Cluster            - Cluster, ControlHost, ControlPort, CpuCount,   \n\
+                            Fairshare, GrpCPUs, GrpJobs,                   \n\
+                            GrpNodes, GrpSubmitJob, MaxCPUs,               \n\
+                            MaxCPUMins, MaxJobs, MaxNodes, MaxSubmitJobs,  \n\
+                            MaxWall, NodeCount, NodeNames                  \n\
                                                                            \n\
        QOS                - Description, ID, Name                          \n\
                                                                            \n\
diff --git a/src/sacctmgr/sacctmgr.h b/src/sacctmgr/sacctmgr.h
index 74b84f1163a8c5976289df4ce0c3d99492e66ee7..b88cb7008a3d243a364bcfd730c4cf70ea69f859 100644
--- a/src/sacctmgr/sacctmgr.h
+++ b/src/sacctmgr/sacctmgr.h
@@ -1,14 +1,15 @@
 /*****************************************************************************\
  *  sacctmgr.h - definitions for all sacctmgr modules.
  *****************************************************************************
- *  Copyright (C) 2008 Lawrence Livermore National Security.
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
  *  Copyright (C) 2002-2007 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Danny Auble <da@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -109,6 +110,7 @@ extern int sacctmgr_list_association(int argc, char *argv[]);
 extern int sacctmgr_list_user(int argc, char *argv[]);
 extern int sacctmgr_list_account(int argc, char *argv[]);
 extern int sacctmgr_list_cluster(int argc, char *argv[]);
+extern int sacctmgr_list_config(bool have_db_conn);
 extern int sacctmgr_list_qos(int argc, char *argv[]);
 extern int sacctmgr_list_wckey(int argc, char *argv[]);
 
@@ -138,6 +140,7 @@ extern int commit_check(char *warning);
 extern int get_uint(char *in_value, uint32_t *out_value, char *type);
 extern int get_uint16(char *in_value, uint16_t *out_value, char *type);
 extern int get_uint64(char *in_value, uint64_t *out_value, char *type);
+extern int get_double(char *in_value, double *out_value, char *type);
 extern int addto_qos_char_list(List char_list, List qos_list, char *names, 
 			       int option);
 extern int addto_action_char_list(List char_list, char *names);
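
The header above only declares get_double(); its definition is not part of this excerpt. Below is a plausible implementation modeled on the existing get_uint*() helpers; it is an assumption, not the actual SLURM source.

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define SLURM_SUCCESS  0
    #define SLURM_ERROR   -1

    /* Assumed shape: parse a double from in_value, naming the option
     * (e.g. "UsageFactor") in the complaint on bad input. */
    int get_double(char *in_value, double *out_value, char *type)
    {
            char *endptr = NULL;
            double d;

            errno = 0;
            d = strtod(in_value, &endptr);
            if (errno || (endptr == in_value) || (*endptr != '\0')) {
                    fprintf(stderr, " Bad %s value: %s\n", type, in_value);
                    return SLURM_ERROR;
            }
            *out_value = d;
            return SLURM_SUCCESS;
    }

    int main(void)
    {
            double uf = 0.0;
            if (get_double("0.5", &uf, "UsageFactor") == SLURM_SUCCESS)
                    printf("UsageFactor=%g\n", uf);  /* prints 0.5 */
            return 0;
    }
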
diff --git a/src/sacctmgr/txn_functions.c b/src/sacctmgr/txn_functions.c
index bc978aac0e09bf1bdd82ca91c4143cc7872a52d1..b92c8526d10ad15f902567bf46f68c8a4939ed15 100644
--- a/src/sacctmgr/txn_functions.c
+++ b/src/sacctmgr/txn_functions.c
@@ -6,10 +6,11 @@
  *  Copyright (C) 2002-2007 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Danny Auble <da@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -190,7 +191,13 @@ extern int sacctmgr_list_txn(int argc, char *argv[])
 		PRINT_WHERE
 	};
 
-	_set_cond(&i, argc, argv, txn_cond, format_list);
+	for (i=0; i<argc; i++) {
+		int command_len = strlen(argv[i]);
+		if (!strncasecmp (argv[i], "Where", MAX(command_len, 5))
+		    || !strncasecmp (argv[i], "Set", MAX(command_len, 3))) 
+			i++;		
+		_set_cond(&i, argc, argv, txn_cond, format_list);
+	}
 
 	if(exit_code) {
 		destroy_acct_txn_cond(txn_cond);
@@ -264,7 +271,7 @@ extern int sacctmgr_list_txn(int argc, char *argv[])
 				       MAX(command_len, 1))) {
 			field->type = PRINT_TS;
 			field->name = xstrdup("Time");
-			field->len = 15;
+			field->len = 19;
 			field->print_routine = print_fields_date;
 		} else if(!strncasecmp("Users", object, MAX(command_len, 4))) {
 			field->type = PRINT_USER;
@@ -283,7 +290,7 @@ extern int sacctmgr_list_txn(int argc, char *argv[])
 			continue;
 		}
 
-		if(newlen > 0) 
+		if(newlen) 
 			field->len = newlen;
 		
 		list_append(print_fields_list, field);		
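
(The Time column widening above, from 15 to 19 characters, presumably makes room for a full timestamp: YYYY-MM-DDTHH:MM:SS is exactly 19 characters, so transaction times no longer truncate.)
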
diff --git a/src/sacctmgr/user_functions.c b/src/sacctmgr/user_functions.c
index 3b3835180f5e2a6b38ec0d9785d8ab8ddd5c9b62..48dbf57acfe627bbe455686c44b24fab52fa1051 100644
--- a/src/sacctmgr/user_functions.c
+++ b/src/sacctmgr/user_functions.c
@@ -5,10 +5,11 @@
  *  Copyright (C) 2002-2007 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Danny Auble <da@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -379,7 +380,7 @@ static int _set_rec(int *start, int argc, char *argv[],
 					 MAX(command_len, 1))) {
 			if(!assoc)
 				continue;
-			if (get_uint(argv[i]+end, &assoc->fairshare, 
+			if (get_uint(argv[i]+end, &assoc->shares_raw, 
 				     "FairShare") == SLURM_SUCCESS)
 				a_set = 1;
 		} else if (!strncasecmp (argv[i], "GrpCPUMins",
@@ -530,8 +531,9 @@ static int _set_rec(int *start, int argc, char *argv[],
 static int _check_coord_request(acct_user_cond_t *user_cond, bool check)
 {
 	ListIterator itr = NULL, itr2 = NULL;
-	char *name = NULL, *name2 = NULL;
-
+	char *name = NULL;
+	acct_user_rec_t *user_rec = NULL;
+	acct_account_rec_t *acct_rec = NULL;
 	acct_account_cond_t account_cond;
 	List local_acct_list = NULL;
 	List local_user_list = NULL;
@@ -568,20 +570,20 @@ static int _check_coord_request(acct_user_cond_t *user_cond, bool check)
 		return SLURM_ERROR;
 	}
 
-	if(user_cond->assoc_cond->acct_list && 
-	   (list_count(local_acct_list) != 
+	if(user_cond->assoc_cond->acct_list &&
+	   (list_count(local_acct_list) !=
 	    list_count(user_cond->assoc_cond->acct_list))) {
 		
 		itr = list_iterator_create(user_cond->assoc_cond->acct_list);
 		itr2 = list_iterator_create(local_acct_list);
 		
 		while((name = list_next(itr))) {
-			while((name2 = list_next(itr2))) {
-				if(!strcmp(name, name2)) 
+			while((acct_rec = list_next(itr2))) {
+				if(!strcmp(name, acct_rec->name)) 
 					break;
 			}
 			list_iterator_reset(itr2);
-			if(!name2) {
+			if(!acct_rec) {
 				fprintf(stderr, 
 					" You specified a non-existant "
 					"account '%s'.\n", name); 
@@ -604,19 +606,19 @@ static int _check_coord_request(acct_user_cond_t *user_cond, bool check)
 	}
 
 	if(user_cond->assoc_cond->user_list &&
-	   (list_count(local_user_list) != 
+	   (list_count(local_user_list) !=
 	    list_count(user_cond->assoc_cond->user_list))) {
 		
 		itr = list_iterator_create(user_cond->assoc_cond->user_list);
 		itr2 = list_iterator_create(local_user_list);
 		
 		while((name = list_next(itr))) {
-			while((name2 = list_next(itr2))) {
-				if(!strcmp(name, name2)) 
+			while((user_rec = list_next(itr2))) {
+				if(!strcmp(name, user_rec->name)) 
 					break;
 			}
 			list_iterator_reset(itr2);
-			if(!name2) {
+			if(!user_rec) {
 				fprintf(stderr, 
 					" You specified a non-existent "
 					"user '%s'.\n", name); 
@@ -750,7 +752,7 @@ extern int sacctmgr_add_user(int argc, char *argv[])
 					 MAX(command_len, 1))
 			   || !strncasecmp (argv[i], "Shares",
 					 MAX(command_len, 1))) {
-			if (get_uint(argv[i]+end, &start_assoc.fairshare, 
+			if (get_uint(argv[i]+end, &start_assoc.shares_raw, 
 			    "FairShare") == SLURM_SUCCESS)
 				limit_set = 1;
 		} else if (!strncasecmp (argv[i], "GrpCPUMins",
@@ -1149,8 +1151,8 @@ extern int sacctmgr_add_user(int argc, char *argv[])
 					assoc->cluster = xstrdup(cluster);
 					assoc->partition = xstrdup(partition);
 					
-					assoc->fairshare = 
-						start_assoc.fairshare;
+					assoc->shares_raw = 
+						start_assoc.shares_raw;
 
 					assoc->grp_cpu_mins = 
 						start_assoc.grp_cpu_mins;
@@ -1207,7 +1209,7 @@ extern int sacctmgr_add_user(int argc, char *argv[])
 				assoc->acct = xstrdup(account);
 				assoc->cluster = xstrdup(cluster);
 
-				assoc->fairshare = start_assoc.fairshare;
+				assoc->shares_raw = start_assoc.shares_raw;
 
 				assoc->grp_cpu_mins = 
 					start_assoc.grp_cpu_mins;
@@ -1391,7 +1393,11 @@ extern int sacctmgr_add_coord(int argc, char *argv[])
 	ListIterator itr = NULL;
 
 	for (i=0; i<argc; i++) {
-		cond_set = _set_cond(&i, argc, argv, user_cond, NULL);
+		int command_len = strlen(argv[i]);
+		if (!strncasecmp (argv[i], "Where", MAX(command_len, 5))
+		    || !strncasecmp (argv[i], "Set", MAX(command_len, 3))) 
+			i++;		
+		cond_set += _set_cond(&i, argc, argv, user_cond, NULL);
 	}
 
 	if(exit_code) {
@@ -1499,7 +1505,13 @@ extern int sacctmgr_list_user(int argc, char *argv[])
 
 	user_cond->with_assocs = with_assoc_flag;
 
-	set = _set_cond(&i, argc, argv, user_cond, format_list);
+	for (i=0; i<argc; i++) {
+		int command_len = strlen(argv[i]);
+		if (!strncasecmp (argv[i], "Where", MAX(command_len, 5))
+		    || !strncasecmp (argv[i], "Set", MAX(command_len, 3))) 
+			i++;		
+		set += _set_cond(&i, argc, argv, user_cond, format_list);
+	}
 
 	if(exit_code) {
 		destroy_acct_user_cond(user_cond);
@@ -1711,7 +1723,7 @@ extern int sacctmgr_list_user(int argc, char *argv[])
 			continue;
 		}
 
-		if(newlen > 0) 
+		if(newlen) 
 			field->len = newlen;
 		
 		list_append(print_fields_list, field);		
@@ -1801,7 +1813,7 @@ extern int sacctmgr_list_user(int argc, char *argv[])
 					case PRINT_FAIRSHARE:
 						field->print_routine(
 							field,
-							assoc->fairshare,
+							assoc->shares_raw,
 							(curr_inx == 
 							 field_count));
 						break;
@@ -2066,13 +2078,13 @@ extern int sacctmgr_modify_user(int argc, char *argv[])
 		int command_len = strlen(argv[i]);
 		if (!strncasecmp (argv[i], "Where", MAX(command_len, 5))) {
 			i++;
-			cond_set = _set_cond(&i, argc, argv, user_cond, NULL);
+			cond_set += _set_cond(&i, argc, argv, user_cond, NULL);
 			      
 		} else if (!strncasecmp (argv[i], "Set", MAX(command_len, 3))) {
 			i++;
-			rec_set = _set_rec(&i, argc, argv, user, assoc);
+			rec_set += _set_rec(&i, argc, argv, user, assoc);
 		} else {
-			cond_set = _set_cond(&i, argc, argv, user_cond, NULL);
+			cond_set += _set_cond(&i, argc, argv, user_cond, NULL);
 		}
 	}
 
@@ -2212,7 +2224,15 @@ extern int sacctmgr_delete_user(int argc, char *argv[])
 	List ret_list = NULL;
 	int set = 0;
 
-	if(!(set = _set_cond(&i, argc, argv, user_cond, NULL))) {
+	for (i=0; i<argc; i++) {
+		int command_len = strlen(argv[i]);
+		if (!strncasecmp (argv[i], "Where", MAX(command_len, 5))
+		    || !strncasecmp (argv[i], "Set", MAX(command_len, 3))) 
+			i++;		
+		set += _set_cond(&i, argc, argv, user_cond, NULL);
+	}
+
+	if(!set) {
 		exit_code=1;
 		fprintf(stderr, 
 			" No conditions given to remove, not executing.\n");
@@ -2284,7 +2304,11 @@ extern int sacctmgr_delete_coord(int argc, char *argv[])
 
 
 	for (i=0; i<argc; i++) {
-		cond_set = _set_cond(&i, argc, argv, user_cond, NULL);
+		int command_len = strlen(argv[i]);
+		if (!strncasecmp (argv[i], "Where", MAX(command_len, 5))
+		    || !strncasecmp (argv[i], "Set", MAX(command_len, 3))) 
+			i++;		
+		cond_set += _set_cond(&i, argc, argv, user_cond, NULL);
 	}
 
 	if(exit_code) {
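
The record-list scans above, like the coordinator and WCKey branches of _mod_user() earlier in this patch, follow one merge idiom: scan the current entries for each requested name and batch-add whatever is missing. A standalone sketch with plain arrays standing in for SLURM's List API (the account names are made up):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            const char *current[] = { "physics", "chem" };
            const char *wanted[]  = { "chem", "bio", "physics", "astro" };
            const char *add_list[4];
            int n_current = 2, n_wanted = 4, n_add = 0;

            for (int i = 0; i < n_wanted; i++) {
                    int found = 0;
                    for (int j = 0; j < n_current; j++) {
                            if (!strcmp(wanted[i], current[j])) {
                                    found = 1;
                                    break;
                            }
                    }
                    if (!found)
                            add_list[n_add++] = wanted[i]; /* queue it */
            }

            /* sacctmgr would make a single acct_storage_g_add_coord() or
             * acct_storage_g_add_wckeys() call on the queued list. */
            for (int i = 0; i < n_add; i++)
                    printf(" Adding '%s'\n", add_list[i]); /* bio, astro */
            return 0;
    }
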
diff --git a/src/sacctmgr/wckey_functions.c b/src/sacctmgr/wckey_functions.c
index 40e9901b11755eb1c03dd90e5f971e58ec54789b..b3403ef8c694ab4767901c89b5d62b7b9be0f9c2 100644
--- a/src/sacctmgr/wckey_functions.c
+++ b/src/sacctmgr/wckey_functions.c
@@ -5,10 +5,11 @@
  *  Copyright (C) 2002-2007 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Danny Auble <da@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -151,7 +152,7 @@ extern int sacctmgr_list_wckey(int argc, char *argv[])
 	int rc = SLURM_SUCCESS;
 	acct_wckey_cond_t *wckey_cond = xmalloc(sizeof(acct_wckey_cond_t));
 	List wckey_list = NULL;
-	int i=0, set=0;
+	int i=0;
 	ListIterator itr = NULL;
 	ListIterator itr2 = NULL;
 	acct_wckey_rec_t *wckey = NULL;
@@ -170,7 +171,13 @@ extern int sacctmgr_list_wckey(int argc, char *argv[])
 		PRINT_USER
 	};
 
-	set = _set_cond(&i, argc, argv, wckey_cond, format_list);
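+	/* The return value of _set_cond() is not needed here; any
+	 * parsing error sets the global exit_code checked below. */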
+	for (i=0; i<argc; i++) {
+		int command_len = strlen(argv[i]);
+	if (!strncasecmp (argv[i], "Where", MAX(command_len, 5))
+		    || !strncasecmp (argv[i], "Set", MAX(command_len, 3)))
+			i++;
+		_set_cond(&i, argc, argv, wckey_cond, format_list);
+	}
 
 	if(exit_code) {
 		destroy_acct_wckey_cond(wckey_cond);
@@ -228,7 +235,7 @@ extern int sacctmgr_list_wckey(int argc, char *argv[])
 			continue;
 		}
 
-		if(newlen > 0) 
+		if(newlen) 
 			field->len = newlen;
 		
 		list_append(print_fields_list, field);		
diff --git a/src/salloc/Makefile.in b/src/salloc/Makefile.in
index a89476413ccade14f54adc875842a2278b87ed50..e0d34e14245468cb74085ea0751ef9c3af44cb74 100644
--- a/src/salloc/Makefile.in
+++ b/src/salloc/Makefile.in
@@ -44,14 +44,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -105,6 +109,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
diff --git a/src/salloc/opt.c b/src/salloc/opt.c
index 14ea9975bbe742ca2049b022361b37359bc7b7e8..fca9cffeeb50e1aa79cefbf3f2cc7acfb8175336 100644
--- a/src/salloc/opt.c
+++ b/src/salloc/opt.c
@@ -2,13 +2,14 @@
  *  opt.c - options processing for salloc
  *****************************************************************************
  *  Copyright (C) 2002-2007 The Regents of the University of California.
- *  Copyright (C) 2008 Lawrence Livermore National Security.
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Mark Grondona <grondona1@llnl.gov>, et. al.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -61,13 +62,15 @@
 #include "src/common/list.h"
 #include "src/common/log.h"
 #include "src/common/parse_time.h"
+#include "src/common/plugstack.h"
 #include "src/common/proc_args.h"
+#include "src/common/read_config.h" /* contains getnodename() */
 #include "src/common/slurm_protocol_api.h"
+#include "src/common/slurm_resource_info.h"
+#include "src/common/slurm_rlimits_info.h"
 #include "src/common/uid.h"
 #include "src/common/xmalloc.h"
 #include "src/common/xstring.h"
-#include "src/common/slurm_rlimits_info.h"
-#include "src/common/read_config.h" /* contains getnodename() */
 
 #include "src/salloc/salloc.h"
 #include "src/salloc/opt.h"
@@ -77,21 +80,25 @@
 #define OPT_INT         0x01
 #define OPT_STRING      0x02
 #define OPT_DEBUG       0x03
-#define OPT_NODES       0x05
-#define OPT_BOOL        0x06
-#define OPT_CORE        0x07
-#define OPT_CONN_TYPE	0x08
-#define OPT_NO_ROTATE	0x0a
-#define OPT_GEOMETRY	0x0b
-#define OPT_BELL        0x0f
-#define OPT_NO_BELL     0x10
-#define OPT_JOBID       0x11
-#define OPT_EXCLUSIVE   0x12
-#define OPT_OVERCOMMIT  0x13
-#define OPT_ACCTG_FREQ  0x14
+#define OPT_NODES       0x04
+#define OPT_BOOL        0x05
+#define OPT_CORE        0x06
+#define OPT_CONN_TYPE	0x07
+#define OPT_NO_ROTATE	0x08
+#define OPT_GEOMETRY	0x09
+#define OPT_BELL        0x0a
+#define OPT_NO_BELL     0x0b
+#define OPT_JOBID       0x0c
+#define OPT_EXCLUSIVE   0x0d
+#define OPT_OVERCOMMIT  0x0e
+#define OPT_ACCTG_FREQ  0x0f
+#define OPT_CPU_BIND    0x10
+#define OPT_MEM_BIND    0x11
 #define OPT_WCKEY       0x15
 
 /* generic getopt_long flags, integers and *not* valid characters */
+#define LONG_OPT_CPU_BIND    0x101
+#define LONG_OPT_MEM_BIND    0x102
 #define LONG_OPT_JOBID       0x105
 #define LONG_OPT_TMP         0x106
 #define LONG_OPT_MEM         0x107
@@ -130,6 +137,7 @@
 #define LONG_OPT_HINT            0x13b
 #define LONG_OPT_ACCTG_FREQ      0x13c
 #define LONG_OPT_WCKEY           0x13d
+#define LONG_OPT_RESERVATION     0x13e
 
 /*---- global variables, defined in opt.h ----*/
 opt_t opt;
@@ -238,7 +246,10 @@ static void _opt_default()
 	opt.ntasks_per_node      = NO_VAL; /* ntask max limits */
 	opt.ntasks_per_socket    = NO_VAL;
 	opt.ntasks_per_core      = NO_VAL;
-	opt.cpu_bind_type = 0;		/* local dummy variable for now */
+	opt.cpu_bind_type = 0;
+	opt.cpu_bind = NULL;
+	opt.mem_bind_type = 0;
+	opt.mem_bind = NULL;
 	opt.time_limit = NO_VAL;
 	opt.time_limit_str = NULL;
 	opt.partition = NULL;
@@ -293,7 +304,8 @@ static void _opt_default()
 	opt.no_shell	    = false;
 	opt.get_user_env_time = -1;
 	opt.get_user_env_mode = -1;
-	opt.wckey = NULL;
+	opt.reservation     = NULL;
+	opt.wckey           = NULL;
 }
 
 /*---[ env var processing ]-----------------------------------------------*/
@@ -316,21 +328,23 @@ struct env_vars {
 
 env_vars_t env_vars[] = {
   {"SALLOC_ACCOUNT",       OPT_STRING,     &opt.account,       NULL           },
+  {"SALLOC_ACCTG_FREQ",    OPT_INT,        &opt.acctg_freq,    NULL           },
+  {"SALLOC_BELL",          OPT_BELL,       NULL,               NULL           },
   {"SALLOC_CONN_TYPE",     OPT_CONN_TYPE,  NULL,               NULL           },
+  {"SALLOC_CPU_BIND",      OPT_CPU_BIND,   NULL,               NULL           },
   {"SALLOC_DEBUG",         OPT_DEBUG,      NULL,               NULL           },
+  {"SALLOC_EXCLUSIVE",     OPT_EXCLUSIVE,  NULL,               NULL           },
   {"SALLOC_GEOMETRY",      OPT_GEOMETRY,   NULL,               NULL           },
   {"SALLOC_IMMEDIATE",     OPT_BOOL,       &opt.immediate,     NULL           },
   {"SALLOC_JOBID",         OPT_JOBID,      NULL,               NULL           },
+  {"SALLOC_MEM_BIND",      OPT_MEM_BIND,   NULL,               NULL           },
+  {"SALLOC_NETWORK",       OPT_STRING,     &opt.network,       NULL           },
+  {"SALLOC_NO_BELL",       OPT_NO_BELL,    NULL,               NULL           },
   {"SALLOC_NO_ROTATE",     OPT_NO_ROTATE,  NULL,               NULL           },
+  {"SALLOC_OVERCOMMIT",    OPT_OVERCOMMIT, NULL,               NULL           },
   {"SALLOC_PARTITION",     OPT_STRING,     &opt.partition,     NULL           },
   {"SALLOC_TIMELIMIT",     OPT_STRING,     &opt.time_limit_str,NULL           },
   {"SALLOC_WAIT",          OPT_INT,        &opt.max_wait,      NULL           },
-  {"SALLOC_BELL",          OPT_BELL,       NULL,               NULL           },
-  {"SALLOC_NO_BELL",       OPT_NO_BELL,    NULL,               NULL           },
-  {"SALLOC_EXCLUSIVE",     OPT_EXCLUSIVE,  NULL,               NULL           },
-  {"SALLOC_OVERCOMMIT",    OPT_OVERCOMMIT, NULL,               NULL           },
-  {"SALLOC_ACCTG_FREQ",    OPT_INT,        &opt.acctg_freq,    NULL           },
-  {"SALLOC_NETWORK",       OPT_STRING,     &opt.network,       NULL           },
   {"SALLOC_WCKEY",         OPT_STRING,     &opt.wckey,         NULL           },
   {NULL, 0, NULL, NULL}
 };
@@ -443,6 +457,16 @@ _process_env_var(env_vars_t *e, const char *val)
 	case OPT_OVERCOMMIT:
 		opt.overcommit = true;
 		break;
+	case OPT_CPU_BIND:
+		if (slurm_verify_cpu_bind(val, &opt.cpu_bind,
+					  &opt.cpu_bind_type))
+			exit(1);
+		break;
+	case OPT_MEM_BIND:
+		if (slurm_verify_mem_bind(val, &opt.mem_bind,
+					  &opt.mem_bind_type))
+			exit(1);
+		break;
 	case OPT_WCKEY:
 		xfree(opt.wckey);
 		opt.wckey = xstrdup(optarg);
@@ -502,7 +526,7 @@ void set_options(const int argc, char **argv)
 		{"overcommit",    no_argument,       0, 'O'},
 		{"partition",     required_argument, 0, 'p'},
 		{"dependency",    required_argument, 0, 'P'},
-		{"quiet",         no_argument,       0, 'q'},
+		{"quiet",         no_argument,       0, 'Q'},
 		{"no-rotate",     no_argument,       0, 'R'},
 		{"share",         no_argument,       0, 's'},
 		{"time",          required_argument, 0, 't'},
@@ -520,8 +544,6 @@ void set_options(const int argc, char **argv)
 		{"mincores",      required_argument, 0, LONG_OPT_MINCORES},
 		{"minthreads",    required_argument, 0, LONG_OPT_MINTHREADS},
 		{"mem",           required_argument, 0, LONG_OPT_MEM},
-		{"job-mem",       required_argument, 0, LONG_OPT_MEM_PER_CPU},
-		{"task-mem",      required_argument, 0, LONG_OPT_MEM_PER_CPU},
 		{"mem-per-cpu",   required_argument, 0, LONG_OPT_MEM_PER_CPU},
 		{"hint",          required_argument, 0, LONG_OPT_HINT},
 		{"sockets-per-node", required_argument, 0, LONG_OPT_SOCKETSPERNODE},
@@ -554,15 +576,23 @@ void set_options(const int argc, char **argv)
 		{"no-shell",      no_argument,       0, LONG_OPT_NOSHELL},
 		{"get-user-env",  optional_argument, 0, LONG_OPT_GET_USER_ENV},
 		{"network",       required_argument, 0, LONG_OPT_NETWORK},
+		{"cpu_bind",      required_argument, 0, LONG_OPT_CPU_BIND},
+		{"mem_bind",      required_argument, 0, LONG_OPT_MEM_BIND},
 		{"wckey",         required_argument, 0, LONG_OPT_WCKEY},
+		{"reservation",   required_argument, 0, LONG_OPT_RESERVATION},
 		{NULL,            0,                 0, 0}
 	};
-	char *opt_string = "+a:B:c:C:d:D:F:g:hHIJ:kK:L:m:n:N:Op:P:qR:st:uU:vVw:W:x:";
+	char *opt_string = "+B:c:C:d:D:F:g:hHIJ:kK::L:m:n:N:Op:P:QRst:uU:vVw:W:x:";
+
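+	/* Extend the static option table with any additional options
+	 * registered by loaded spank plugins. */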
+	struct option *optz = spank_option_table_create(long_options);
+
+	if (!optz)
+		fatal("Unable to create options table");
 
 	opt.progname = xbasename(argv[0]);
 	optind = 0;		
 	while((opt_char = getopt_long(argc, argv, opt_string,
-				      long_options, &option_index)) != -1) {
+				      optz, &option_index)) != -1) {
 		switch (opt_char) {
 			
 		case '?':
@@ -680,7 +710,7 @@ void set_options(const int argc, char **argv)
 			xfree(opt.dependency);
 			opt.dependency = xstrdup(optarg);
 			break;
-		case 'q':
+		case 'Q':
 			opt.quiet++;
 			break;
 		case 'R':
@@ -931,15 +961,32 @@ void set_options(const int argc, char **argv)
 			xfree(opt.network);
 			opt.network = xstrdup(optarg);
 			break;
+		case LONG_OPT_CPU_BIND:
+			if (slurm_verify_cpu_bind(optarg, &opt.cpu_bind,
+						  &opt.cpu_bind_type))
+				exit(1);
+			break;
+		case LONG_OPT_MEM_BIND:
+			if (slurm_verify_mem_bind(optarg, &opt.mem_bind,
+						  &opt.mem_bind_type))
+				exit(1);
+			break;
 		case LONG_OPT_WCKEY:
 			xfree(opt.wckey);
 			opt.wckey = xstrdup(optarg);
 			break;
+		case LONG_OPT_RESERVATION:
+			xfree(opt.reservation);
+			opt.reservation = xstrdup(optarg);
+			break;
 		default:
-			fatal("Unrecognized command line parameter %c",
-			      opt_char);
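+			/* Not a native salloc option; give spank
+			 * plugins a chance to handle it before
+			 * giving up. */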
+			if (spank_process_option(opt_char, optarg) < 0)
+			    fatal("Unrecognized command line parameter %c",
+				    opt_char);
 		}
 	}
+
+	spank_option_table_destroy(optz);
 }
 
 static void _proc_get_user_env(char *optarg)
@@ -1035,7 +1082,7 @@ static bool _opt_verify(void)
 	bool verified = true;
 
 	if (opt.quiet && opt.verbose) {
-		error ("don't specify both --verbose (-v) and --quiet (-q)");
+		error ("don't specify both --verbose (-v) and --quiet (-Q)");
 		verified = false;
 	}
 
@@ -1056,21 +1103,51 @@ static bool _opt_verify(void)
 
 	/* check for realistic arguments */
 	if (opt.nprocs <= 0) {
-		error("%s: invalid number of processes (-n %d)",
-		      opt.progname, opt.nprocs);
+		error("invalid number of processes (-n %d)",
+		      opt.nprocs);
 		verified = false;
 	}
 
 	if (opt.cpus_per_task <= 0) {
-		error("%s: invalid number of cpus per task (-c %d)\n",
-		      opt.progname, opt.cpus_per_task);
+		error("invalid number of cpus per task (-c %d)\n",
+		      opt.cpus_per_task);
 		verified = false;
 	}
 
 	if ((opt.min_nodes < 0) || (opt.max_nodes < 0) || 
 	    (opt.max_nodes && (opt.min_nodes > opt.max_nodes))) {
-		error("%s: invalid number of nodes (-N %d-%d)\n",
-		      opt.progname, opt.min_nodes, opt.max_nodes);
+		error("invalid number of nodes (-N %d-%d)\n",
+		      opt.min_nodes, opt.max_nodes);
+		verified = false;
+	}
+
+#ifdef HAVE_BGL
+	if (opt.blrtsimage && strchr(opt.blrtsimage, ' ')) {
+		error("invalid BlrtsImage given '%s'", opt.blrtsimage);
+		verified = false;
+	}
+#endif
+
+	if (opt.linuximage && strchr(opt.linuximage, ' ')) {
+#ifdef HAVE_BGL
+		error("invalid LinuxImage given '%s'", opt.linuximage);
+#else
+		error("invalid CnloadImage given '%s'", opt.linuximage);
+#endif
+		verified = false;
+	}
+
+	if (opt.mloaderimage && strchr(opt.mloaderimage, ' ')) {
+		error("invalid MloaderImage given '%s'", opt.mloaderimage);
+		verified = false;
+	}
+
+	if (opt.ramdiskimage && strchr(opt.ramdiskimage, ' ')) {
+#ifdef HAVE_BGL
+		error("invalid RamDiskImage given '%s'", opt.ramdiskimage);
+#else
+		error("invalid IoloadImage given '%s'", opt.ramdiskimage);
+#endif
 		verified = false;
 	}
 
@@ -1197,6 +1274,35 @@ static bool _opt_verify(void)
 		opt.network = "us,sn_all,bulk_xfer";
 #endif
 
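+	/* Validate the accumulated cpu binding options, then expose the
+	 * effective cpu/mem binding and ntasks-per-node settings via the
+	 * environment unless the corresponding variables are already set. */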
+	if (slurm_verify_cpu_bind(NULL, &opt.cpu_bind,
+				  &opt.cpu_bind_type))
+		exit(1);
+	if (opt.cpu_bind_type && (getenv("SLURM_CPU_BIND") == NULL)) {
+		char tmp[64];
+		slurm_sprint_cpu_bind_type(tmp, opt.cpu_bind_type);
+		if (opt.cpu_bind) {
+			setenvf(NULL, "SLURM_CPU_BIND", "%s:%s", 
+				tmp, opt.cpu_bind);
+		} else {
+			setenvf(NULL, "SLURM_CPU_BIND", "%s", tmp);
+		}
+	}
+	if (opt.mem_bind_type && (getenv("SLURM_MEM_BIND") == NULL)) {
+		char tmp[64];
+		slurm_sprint_mem_bind_type(tmp, opt.mem_bind_type);
+		if (opt.mem_bind) {
+			setenvf(NULL, "SLURM_MEM_BIND", "%s:%s", 
+				tmp, opt.mem_bind);
+		} else {
+			setenvf(NULL, "SLURM_MEM_BIND", "%s", tmp);
+		}
+	}
+	if ((opt.ntasks_per_node != NO_VAL) && 
+	    (getenv("SLURM_NTASKS_PER_NODE") == NULL)) {
+		setenvf(NULL, "SLURM_NTASKS_PER_NODE", "%d", 
+			opt.ntasks_per_node);
+	}
+
 	return verified;
 }
 
@@ -1324,6 +1430,7 @@ static void _opt_list()
 	info("partition      : %s",
 		opt.partition == NULL ? "default" : opt.partition);
 	info("job name       : `%s'", opt.job_name);
+	info("reservation    : `%s'", opt.reservation);
 	info("wckey          : `%s'", opt.wckey);
 	if (opt.jobid != NO_VAL)
 		info("jobid          : %u", opt.jobid);
@@ -1390,6 +1497,10 @@ static void _opt_list()
 	info("ntasks-per-socket : %d", opt.ntasks_per_socket);
 	info("ntasks-per-core   : %d", opt.ntasks_per_core);
 	info("plane_size        : %u", opt.plane_size);
+	info("cpu_bind          : %s", 
+	     opt.cpu_bind == NULL ? "default" : opt.cpu_bind);
+	info("mem_bind          : %s",
+	     opt.mem_bind == NULL ? "default" : opt.mem_bind);
 	str = print_commandline(command_argc, command_argv);
 	info("user command   : `%s'", str);
 	xfree(str);
@@ -1421,6 +1532,7 @@ static void _usage(void)
 "              [--bell] [--no-bell] [--kill-command[=signal]]\n"
 "              [--nodefile=file] [--nodelist=hosts] [--exclude=hosts]\n"
 "              [--network=type] [--mem-per-cpu=MB]\n"
+"              [--cpu_bind=...] [--mem_bind=...] [--reservation=name]\n"
 "              [executable [args...]]\n");
 }
 
@@ -1432,51 +1544,53 @@ static void _help(void)
 "Usage: salloc [OPTIONS...] [executable [args...]]\n"
 "\n"
 "Parallel run options:\n"
-"  -N, --nodes=N               number of nodes on which to run (N = min[-max])\n"
-"  -n, --tasks=N               number of processors required\n"
+"      --begin=time            defer job until HH:MM DD/MM/YY\n"
+"      --bell                  ring the terminal bell when the job is allocated\n"
 "  -c, --cpus-per-task=ncpus   number of cpus required per task\n"
-"      --ntasks-per-node=n     number of tasks to invoke on each node\n"
-"  -p, --partition=partition   partition requested\n"
-"  -H, --hold                  submit job in held state\n"
-"  -t, --time=minutes          time limit\n"
+"      --comment=name          arbitrary comment\n"
 "  -D, --chdir=path            change working directory\n"
+"      --get-user-env          used by Moab.  See srun man page.\n"
+"      --gid=group_id          group ID to run job as (user root only)\n"
+"  -H, --hold                  submit job in held state\n"
 "  -I, --immediate             exit if resources are not immediately available\n"
+"      --jobid=id              specify jobid to use\n"
+"  -J, --job-name=jobname      name of job\n"
 "  -k, --no-kill               do not kill job on node failure\n"
 "  -K, --kill-command[=signal] signal to send terminating job\n"
-"  -O, --overcommit            overcommit resources\n"
-"  -s, --share                 share nodes with other jobs\n"
+"  -L, --licenses=names        required license, comma separated\n"
 "  -m, --distribution=type     distribution method for processes to nodes\n"
 "                              (type = block|cyclic|arbitrary)\n"
-"  -J, --job-name=jobname      name of job\n"
-"      --jobid=id              specify jobid to use\n"
-"  -W, --wait=sec              seconds to wait for allocation if not\n"
-"                              immediately available\n"
-"  -v, --verbose               verbose mode (multiple -v's increase verbosity)\n"
-"  -q, --quiet                 quiet mode (suppress informational messages)\n"
-"  -P, --dependency=type:jobid defer job until condition on jobid is satisfied\n"
-"      --nice[=value]          decrease secheduling priority by value\n"
-"  -U, --account=name          charge job to specified account\n"
-"      --begin=time            defer job until HH:MM DD/MM/YY\n"
-"      --comment=name          arbitrary comment\n"
-"  -L, --licenses=names        required license, comma separated\n"
 "      --mail-type=type        notify on state change: BEGIN, END, FAIL or ALL\n"
-"      --mail-user=user        who to send email notification for job state changes\n"
-"      --bell                  ring the terminal bell when the job is allocated\n"
+"      --mail-user=user        who to send email notification for job state\n"
+"                              changes\n"
+"  -n, --tasks=N               number of processors required\n"
+"      --nice[=value]          decrease scheduling priority by value\n"
 "      --no-bell               do NOT ring the terminal bell\n"
-"      --gid=group_id          group ID to run job as (user root only)\n"
+"      --ntasks-per-node=n     number of tasks to invoke on each node\n"
+"  -N, --nodes=N               number of nodes on which to run (N = min[-max])\n"
+"  -O, --overcommit            overcommit resources\n"
+"  -Q, --quiet                 quiet mode (suppress informational messages)\n"
+"  -p, --partition=partition   partition requested\n"
+"  -P, --dependency=type:jobid defer job until condition on jobid is satisfied\n"
+"  -s, --share                 share nodes with other jobs\n"
+"  -t, --time=minutes          time limit\n"
+"  -U, --account=name          charge job to specified account\n"
 "      --uid=user_id           user ID to run job as (user root only)\n"
-"      --get-user-env          used by Moab.  See srun man page.\n"
+"  -v, --verbose               verbose mode (multiple -v's increase verbosity)\n"
+"  -W, --wait=sec              seconds to wait for allocation if not\n"
+"                              immediately available\n"
 "\n"
 "Constraint options:\n"
+"      --contiguous            demand a contiguous range of nodes\n"
+"  -C, --constraint=list       specify a list of constraints\n"
+"  -F, --nodefile=filename     request a specific list of hosts\n"
+"      --mem=MB                minimum amount of real memory\n"
 "      --mincpus=n             minimum number of cpus per node\n"
 "      --minsockets=n          minimum number of sockets per node\n"
 "      --mincores=n            minimum number of cores per cpu\n"
 "      --minthreads=n          minimum number of threads per core\n"
-"      --mem=MB                minimum amount of real memory\n"
+"      --reservation=name      allocate resources from named reservation\n"
 "      --tmp=MB                minimum amount of temporary disk\n"
-"      --contiguous            demand a contiguous range of nodes\n"
-"  -C, --constraint=list       specify a list of constraints\n"
-"  -F, --nodefile=filename     request a specific list of hosts\n"
 "  -w, --nodelist=hosts...     request a specific list of hosts\n"
 "  -x, --exclude=hosts...      exclude a specific list of hosts\n"
 "\n"
@@ -1485,27 +1599,33 @@ static void _help(void)
 "                              cpu consumable resource is enabled\n"
 "      --mem-per-cpu=MB        maximum amount of real memory per allocated\n"
 "                              cpu required by the job.\n" 
-"                              --mem >= --job-mem if --mem is specified.\n" 
+"                              --mem >= --mem-per-cpu if --mem is specified.\n" 
 "\n"
 "Affinity/Multi-core options: (when the task/affinity plugin is enabled)\n" 
-"  -B --extra-node-info=S[:C[:T]]            Expands to:\n"
-"      --sockets-per-node=S    number of sockets per node to allocate\n"
-"      --cores-per-socket=C    number of cores per socket to allocate\n"
-"      --threads-per-core=T    number of threads per core to allocate\n"
+"  -B  --extra-node-info=S[:C[:T]]            Expands to:\n"
+"       --sockets-per-node=S   number of sockets per node to allocate\n"
+"       --cores-per-socket=C   number of cores per socket to allocate\n"
+"       --threads-per-core=T   number of threads per core to allocate\n"
 "                              each field can be 'min[-max]' or wildcard '*'\n"
 "                              total cpus requested = (N x S x C x T)\n"
 "\n"
-"      --ntasks-per-socket=n   number of tasks to invoke on each socket\n"
-"      --ntasks-per-core=n     number of tasks to invoke on each core\n");
+"      --ntasks-per-core=n     number of tasks to invoke on each core\n"
+"      --ntasks-per-socket=n   number of tasks to invoke on each socket\n");
 	conf = slurm_conf_lock();
 	if (conf->task_plugin != NULL
 	    && strcasecmp(conf->task_plugin, "task/affinity") == 0) {
 		printf(
+"      --cpu_bind=             Bind tasks to CPUs\n"
+"                              (see \"--cpu_bind=help\" for options)\n"
 "      --hint=                 Bind tasks according to application hints\n"
-"                              (see \"--hint=help\" for options)\n");
+"                              (see \"--hint=help\" for options)\n"
+"      --mem_bind=             Bind memory to locality domains (ldom)\n"
+"                              (see \"--mem_bind=help\" for options)\n");
 	}
 	slurm_conf_unlock();
 
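+	/* Also list help text for any options registered by spank
+	 * plugins. */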
+	spank_print_options(stdout, 6, 30);
+
         printf("\n"
 #ifdef HAVE_AIX				/* AIX/Federation specific options */
 "AIX related options:\n"
@@ -1529,10 +1649,14 @@ static void _help(void)
 "      --mloader-image=path    path to mloader image for bluegene block.  Default if not set\n"
 "      --ioload-image=path     path to ioload image for bluegene block.  Default if not set\n"
 #else
-"      --blrts-image=path      path to blrts image for bluegene block.  Default if not set\n"
-"      --linux-image=path      path to linux image for bluegene block.  Default if not set\n"
-"      --mloader-image=path    path to mloader image for bluegene block.  Default if not set\n"
-"      --ramdisk-image=path    path to ramdisk image for bluegene block.  Default if not set\n"
+"      --blrts-image=path      path to blrts image for bluegene block.\n"
+"                              Default if not set\n"
+"      --linux-image=path      path to linux image for bluegene block.  Default\n"
+"                              if not set\n"
+"      --mloader-image=path    path to mloader image for bluegene block.\n"
+"                              Default if not set\n"
+"      --ramdisk-image=path    path to ramdisk image for bluegene block.\n"
+"                              Default if not set\n"
 #endif
 #endif
 "\n"
diff --git a/src/salloc/opt.h b/src/salloc/opt.h
index 632239176b24bc6f84a6c0834e3e0f9961d7c55f..f413c0d80eb19c265409bf8b2c1e0192b34862ec 100644
--- a/src/salloc/opt.h
+++ b/src/salloc/opt.h
@@ -2,14 +2,15 @@
  *  opt.h - definitions for salloc option processing
  *****************************************************************************
  *  Copyright (C) 2002-2007 The Regents of the University of California.
- *  Copyright (C) 2008 Lawrence Livermore National Security.
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Mark Grondona <grondona1@llnl.gov>,
  *    Christopher J. Morrone <morrone2@llnl.gov>, et. al.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -72,6 +73,9 @@ typedef struct salloc_options {
 	int ntasks_per_socket; /* --ntasks-per-socket=n     */
 	int ntasks_per_core;   /* --ntasks-per-core=n	    */
 	cpu_bind_type_t cpu_bind_type; /* --cpu_bind=           */
+	char *cpu_bind;		/* binding map for map/mask_cpu */
+	mem_bind_type_t mem_bind_type; /* --mem_bind=		*/
+	char *mem_bind;		/* binding map for map/mask_mem	*/
 	bool extra_set;		/* true if extra node info explicitly set */
 	int  time_limit;	/* --time,   -t	(int minutes)	*/
 	char *time_limit_str;	/* --time,   -t (string)	*/
@@ -135,6 +139,7 @@ typedef struct salloc_options {
 	int get_user_env_time;	/* --get-user-env[=secs]	*/
 	int get_user_env_mode; 	/* --get-user-env=[S|L]		*/
 	char *cwd;		/* current working directory	*/
+	char *reservation;	/* --reservation		*/
 	char *wckey;            /* --wckey workload characterization key */
 } opt_t;
 
diff --git a/src/salloc/salloc.c b/src/salloc/salloc.c
index 4eb596af45ff25a5e373d2059d7fb097f64a1096..58a8e09e49f0c5761d9d4986b47a77fac32cb3ce 100644
--- a/src/salloc/salloc.c
+++ b/src/salloc/salloc.c
@@ -3,13 +3,14 @@
  *             launch a user-specified command.
  *****************************************************************************
  *  Copyright (C) 2006-2007 The Regents of the University of California.
- *  Copyright (C) 2008 Lawrence Livermore National Security.
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Christopher J. Morrone <morrone2@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -41,12 +42,14 @@
 
 #include <slurm/slurm.h>
 
+#include "src/common/basil_resv_conf.h"
 #include "src/common/env.h"
 #include "src/common/read_config.h"
 #include "src/common/slurm_rlimits_info.h"
 #include "src/common/xmalloc.h"
 #include "src/common/xsignal.h"
 #include "src/common/xstring.h"
+#include "src/common/plugstack.h"
 
 #include "src/salloc/salloc.h"
 #include "src/salloc/opt.h"
@@ -59,6 +62,10 @@
 #include "src/plugins/select/bluegene/wrap_rm_api.h"
 #endif
 
+#ifdef HAVE_CRAY_XT
+#include "src/common/node_select.h"
+#endif
+
 #define MAX_RETRIES 3
 
 char **command_argv;
@@ -72,9 +79,9 @@ static bool allocation_interrupted = false;
 static uint32_t pending_job_id = 0;
 static time_t last_timeout = 0;
 
-static int fill_job_desc_from_opts(job_desc_msg_t *desc);
-static void ring_terminal_bell(void);
-static int fork_command(char **command);
+static int  _fill_job_desc_from_opts(job_desc_msg_t *desc);
+static void _ring_terminal_bell(void);
+static int  _fork_command(char **command);
 static void _pending_callback(uint32_t job_id);
 static void _ignore_signal(int signo);
 static void _exit_on_signal(int signo);
@@ -87,14 +94,15 @@ static void _ping_handler(srun_ping_msg_t *msg);
 static void _node_fail_handler(srun_node_fail_msg_t *msg);
 
 #ifdef HAVE_BG
-
 #define POLL_SLEEP 3			/* retry interval in seconds  */
-
 static int _wait_bluegene_block_ready(
-	resource_allocation_response_msg_t *alloc);
+			resource_allocation_response_msg_t *alloc);
 static int _blocks_dealloc();
 #endif
 
+#ifdef HAVE_CRAY_XT
+static int  _claim_reservation(resource_allocation_response_msg_t *alloc);
+#endif
 
 int main(int argc, char *argv[])
 {
@@ -114,6 +122,16 @@ int main(int argc, char *argv[])
 	slurm_allocation_callbacks_t callbacks;
 
 	log_init(xbasename(argv[0]), logopt, 0, NULL);
+
+	if (spank_init_allocator() < 0)
+		fatal("Failed to initialize plugin stack");
+
+	/* Be sure to call spank_fini when salloc exits
+	 */
+	if (atexit((void (*) (void)) spank_fini) < 0)
+		error("Failed to register atexit handler for plugins: %m");
+
 	if (initialize_and_process_args(argc, argv) < 0) {
 		fatal("salloc parameter parsing");
 	}
@@ -125,6 +143,9 @@ int main(int argc, char *argv[])
 		log_alter(logopt, 0, NULL);
 	}
 
+	if (spank_init_post_opt() < 0)
+		fatal("Plugin stack post-option processing failed");
+
 	if (opt.cwd && chdir(opt.cwd)) {
 		error("chdir(%s): %m", opt.cwd);
 		exit(1);
@@ -149,7 +170,7 @@ int main(int argc, char *argv[])
 	 * Request a job allocation
 	 */
 	slurm_init_job_desc_msg(&desc);
-	if (fill_job_desc_from_opts(&desc) == -1) {
+	if (_fill_job_desc_from_opts(&desc) == -1) {
 		exit(1);
 	}
 	if (opt.gid != (gid_t) -1) {
@@ -176,7 +197,7 @@ int main(int argc, char *argv[])
 	xsignal(SIGTERM, _signal_while_allocating);
 	xsignal(SIGUSR1, _signal_while_allocating);
 	xsignal(SIGUSR2, _signal_while_allocating);
 
 	before = time(NULL);
 	while ((alloc = slurm_allocate_resources_blocking(&desc, opt.max_wait,
 					_pending_callback)) == NULL) {
@@ -201,20 +222,29 @@ int main(int argc, char *argv[])
 		}
 		slurm_allocation_msg_thr_destroy(msg_thr);
 		exit(1);
-	}
-
-	/*
-	 * Allocation granted!
-	 */
-	info("Granted job allocation %d", alloc->job_id);
+	} else if(!allocation_interrupted) {
+		/*
+		 * Allocation granted!
+		 */
+		info("Granted job allocation %d", alloc->job_id);
 #ifdef HAVE_BG
-	if (!_wait_bluegene_block_ready(alloc)) {
-		if(!allocation_interrupted)
-			error("Something is wrong with the boot of the block.");
-		goto relinquish;
+		if (!_wait_bluegene_block_ready(alloc)) {
+			if(!allocation_interrupted)
+				error("Something is wrong with the "
+				      "boot of the block.");
+			goto relinquish;
+		}
+#endif
+#ifdef HAVE_CRAY_XT
+		if (!_claim_reservation(alloc)) {
+			if(!allocation_interrupted)
+				error("Something is wrong with the ALPS "
+				      "resource reservation.");
+			goto relinquish;
+		}
+#endif
 	}
 
-#endif
 	after = time(NULL);
 
 	xsignal(SIGHUP, _exit_on_signal);
@@ -228,7 +258,7 @@ int main(int argc, char *argv[])
 	if (opt.bell == BELL_ALWAYS
 	    || (opt.bell == BELL_AFTER_DELAY
 		&& ((after - before) > DEFAULT_BELL_DELAY))) {
-		ring_terminal_bell();
+		_ring_terminal_bell();
 	}
 	if (opt.no_shell)
 		exit(0);
@@ -272,7 +302,7 @@ int main(int argc, char *argv[])
 		return 1;
 	} else {
 		allocation_state = GRANTED;
-		command_pid = pid = fork_command(command_argv);
+		command_pid = pid = _fork_command(command_argv);
 	}
 	pthread_mutex_unlock(&allocation_state_lock);
 
@@ -323,23 +353,34 @@ relinquish:
 		} else if (WIFSIGNALED(status)) {
 			verbose("Command \"%s\" was terminated by signal %d",
 				command_argv[0], WTERMSIG(status));
+			/* If we got one of these signals, return a
+			   normal exit code, since the signal was most
+			   likely sent by the user. */
+			switch(WTERMSIG(status)) {
+			case SIGHUP:
+			case SIGINT:
+			case SIGQUIT:
+			case SIGKILL:
+				rc = 0;
+				break;
+			default:
+				break;
+			}
 		}
 	}
-
 	return rc;
 }
 
 
 /* Returns 0 on success, -1 on failure */
-static int fill_job_desc_from_opts(job_desc_msg_t *desc)
+static int _fill_job_desc_from_opts(job_desc_msg_t *desc)
 {
 	desc->contiguous = opt.contiguous ? 1 : 0;
 	desc->features = opt.constraints;
 	desc->immediate = opt.immediate ? 1 : 0;
 	desc->name = xstrdup(opt.job_name);
-
-	if(opt.wckey)
- 		xstrfmtcat(desc->name, "\"%s", opt.wckey);
+	desc->reservation = xstrdup(opt.reservation);
+	desc->wckey  = xstrdup(opt.wckey);
 
 	desc->req_nodes = opt.nodelist;
 	desc->exc_nodes = opt.exc_nodes;
@@ -351,9 +392,21 @@ static int fill_job_desc_from_opts(job_desc_msg_t *desc)
 	desc->group_id = opt.gid;
 	if (opt.dependency)
 		desc->dependency = xstrdup(opt.dependency);
+
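+	/* Forward any explicit cpu/mem binding requests to the
+	 * controller via the job descriptor. */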
+	if (opt.cpu_bind)
+		desc->cpu_bind       = opt.cpu_bind;
+	if (opt.cpu_bind_type)
+		desc->cpu_bind_type  = opt.cpu_bind_type;
+	if (opt.mem_bind)
+		desc->mem_bind       = opt.mem_bind;
+	if (opt.mem_bind_type)
+		desc->mem_bind_type  = opt.mem_bind_type;
 	desc->task_dist  = opt.distribution;
 	if (opt.plane_size != NO_VAL)
 		desc->plane_size = opt.plane_size;
+
 	if (opt.licenses)
 		desc->licenses = xstrdup(opt.licenses);
 	desc->network = opt.network;
@@ -448,7 +501,7 @@ static int fill_job_desc_from_opts(job_desc_msg_t *desc)
 	return 0;
 }
 
-static void ring_terminal_bell(void)
+static void _ring_terminal_bell(void)
 {
         if (isatty(STDOUT_FILENO)) {
                 fprintf(stdout, "\a");
@@ -457,7 +510,7 @@ static void ring_terminal_bell(void)
 }
 
 /* returns the pid of the forked command, or <0 on error */
-static pid_t fork_command(char **command)
+static pid_t _fork_command(char **command)
 {
 	pid_t pid;
 
@@ -698,4 +751,22 @@ static int _blocks_dealloc()
 	bg_info_ptr = new_bg_ptr;
 	return rc;
 }
+#endif	/* HAVE_BG */
+
+#ifdef HAVE_CRAY_XT
+/* Returns 1 if the ALPS reservation was successfully confirmed, 0 otherwise */
+static int _claim_reservation(resource_allocation_response_msg_t *alloc)
+{
+	int rc = 0;
+	char *resv_id = NULL;
+
+	select_g_get_jobinfo(alloc->select_jobinfo, SELECT_DATA_RESV_ID,
+			     &resv_id);
+	if (resv_id == NULL)
+		return rc;
+	if (basil_resv_conf(resv_id, alloc->job_id) == SLURM_SUCCESS)
+		rc = 1;
+	xfree(resv_id);
+	return rc;
+}
 #endif
diff --git a/src/salloc/salloc.h b/src/salloc/salloc.h
index fbe7c43a7ea3097e24b6cb36077f03c55ea9f7fb..20a578cb54c395affe5dc39c2b6e0638c374f634 100644
--- a/src/salloc/salloc.h
+++ b/src/salloc/salloc.h
@@ -5,10 +5,11 @@
  *  Copyright (C) 2002-2006 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Christopher J. Morrone <morrone2@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/sattach/Makefile.in b/src/sattach/Makefile.in
index c6b9ce1c7278c6c2a3b530bd6b938edd4c61bd36..309a8798d4e431f85dd27d839c02f360267dd13c 100644
--- a/src/sattach/Makefile.in
+++ b/src/sattach/Makefile.in
@@ -44,14 +44,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -106,6 +110,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
diff --git a/src/sattach/attach.c b/src/sattach/attach.c
index e4ca98076a5cefe338b9bece1aa9914dc82ec4ac..2d7b89253cf70e0567cf63115fb13617713c25c5 100644
--- a/src/sattach/attach.c
+++ b/src/sattach/attach.c
@@ -1,14 +1,15 @@
 /*****************************************************************************\
  *  attach.c - Definitions needed for parallel debugger
- *  $Id: attach.c 13672 2008-03-19 23:10:58Z jette $
+ *  $Id: attach.c 16616 2009-02-20 17:00:27Z jette $
  *****************************************************************************
  *  Copyright (C) 2002 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Mark Grondona <grondona1@llnl.gov>, et. al.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/sattach/opt.c b/src/sattach/opt.c
index b9abbf6fa1d0c07886cf18b2ea03954644ca5bee..622208d5ed4caf4adf29fdf6925582f1414eb314 100644
--- a/src/sattach/opt.c
+++ b/src/sattach/opt.c
@@ -1,14 +1,15 @@
 /*****************************************************************************\
  *  opt.c - options processing for sattach
- *  $Id: opt.c 13672 2008-03-19 23:10:58Z jette $
+ *  $Id: opt.c 16616 2009-02-20 17:00:27Z jette $
  *****************************************************************************
  *  Copyright (C) 2002-2006 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Mark Grondona <grondona1@llnl.gov>, et. al.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/sattach/opt.h b/src/sattach/opt.h
index 84681975affcdaaed36247d1bf1b633b197bb7ce..516e18c1efad2efb233a9ff82b29a56fba5011db 100644
--- a/src/sattach/opt.h
+++ b/src/sattach/opt.h
@@ -1,15 +1,16 @@
 /*****************************************************************************\
  *  opt.h - definitions for sattach option processing
- *  $Id: opt.h 13672 2008-03-19 23:10:58Z jette $
+ *  $Id: opt.h 16616 2009-02-20 17:00:27Z jette $
  *****************************************************************************
  *  Copyright (C) 2002-2006 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Mark Grondona <grondona1@llnl.gov>,
  *    Christopher J. Morrone <morrone2@llnl.gov>, et. al.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/sattach/sattach.c b/src/sattach/sattach.c
index ddc5e4452e02af60b1fb0326fed46f3e4deab0ca..140ca368093691fad7f204e08d861113d184da50 100644
--- a/src/sattach/sattach.c
+++ b/src/sattach/sattach.c
@@ -1,15 +1,15 @@
 /*****************************************************************************\
  *  sattach.c - Attach to a running job step.
- *
- *  $Id: sattach.c 8447 2006-06-26 22:29:29Z morrone $
  *****************************************************************************
- *  Copyright (C) 2006 The Regents of the University of California.
+ *  Copyright (C) 2006-2007 The Regents of the University of California.
+ *  Copyright (C) 2008 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Christopher J. Morrone <morrone2@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -67,7 +67,8 @@ static void _mpir_cleanup(void);
 static void _mpir_dump_proctable(void);
 static void print_layout_info(slurm_step_layout_t *layout);
 static slurm_cred_t _generate_fake_cred(uint32_t jobid, uint32_t stepid,
-					uid_t uid, char *nodelist);
+					uid_t uid, char *nodelist, 
+					uint32_t node_cnt);
 static uint32_t _nodeid_from_layout(slurm_step_layout_t *layout,
 				    uint32_t taskid);
 static int _attach_to_tasks(uint32_t jobid,
@@ -146,7 +147,8 @@ int sattach(int argc, char *argv[])
 	}
 
 	fake_cred = _generate_fake_cred(opt.jobid, opt.stepid,
-					opt.uid, layout->node_list);
+					opt.uid, layout->node_list,
+					layout->node_cnt);
 	
 	mts = _msg_thr_create(layout->node_cnt, layout->task_cnt);
 
@@ -217,7 +219,8 @@ static void print_layout_info(slurm_step_layout_t *layout)
 
 /* return a faked job credential */
 static slurm_cred_t _generate_fake_cred(uint32_t jobid, uint32_t stepid,
-					uid_t uid, char *nodelist)
+					uid_t uid, char *nodelist,
+					uint32_t node_cnt)
 {
 	slurm_cred_arg_t arg;
 	slurm_cred_t cred;
@@ -226,10 +229,24 @@ static slurm_cred_t _generate_fake_cred(uint32_t jobid, uint32_t stepid,
 	arg.stepid   = stepid;
 	arg.uid      = uid;
 	arg.hostlist = nodelist;
-        arg.alloc_lps_cnt = 0;    
-        arg.alloc_lps =  NULL; 
+
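+	/* Fake a minimal allocation layout: one socket with a single
+	 * core on each of the node_cnt nodes, all cores allocated. */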
+	arg.core_bitmap   = bit_alloc(node_cnt);
+	bit_nset(arg.core_bitmap, 0, node_cnt-1);
+	arg.cores_per_socket = xmalloc(sizeof(uint16_t));
+	arg.cores_per_socket[0] = 1;
+	arg.sockets_per_node = xmalloc(sizeof(uint16_t));
+	arg.sockets_per_node[0] = 1;
+	arg.sock_core_rep_count = xmalloc(sizeof(uint32_t));
+	arg.sock_core_rep_count[0] = node_cnt;
+	arg.job_nhosts    = node_cnt;
+	arg.job_hostlist  = nodelist;
+
 	cred = slurm_cred_faker(&arg);
 
+	bit_free(arg.core_bitmap);
+	xfree(arg.cores_per_socket);
+	xfree(arg.sockets_per_node);
+	xfree(arg.sock_core_rep_count);
 	return cred;
 }
 
diff --git a/src/sbatch/Makefile.in b/src/sbatch/Makefile.in
index e531a33ef308f9b504cdc0efe31efbf2b914bef1..6bb0c96df615acde3f6cd67f091563cfe0389a47 100644
--- a/src/sbatch/Makefile.in
+++ b/src/sbatch/Makefile.in
@@ -44,14 +44,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -105,6 +109,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
diff --git a/src/sbatch/opt.c b/src/sbatch/opt.c
index 207beef37a9c248e18bfe263e7c3249f71ebe619..bd324fe67e151f72be3b201b072bac543ad791ed 100644
--- a/src/sbatch/opt.c
+++ b/src/sbatch/opt.c
@@ -2,13 +2,14 @@
  *  opt.c - options processing for sbatch
  *****************************************************************************
  *  Copyright (C) 2002-2007 The Regents of the University of California.
- *  Copyright (C) 2008 Lawrence Livermore National Security.
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Mark Grondona <grondona1@llnl.gov>, et. al.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -61,13 +62,15 @@
 #include "src/common/list.h"
 #include "src/common/log.h"
 #include "src/common/parse_time.h"
+#include "src/common/plugstack.h"
 #include "src/common/proc_args.h"
+#include "src/common/read_config.h" /* contains getnodename() */
 #include "src/common/slurm_protocol_api.h"
+#include "src/common/slurm_resource_info.h"
+#include "src/common/slurm_rlimits_info.h"
 #include "src/common/uid.h"
 #include "src/common/xmalloc.h"
 #include "src/common/xstring.h"
-#include "src/common/slurm_rlimits_info.h"
-#include "src/common/read_config.h" /* contains getnodename() */
 
 #include "src/sbatch/opt.h"
 
@@ -76,24 +79,28 @@
 #define OPT_INT         0x01
 #define OPT_STRING      0x02
 #define OPT_DEBUG       0x03
-#define OPT_NODES       0x05
-#define OPT_BOOL        0x06
-#define OPT_CORE        0x07
-#define OPT_CONN_TYPE	0x08
-#define OPT_DISTRIB	0x09
-#define OPT_NO_ROTATE	0x0a
-#define OPT_GEOMETRY	0x0b
-#define OPT_MULTI	0x0f
-#define OPT_EXCLUSIVE	0x10
-#define OPT_OVERCOMMIT	0x11
-#define OPT_OPEN_MODE	0x12
-#define OPT_ACCTG_FREQ  0x13
-#define OPT_NO_REQUEUE  0x14
-#define OPT_REQUEUE     0x15
-#define OPT_WCKEY       0x16
+#define OPT_NODES       0x04
+#define OPT_BOOL        0x05
+#define OPT_CORE        0x06
+#define OPT_CONN_TYPE	0x07
+#define OPT_DISTRIB	0x08
+#define OPT_NO_ROTATE	0x09
+#define OPT_GEOMETRY	0x0a
+#define OPT_MULTI	0x0b
+#define OPT_EXCLUSIVE	0x0c
+#define OPT_OVERCOMMIT	0x0d
+#define OPT_OPEN_MODE	0x0e
+#define OPT_ACCTG_FREQ  0x0f
+#define OPT_NO_REQUEUE  0x10
+#define OPT_REQUEUE     0x11
+#define OPT_CPU_BIND    0x12
+#define OPT_MEM_BIND    0x13
+#define OPT_WCKEY       0x14
 
 /* generic getopt_long flags, integers and *not* valid characters */
 #define LONG_OPT_PROPAGATE   0x100
+#define LONG_OPT_CPU_BIND    0x101
+#define LONG_OPT_MEM_BIND    0x102
 #define LONG_OPT_JOBID       0x105
 #define LONG_OPT_TMP         0x106
 #define LONG_OPT_MEM         0x107
@@ -133,6 +140,9 @@
 #define LONG_OPT_OPEN_MODE       0x147
 #define LONG_OPT_ACCTG_FREQ      0x148
 #define LONG_OPT_WCKEY           0x149
+#define LONG_OPT_RESERVATION     0x14a
+#define LONG_OPT_CHECKPOINT      0x14b
+#define LONG_OPT_CHECKPOINT_DIR  0x14c
 
 /*---- global variables, defined in opt.h ----*/
 opt_t opt;
@@ -241,7 +251,10 @@ static void _opt_default()
 	opt.ntasks_per_node      = 0;      /* ntask max limits */
 	opt.ntasks_per_socket    = NO_VAL;
 	opt.ntasks_per_core      = NO_VAL;
-	opt.cpu_bind_type = 0;		/* local dummy variable for now */
+	opt.cpu_bind_type = 0;
+	opt.cpu_bind = NULL;
+	opt.mem_bind_type = 0;
+	opt.mem_bind = NULL;
 	opt.time_limit = NO_VAL;
 	opt.partition = NULL;
 
@@ -298,7 +311,54 @@ static void _opt_default()
 	opt.get_user_env_time = -1;
 	opt.get_user_env_mode = -1;
 	opt.acctg_freq        = -1;
-	opt.wckey = NULL;
+	opt.reservation       = NULL;
+	opt.wckey             = NULL;
+
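+	/* No periodic checkpointing by default (interval 0); checkpoint
+	 * images go to the job's working directory. */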
+	opt.ckpt_interval = 0;
+	opt.ckpt_interval_str = NULL;
+	opt.ckpt_dir = xstrdup(opt.cwd);
+}
+
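+/* Translate a task distribution enum into the corresponding
+ * node-level and lllp (socket/core level) distribution strings. */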
+static void _set_distribution(task_dist_states_t distribution,
+			      char **dist, char **lllp_dist)
+{
+	if (((int)distribution >= 0)
+	&&  (distribution != SLURM_DIST_UNKNOWN)) {
+		switch(distribution) {
+		case SLURM_DIST_CYCLIC:
+			*dist      = "cyclic";
+			break;
+		case SLURM_DIST_BLOCK:
+			*dist      = "block";
+			break;
+		case SLURM_DIST_PLANE:
+			*dist      = "plane";
+			*lllp_dist = "plane";
+			break;
+		case SLURM_DIST_ARBITRARY:
+			*dist      = "arbitrary";
+			break;
+		case SLURM_DIST_CYCLIC_CYCLIC:
+			*dist      = "cyclic";
+			*lllp_dist = "cyclic";
+			break;
+		case SLURM_DIST_CYCLIC_BLOCK:
+			*dist      = "cyclic";
+			*lllp_dist = "block";
+			break;
+		case SLURM_DIST_BLOCK_CYCLIC:
+			*dist      = "block";
+			*lllp_dist = "cyclic";
+			break;
+		case SLURM_DIST_BLOCK_BLOCK:
+			*dist      = "block";
+			*lllp_dist = "block";
+			break;
+		default:
+			error("unknown distribution type %d", distribution);
+			break;
+		}
+	}
 }
 
 /*---[ env var processing ]-----------------------------------------------*/
@@ -322,30 +382,34 @@ struct env_vars {
 
 env_vars_t env_vars[] = {
   {"SBATCH_ACCOUNT",       OPT_STRING,     &opt.account,       NULL           },
+  {"SBATCH_ACCTG_FREQ",    OPT_INT,        &opt.acctg_freq,    NULL           },
   {"SBATCH_BLRTS_IMAGE",   OPT_STRING,     &opt.blrtsimage,    NULL           },
   {"SBATCH_CONN_TYPE",     OPT_CONN_TYPE,  NULL,               NULL           },
+  {"SBATCH_CPU_BIND",      OPT_CPU_BIND,   NULL,               NULL           },
   {"SBATCH_DEBUG",         OPT_DEBUG,      NULL,               NULL           },
   {"SBATCH_DISTRIBUTION",  OPT_DISTRIB ,   NULL,               NULL           },
+  {"SBATCH_EXCLUSIVE",     OPT_EXCLUSIVE,  NULL,               NULL           },
   {"SBATCH_GEOMETRY",      OPT_GEOMETRY,   NULL,               NULL           },
   {"SBATCH_IMMEDIATE",     OPT_BOOL,       &opt.immediate,     NULL           },
   {"SBATCH_JOBID",         OPT_INT,        &opt.jobid,         NULL           },
   {"SBATCH_JOB_NAME",      OPT_STRING,     &opt.job_name,      NULL           },
   {"SBATCH_LINUX_IMAGE",   OPT_STRING,     &opt.linuximage,    NULL           },
   {"SBATCH_CNLOAD_IMAGE",  OPT_STRING,     &opt.linuximage,    NULL           },
+  {"SBATCH_MEM_BIND",      OPT_MEM_BIND,   NULL,               NULL           },
   {"SBATCH_MLOADER_IMAGE", OPT_STRING,     &opt.mloaderimage,  NULL           },
+  {"SBATCH_NETWORK",       OPT_STRING,     &opt.network,       NULL           },
   {"SBATCH_NO_REQUEUE",    OPT_NO_REQUEUE, NULL,               NULL           },
-  {"SBATCH_REQUEUE",       OPT_REQUEUE,    NULL,               NULL           },
   {"SBATCH_NO_ROTATE",     OPT_BOOL,       &opt.no_rotate,     NULL           },
+  {"SBATCH_OPEN_MODE",     OPT_OPEN_MODE,  NULL,               NULL           },
   {"SBATCH_OVERCOMMIT",    OPT_OVERCOMMIT, NULL,               NULL           },
   {"SBATCH_PARTITION",     OPT_STRING,     &opt.partition,     NULL           },
   {"SBATCH_RAMDISK_IMAGE", OPT_STRING,     &opt.ramdiskimage,  NULL           },
   {"SBATCH_IOLOAD_IMAGE",  OPT_STRING,     &opt.ramdiskimage,  NULL           },
   {"SBATCH_TIMELIMIT",     OPT_STRING,     &opt.time_limit_str,NULL           },
-  {"SBATCH_EXCLUSIVE",     OPT_EXCLUSIVE,  NULL,               NULL           },
-  {"SBATCH_OPEN_MODE",     OPT_OPEN_MODE,  NULL,               NULL           },
-  {"SBATCH_ACCTG_FREQ",    OPT_INT,        &opt.acctg_freq,    NULL           },
-  {"SBATCH_NETWORK",       OPT_STRING,     &opt.network,       NULL           },
+  {"SBATCH_REQUEUE",       OPT_REQUEUE,    NULL,               NULL           },
   {"SBATCH_WCKEY",         OPT_STRING,     &opt.wckey,         NULL           },
+  {"SBATCH_CHECKPOINT",    OPT_STRING,     &opt.ckpt_interval_str, NULL       },
+  {"SBATCH_CHECKPOINT_DIR",OPT_STRING,     &opt.ckpt_dir,      NULL           },
   {NULL, 0, NULL, NULL}
 };
 
@@ -416,13 +480,23 @@ _process_env_var(env_vars_t *e, const char *val)
 		}
 		break;
 
+	case OPT_CPU_BIND:
+		if (slurm_verify_cpu_bind(val, &opt.cpu_bind,
+					  &opt.cpu_bind_type))
+			exit(1);
+		break;
+
+	case OPT_MEM_BIND:
+		if (slurm_verify_mem_bind(val, &opt.mem_bind,
+					  &opt.mem_bind_type))
+			exit(1);
+		break;
+
 	case OPT_DISTRIB:
 		opt.distribution = verify_dist_type(optarg, 
 						    &opt.plane_size);
 		if (opt.distribution == SLURM_DIST_UNKNOWN)
 			error("distribution type `%s' is invalid", optarg);
-		else
-			setenv("SLURM_DISTRIBUTION", optarg, 1);
 		break;
 
 	case OPT_NODES:
@@ -514,7 +588,7 @@ static struct option long_options[] = {
 	{"overcommit",    no_argument,       0, 'O'},
 	{"partition",     required_argument, 0, 'p'},
 	{"dependency",    required_argument, 0, 'P'},
-	{"quiet",         no_argument,       0, 'q'},
+	{"quiet",         no_argument,       0, 'Q'},
 	{"no-rotate",     no_argument,       0, 'R'},
 	{"share",         no_argument,       0, 's'},
 	{"time",          required_argument, 0, 't'},
@@ -531,8 +605,6 @@ static struct option long_options[] = {
 	{"mincores",      required_argument, 0, LONG_OPT_MINCORES},
 	{"minthreads",    required_argument, 0, LONG_OPT_MINTHREADS},
 	{"mem",           required_argument, 0, LONG_OPT_MEM},
-	{"job-mem",       required_argument, 0, LONG_OPT_MEM_PER_CPU},
-	{"task-mem",      required_argument, 0, LONG_OPT_MEM_PER_CPU},
 	{"mem-per-cpu",   required_argument, 0, LONG_OPT_MEM_PER_CPU},
 	{"hint",          required_argument, 0, LONG_OPT_HINT},
 	{"tmp",           required_argument, 0, LONG_OPT_TMP},
@@ -567,12 +639,17 @@ static struct option long_options[] = {
 	{"acctg-freq",    required_argument, 0, LONG_OPT_ACCTG_FREQ},
 	{"propagate",     optional_argument, 0, LONG_OPT_PROPAGATE},
 	{"network",       required_argument, 0, LONG_OPT_NETWORK},
-	{"wckey",            required_argument, 0, LONG_OPT_WCKEY},
+	{"cpu_bind",      required_argument, 0, LONG_OPT_CPU_BIND},
+	{"mem_bind",      required_argument, 0, LONG_OPT_MEM_BIND},
+	{"wckey",         required_argument, 0, LONG_OPT_WCKEY},
+	{"reservation",   required_argument, 0, LONG_OPT_RESERVATION},
+	{"checkpoint",    required_argument, 0, LONG_OPT_CHECKPOINT},
+	{"checkpoint-dir",required_argument, 0, LONG_OPT_CHECKPOINT_DIR},
 	{NULL,            0,                 0, 0}
 };
 
 static char *opt_string =
-	"+a:bB:c:C:d:D:e:F:g:hHi:IJ:kL:m:n:N:o:Op:P:qR:st:uU:vVw:x:";
+	"+bB:c:C:d:D:e:F:g:hHi:IJ:kL:m:n:N:o:Op:P:QRst:uU:vVw:x:";
 
 
 /*
@@ -595,6 +672,11 @@ char *process_options_first_pass(int argc, char **argv)
 	int opt_char, option_index = 0;
 	char *str = NULL;
 
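+	/* Build an option table merging sbatch's own long options with
+	 * any options registered by SPANK plugins */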
+	struct option *optz = spank_option_table_create(long_options);
+
+	if (!optz)
+		fatal("Unable to create options table");
+
 	/* initialize option defaults */
 	_opt_default();
 
@@ -602,7 +684,7 @@ char *process_options_first_pass(int argc, char **argv)
 	optind = 0;
 
 	while((opt_char = getopt_long(argc, argv, opt_string,
-				      long_options, &option_index)) != -1) {
+				      optz, &option_index)) != -1) {
 		switch (opt_char) {
 		case '?':
 			fprintf(stderr, "Try \"sbatch --help\" for more "
@@ -613,7 +695,7 @@ char *process_options_first_pass(int argc, char **argv)
 			_help();
 			exit(0);
 			break;
-		case 'q':
+		case 'Q':
 			opt.quiet++;
 			break;
 		case 'u':
@@ -635,6 +717,7 @@ char *process_options_first_pass(int argc, char **argv)
 		}
 	}
 	xfree(str);
+	spank_option_table_destroy(optz);
 
 	if (argc > optind && opt.wrap != NULL) {
 		fatal("Script arguments are not permitted with the"
@@ -922,9 +1005,14 @@ static void _set_options(int argc, char **argv)
 	int opt_char, option_index = 0;
 	char *tmp;
 
+	struct option *optz = spank_option_table_create(long_options);
+
+	if (!optz)
+		fatal("Unable to create options table");
+
 	optind = 0;
 	while((opt_char = getopt_long(argc, argv, opt_string,
-				      long_options, &option_index)) != -1) {
+				      optz, &option_index)) != -1) {
 		switch (opt_char) {
 		case '?':
 			fatal("Try \"sbatch --help\" for more information");
@@ -1022,7 +1110,6 @@ static void _set_options(int argc, char **argv)
 				      "is not recognized", optarg);
 				exit(1);
 			} 
-			setenv("SLURM_DISTRIBUTION", optarg, 1);
 			break;
 		case 'n':
 			opt.nprocs_set = true;
@@ -1056,11 +1143,11 @@ static void _set_options(int argc, char **argv)
 			break;
 		case 'd':
 		case 'P':
-			/* use -P instead */
+			/* use -P instead of -d (deprecated) */
 			xfree(opt.dependency);
 			opt.dependency = xstrdup(optarg);
 			break;
-		case 'q':
+		case 'Q':
 			opt.quiet++;
 			break;
 		case 'R':
@@ -1110,6 +1197,16 @@ static void _set_options(int argc, char **argv)
                 case LONG_OPT_EXCLUSIVE:
                         opt.shared = 0;
                         break;
+                case LONG_OPT_CPU_BIND:
+			if (slurm_verify_cpu_bind(optarg, &opt.cpu_bind,
+						  &opt.cpu_bind_type))
+				exit(1);
+			break;
+		case LONG_OPT_MEM_BIND:
+			if (slurm_verify_mem_bind(optarg, &opt.mem_bind,
+						  &opt.mem_bind_type))
+				exit(1);
+			break;
 		case LONG_OPT_MINCPU:
 			opt.mincpus = _get_int(optarg, "mincpus");
 			if (opt.mincpus < 0) {
@@ -1336,15 +1433,30 @@ static void _set_options(int argc, char **argv)
 			xfree(opt.wckey);
 			opt.wckey = xstrdup(optarg);
 			break;
+		case LONG_OPT_RESERVATION:
+			xfree(opt.reservation);
+			opt.reservation = xstrdup(optarg);
+			break;
+		case LONG_OPT_CHECKPOINT:
+			xfree(opt.ckpt_interval_str);
+			opt.ckpt_interval_str = xstrdup(optarg);
+			break;
+		case LONG_OPT_CHECKPOINT_DIR:
+			xfree(opt.ckpt_dir);
+			opt.ckpt_dir = xstrdup(optarg);
+			break;
 		default:
-			fatal("Unrecognized command line parameter %c",
-			      opt_char);
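+			/* Not a native sbatch option; give SPANK plugins
+			 * a chance to handle it before failing */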
+			if (spank_process_option (opt_char, optarg) < 0)
+				fatal("Unrecognized command line parameter %c",
+						opt_char);
 		}
 	}
 
 	if (optind < argc) {
 		fatal("Invalid argument: %s", argv[optind]);
 	}
+
+	spank_option_table_destroy (optz);
 }
 
 static void _proc_get_user_env(char *optarg)
@@ -1738,9 +1850,10 @@ static void _parse_pbs_resource_list(char *rl)
 static bool _opt_verify(void)
 {
 	bool verified = true;
+	char *dist = NULL, *lllp_dist = NULL;
 
 	if (opt.quiet && opt.verbose) {
-		error ("don't specify both --verbose (-v) and --quiet (-q)");
+		error ("don't specify both --verbose (-v) and --quiet (-Q)");
 		verified = false;
 	}
 
@@ -1758,21 +1871,50 @@ static bool _opt_verify(void)
 
 	/* check for realistic arguments */
 	if (opt.nprocs <= 0) {
-		error("%s: invalid number of processes (-n %d)",
-		      opt.progname, opt.nprocs);
+		error("invalid number of processes (-n %d)", opt.nprocs);
 		verified = false;
 	}
 
 	if (opt.cpus_per_task <= 0) {
-		error("%s: invalid number of cpus per task (-c %d)\n",
-		      opt.progname, opt.cpus_per_task);
+		error("invalid number of cpus per task (-c %d)",
+		      opt.cpus_per_task);
 		verified = false;
 	}
 
 	if ((opt.min_nodes < 0) || (opt.max_nodes < 0) || 
 	    (opt.max_nodes && (opt.min_nodes > opt.max_nodes))) {
-		error("%s: invalid number of nodes (-N %d-%d)\n",
-		      opt.progname, opt.min_nodes, opt.max_nodes);
+		error("invalid number of nodes (-N %d-%d)",
+		      opt.min_nodes, opt.max_nodes);
+		verified = false;
+	}
+
+#ifdef HAVE_BGL
+	if (opt.blrtsimage && strchr(opt.blrtsimage, ' ')) {
+		error("invalid BlrtsImage given '%s'", opt.blrtsimage);
+		verified = false;
+	}
+#endif
+
+	if (opt.linuximage && strchr(opt.linuximage, ' ')) {
+#ifdef HAVE_BGL
+		error("invalid LinuxImage given '%s'", opt.linuximage);
+#else
+		error("invalid CnloadImage given '%s'", opt.linuximage);
+#endif
+		verified = false;
+	}
+
+	if (opt.mloaderimage && strchr(opt.mloaderimage, ' ')) {
+		error("invalid MloaderImage given '%s'", opt.mloaderimage);
+		verified = false;
+	}
+
+	if (opt.ramdiskimage && strchr(opt.ramdiskimage, ' ')) {
+#ifdef HAVE_BGL
+		error("invalid RamDiskImage given '%s'", opt.ramdiskimage);
+#else
+		error("invalid IoloadImage given '%s'", opt.ramdiskimage);
+#endif
 		verified = false;
 	}
 
@@ -1810,6 +1952,23 @@ static bool _opt_verify(void)
 		}
 	}
 
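+	/* Export the resolved task distribution to the environment so
+	 * the batch script and any srun invocations inherit it */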
+	_set_distribution(opt.distribution, &dist, &lllp_dist);
+	if (dist) {
+		if (setenvf(NULL, "SLURM_DISTRIBUTION", "%s", dist))
+			error("Can't set SLURM_DISTRIBUTION env variable");
+	}
+
+	if (opt.distribution == SLURM_DIST_PLANE) {
+		if (setenvf(NULL, "SLURM_DIST_PLANESIZE", "%d",
+			    opt.plane_size))
+			error("Can't set SLURM_DIST_PLANESIZE env variable");
+	}
+
+	if (lllp_dist) {
+		if (setenvf(NULL, "SLURM_DIST_LLLP", "%s", lllp_dist))
+			error("Can't set SLURM_DIST_LLLP env variable");
+	}
+
 	/* bound max_threads/cores from ntasks_cores/sockets */ 
 	if ((opt.max_threads_per_core <= 0) &&
 	    (opt.ntasks_per_core > 0)) {
@@ -1884,6 +2043,14 @@ static bool _opt_verify(void)
 			opt.time_limit = INFINITE;
 	}
 
+	if (opt.ckpt_interval_str) {
+		opt.ckpt_interval = time_str2mins(opt.ckpt_interval_str);
+		if ((opt.ckpt_interval < 0) && (opt.ckpt_interval != INFINITE)) {
+			error("Invalid checkpoint interval specification");
+			exit(1);
+		}
+	}
+
 	if ((opt.euid != (uid_t) -1) && (opt.euid != opt.uid)) 
 		opt.uid = opt.euid;
 
@@ -1907,8 +2074,6 @@ static bool _opt_verify(void)
 		else
 			setenvf(NULL, "SLURM_OPEN_MODE", "t");
 	}
-	if (opt.cpus_per_task > 1)
-		setenvfs("SLURM_CPUS_PER_TASK=%d", opt.cpus_per_task); 
 	if (opt.dependency)
 		setenvfs("SLURM_JOB_DEPENDENCY=%s", opt.dependency);
 
@@ -1921,6 +2086,30 @@ static bool _opt_verify(void)
 	setenv("SLURM_NETWORK", opt.network, 1);
 #endif
 
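+	/* Finalize any CPU/memory binding request and export it to the
+	 * batch environment unless the user has already set it */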
+	if (slurm_verify_cpu_bind(NULL, &opt.cpu_bind,
+				  &opt.cpu_bind_type))
+		exit(1);
+	if (opt.cpu_bind_type && (getenv("SBATCH_CPU_BIND") == NULL)) {
+		char tmp[64];
+		slurm_sprint_cpu_bind_type(tmp, opt.cpu_bind_type);
+		if (opt.cpu_bind) {
+			setenvf(NULL, "SBATCH_CPU_BIND", "%s:%s", 
+				tmp, opt.cpu_bind);
+		} else {
+			setenvf(NULL, "SBATCH_CPU_BIND", "%s", tmp);
+		}
+	}
+	if (opt.mem_bind_type && (getenv("SBATCH_MEM_BIND") == NULL)) {
+		char tmp[64];
+		slurm_sprint_mem_bind_type(tmp, opt.mem_bind_type);
+		if (opt.mem_bind) {
+			setenvf(NULL, "SBATCH_MEM_BIND", "%s:%s", 
+				tmp, opt.mem_bind);
+		} else {
+			setenvf(NULL, "SBATCH_MEM_BIND", "%s", tmp);
+		}
+	}
+
 	return verified;
 }
 
@@ -2048,82 +2237,84 @@ static void _opt_list()
 	char *str;
 
 	info("defined options for program `%s'", opt.progname);
-	info("--------------- ---------------------");
+	info("----------------- ---------------------");
 
-	info("user           : `%s'", opt.user);
-	info("uid            : %ld", (long) opt.uid);
-	info("gid            : %ld", (long) opt.gid);
-	info("cwd            : %s", opt.cwd);
-	info("nprocs         : %d %s", opt.nprocs,
+	info("user              : `%s'", opt.user);
+	info("uid               : %ld", (long) opt.uid);
+	info("gid               : %ld", (long) opt.gid);
+	info("cwd               : %s", opt.cwd);
+	info("nprocs            : %d %s", opt.nprocs,
 		opt.nprocs_set ? "(set)" : "(default)");
-	info("cpus_per_task  : %d %s", opt.cpus_per_task,
+	info("cpus_per_task     : %d %s", opt.cpus_per_task,
 		opt.cpus_set ? "(set)" : "(default)");
 	if (opt.max_nodes)
-		info("nodes          : %d-%d", opt.min_nodes, opt.max_nodes);
+		info("nodes             : %d-%d", opt.min_nodes, opt.max_nodes);
 	else {
-		info("nodes          : %d %s", opt.min_nodes,
+		info("nodes             : %d %s", opt.min_nodes,
 			opt.nodes_set ? "(set)" : "(default)");
 	}
-	info("jobid          : %u %s", opt.jobid, 
+	info("jobid             : %u %s", opt.jobid, 
 		opt.jobid_set ? "(set)" : "(default)");
-	info("partition      : %s",
+	info("partition         : %s",
 		opt.partition == NULL ? "default" : opt.partition);
-	info("job name       : `%s'", opt.job_name);
-	info("wckey          : `%s'", opt.wckey);
-	info("distribution   : %s", format_task_dist_states(opt.distribution));
+	info("job name          : `%s'", opt.job_name);
+	info("reservation       : `%s'", opt.reservation);
+	info("wckey             : `%s'", opt.wckey);
+	info("distribution      : %s",
+	     format_task_dist_states(opt.distribution));
 	if(opt.distribution == SLURM_DIST_PLANE)
-		info("plane size   : %u", opt.plane_size);
-	info("verbose        : %d", opt.verbose);
-	info("immediate      : %s", tf_(opt.immediate));
+		info("plane size        : %u", opt.plane_size);
+	info("verbose           : %d", opt.verbose);
+	info("immediate         : %s", tf_(opt.immediate));
 	if (opt.requeue != NO_VAL)
-		info("requeue        : %u", opt.requeue);
-	info("overcommit     : %s", tf_(opt.overcommit));
+		info("requeue           : %u", opt.requeue);
+	info("overcommit        : %s", tf_(opt.overcommit));
 	if (opt.time_limit == INFINITE)
-		info("time_limit     : INFINITE");
+		info("time_limit        : INFINITE");
 	else if (opt.time_limit != NO_VAL)
-		info("time_limit     : %d", opt.time_limit);
+		info("time_limit        : %d", opt.time_limit);
 	if (opt.nice)
-		info("nice           : %d", opt.nice);
-	info("account        : %s", opt.account);
-	info("comment        : %s", opt.comment);
-	info("dependency     : %s", opt.dependency);
+		info("nice              : %d", opt.nice);
+	info("account           : %s", opt.account);
+	info("comment           : %s", opt.comment);
+	info("dependency        : %s", opt.dependency);
 	str = print_constraints();
-	info("constraints    : %s", str);
+	info("constraints       : %s", str);
 	xfree(str);
 	if (opt.conn_type != (uint16_t) NO_VAL)
-		info("conn_type      : %u", opt.conn_type);
+		info("conn_type         : %u", opt.conn_type);
 	str = print_geometry(opt.geometry);
-	info("geometry       : %s", str);
+	info("geometry          : %s", str);
 	xfree(str);
-	info("reboot         : %s", opt.reboot ? "no" : "yes");
-	info("rotate         : %s", opt.no_rotate ? "yes" : "no");
-	info("network        : %s", opt.network);
+	info("reboot            : %s", opt.reboot ? "yes" : "no");
+	info("rotate            : %s", opt.no_rotate ? "no" : "yes");
+	info("network           : %s", opt.network);
 
 #ifdef HAVE_BGL
 	if (opt.blrtsimage)
-		info("BlrtsImage     : %s", opt.blrtsimage);
+		info("BlrtsImage        : %s", opt.blrtsimage);
 #endif
 	if (opt.linuximage)
 #ifdef HAVE_BGL
-		info("LinuxImage     : %s", opt.linuximage);
+		info("LinuxImage        : %s", opt.linuximage);
 #else
-		info("CnloadImage    : %s", opt.linuximage);
+		info("CnloadImage       : %s", opt.linuximage);
 #endif
 	if (opt.mloaderimage)
-		info("MloaderImage   : %s", opt.mloaderimage);
+		info("MloaderImage      : %s", opt.mloaderimage);
 	if (opt.ramdiskimage)
 #ifdef HAVE_BGL
-		info("RamDiskImage   : %s", opt.ramdiskimage);
+		info("RamDiskImage      : %s", opt.ramdiskimage);
 #else
-		info("IoloadImage   : %s", opt.ramdiskimage);
+		info("IoloadImage       : %s", opt.ramdiskimage);
 #endif
 	if (opt.begin) {
 		char time_str[32];
 		slurm_make_time_str(&opt.begin, time_str, sizeof(time_str));
-		info("begin          : %s", time_str);
+		info("begin             : %s", time_str);
 	}
-	info("mail_type      : %s", print_mail_type(opt.mail_type));
-	info("mail_user      : %s", opt.mail_user);
+	info("mail_type         : %s", print_mail_type(opt.mail_type));
+	info("mail_user         : %s", opt.mail_user);
 	info("sockets-per-node  : %d - %d", opt.min_sockets_per_node,
 					    opt.max_sockets_per_node);
 	info("cores-per-socket  : %d - %d", opt.min_cores_per_socket,
@@ -2133,11 +2324,15 @@ static void _opt_list()
 	info("ntasks-per-node   : %d", opt.ntasks_per_node);
 	info("ntasks-per-socket : %d", opt.ntasks_per_socket);
 	info("ntasks-per-core   : %d", opt.ntasks_per_core);
+	info("cpu_bind          : %s", 
+	     opt.cpu_bind == NULL ? "default" : opt.cpu_bind);
+	info("mem_bind          : %s",
+	     opt.mem_bind == NULL ? "default" : opt.mem_bind);
 	info("plane_size        : %u", opt.plane_size);
-	info("propagate      : %s",
+	info("propagate         : %s",
 	     opt.propagate == NULL ? "NONE" : opt.propagate);
 	str = print_commandline(opt.script_argc, opt.script_argv);
-	info("remote command : `%s'", str);
+	info("remote command    : `%s'", str);
 	xfree(str);
 
 }
@@ -2168,6 +2363,7 @@ static void _usage(void)
 "              [--requeue] [--no-requeue] [--ntasks-per-node=n] [--propagate]\n"
 "              [--nodefile=file] [--nodelist=hosts] [--exclude=hosts]\n"
 "              [--network=type] [--mem-per-cpu=MB]\n"
+"              [--cpu_bind=...] [--mem_bind=...] [--reservation=name]\n"
 "              executable [args...]\n");
 }
 
@@ -2179,53 +2375,54 @@ static void _help(void)
 "Usage: sbatch [OPTIONS...] executable [args...]\n"
 "\n"
 "Parallel run options:\n"
-"  -n, --ntasks=ntasks         number of tasks to run\n"
-"  -N, --nodes=N               number of nodes on which to run (N = min[-max])\n"
+"      --begin=time            defer job until HH:MM DD/MM/YY\n"
 "  -c, --cpus-per-task=ncpus   number of cpus required per task\n"
-"      --ntasks-per-node=n     number of tasks to invoke on each node\n"
-"  -i, --input=in              file for batch script's standard input\n"
-"  -o, --output=out            file for batch script's standard output\n"
+"      --comment=name          arbitrary comment\n"
+"  -D, --workdir=directory     set working directory for batch script\n"
 "  -e, --error=err             file for batch script's standard error\n"
-"  -p, --partition=partition   partition requested\n"
+"      --get-user-env          used by Moab.  See srun man page.\n"
+"      --gid=group_id          group ID to run job as (user root only)\n"
 "  -H, --hold                  submit job in held state\n"
-"  -t, --time=minutes          time limit\n"
-"  -D, --chdir=path            change remote current working directory\n"
+"  -i, --input=in              file for batch script's standard input\n"
 "  -I, --immediate             exit if resources are not immediately available\n"
-"  -k, --no-kill               do not kill job on node failure\n"
-"  -s, --share                 share nodes with other jobs\n"
+"      --jobid=id              run under already allocated job\n"
 "  -J, --job-name=jobname      name of job\n"
+"  -k, --no-kill               do not kill job on node failure\n"
+"  -L, --licenses=names        required license, comma separated\n"
 "  -m, --distribution=type     distribution method for processes to nodes\n"
 "                              (type = block|cyclic|arbitrary)\n"
-"      --jobid=id              run under already allocated job\n"
-"  -v, --verbose               verbose mode (multiple -v's increase verbosity)\n"
-"  -q, --quiet                 quiet mode (suppress informational messages)\n"
-"  -P, --dependency=type:jobid defer job until condition on jobid is satisfied\n"
-"  -D, --workdir=directory     set working directory for batch script\n"
+"      --mail-type=type        notify on state change: BEGIN, END, FAIL or ALL\n"
+"      --mail-user=user        user to receive email notification of job\n"
+"                              state changes\n"
+"  -n, --ntasks=ntasks         number of tasks to run\n"
 "      --nice[=value]          decrease secheduling priority by value\n"
+"      --no-requeue            if set, do not permit the job to be requeued\n"
+"      --ntasks-per-node=n     number of tasks to invoke on each node\n"
+"  -N, --nodes=N               number of nodes on which to run (N = min[-max])\n"
+"  -o, --output=out            file for batch script's standard output\n"
 "  -O, --overcommit            overcommit resources\n"
+"  -p, --partition=partition   partition requested\n"
+"      --propagate[=rlimits]   propagate all [or specific list of] rlimits\n"
+"  -P, --dependency=type:jobid defer job until condition on jobid is satisfied\n"
+"  -Q, --quiet                 quiet mode (suppress informational messages)\n"
+"      --requeue               if set, permit the job to be requeued\n"
+"  -t, --time=minutes          time limit\n"
+"  -s, --share                 share nodes with other jobs\n"
 "  -U, --account=name          charge job to specified account\n"
-"      --begin=time            defer job until HH:MM DD/MM/YY\n"
-"      --comment=name          arbitrary comment\n"
-"  -L, --licenses=names        required license, comma separated\n"
-"      --mail-type=type        notify on state change: BEGIN, END, FAIL or ALL\n"
-"      --mail-user=user        who to send email notification for job state changes\n"
-"      --gid=group_id          group ID to run job as (user root only)\n"
 "      --uid=user_id           user ID to run job as (user root only)\n"
-"      --get-user-env          used by Moab.  See srun man page.\n"
-"      --no-requeue            if set, do not permit the job to be requeued\n"
-"      --requeue               if set, permit the job to be requeued\n"
-"      --propagate[=rlimits]   propagate all [or specific list of] rlimits\n"
+"  -v, --verbose               verbose mode (multiple -v's increase verbosity)\n"
 "\n"
 "Constraint options:\n"
+"  -C, --constraint=list       specify a list of constraints\n"
+"  -F, --nodefile=filename     request a specific list of hosts\n"
+"      --mem=MB                minimum amount of real memory\n"
 "      --mincpus=n             minimum number of cpus per node\n"
 "      --minsockets=n          minimum number of sockets per node\n"
 "      --mincores=n            minimum number of cores per cpu\n"
 "      --minthreads=n          minimum number of threads per core\n"
-"      --mem=MB                minimum amount of real memory\n"
+"      --reservation=name      allocate resources from named reservation\n"
 "      --tmp=MB                minimum amount of temporary disk\n"
 "      --contiguous            demand a contiguous range of nodes\n"
-"  -C, --constraint=list       specify a list of constraints\n"
-"  -F, --nodefile=filename     request a specific list of hosts\n"
 "  -w, --nodelist=hosts...     request a specific list of hosts\n"
 "  -x, --exclude=hosts...      exclude a specific list of hosts\n"
 "\n"
@@ -2236,25 +2433,31 @@ static void _help(void)
 "                              allocated to the job.\n" 
 "\n"
 "Affinity/Multi-core options: (when the task/affinity plugin is enabled)\n" 
-"  -B --extra-node-info=S[:C[:T]]            Expands to:\n"
-"      --sockets-per-node=S    number of sockets per node to allocate\n"
-"      --cores-per-socket=C    number of cores per socket to allocate\n"
-"      --threads-per-core=T    number of threads per core to allocate\n"
+"  -B  --extra-node-info=S[:C[:T]]            Expands to:\n"
+"       --sockets-per-node=S   number of sockets per node to allocate\n"
+"       --cores-per-socket=C   number of cores per socket to allocate\n"
+"       --threads-per-core=T   number of threads per core to allocate\n"
 "                              each field can be 'min[-max]' or wildcard '*'\n"
 "                              total cpus requested = (N x S x C x T)\n"
 "\n"
-"      --ntasks-per-socket=n   number of tasks to invoke on each socket\n"
-"      --ntasks-per-core=n     number of tasks to invoke on each core\n");
+"      --ntasks-per-core=n     number of tasks to invoke on each core\n"
+"      --ntasks-per-socket=n   number of tasks to invoke on each socket\n");
 	conf = slurm_conf_lock();
 	if (conf->task_plugin != NULL
 	    && strcasecmp(conf->task_plugin, "task/affinity") == 0) {
 		printf(
+"      --cpu_bind=             Bind tasks to CPUs\n"
+"                              (see \"--cpu_bind=help\" for options)\n"
 "      --hint=                 Bind tasks according to application hints\n"
-"                              (see \"--hint=help\" for options)\n");
+"                              (see \"--hint=help\" for options)\n"
+"      --mem_bind=             Bind memory to locality domains (ldom)\n"
+"                              (see \"--mem_bind=help\" for options)\n");
 	}
 	slurm_conf_unlock();
 
-        printf("\n"
+	spank_print_options (stdout, 6, 30);
+
+	printf(
 #ifdef HAVE_AIX				/* AIX/Federation specific options */
 "AIX related options:\n"
 "      --network=type          communication protocol to be used\n"
@@ -2276,10 +2479,14 @@ static void _help(void)
 "      --mloader-image=path    path to mloader image for bluegene block.  Default if not set\n"
 "      --ioload-image=path     path to ioload image for bluegene block.  Default if not set\n"
 #else
-"      --blrts-image=path      path to blrts image for bluegene block.  Default if not set\n"
-"      --linux-image=path      path to linux image for bluegene block.  Default if not set\n"
-"      --mloader-image=path    path to mloader image for bluegene block.  Default if not set\n"
-"      --ramdisk-image=path    path to ramdisk image for bluegene block.  Default if not set\n"
+"      --blrts-image=path      path to blrts image for bluegene block.  Default\n"
+"                              if not set\n"
+"      --linux-image=path      path to linux image for bluegene block.  Default\n"
+"                              if not set\n"
+"      --mloader-image=path    path to mloader image for bluegene block.\n"
+"                              Default if not set\n"
+"      --ramdisk-image=path    path to ramdisk image for bluegene block.\n"
+"                              Default if not set\n"
 #endif
 #endif
 "\n"
diff --git a/src/sbatch/opt.h b/src/sbatch/opt.h
index 88ee97d5e0f665c77e764c31d5a6b62ac5d2a628..97aab2d64e49244ea4834a29160dd3d77fb24a73 100644
--- a/src/sbatch/opt.h
+++ b/src/sbatch/opt.h
@@ -1,15 +1,16 @@
 /*****************************************************************************\
  *  opt.h - definitions for srun option processing
- *  $Id: opt.h 15808 2008-12-02 23:38:47Z da $
  *****************************************************************************
- *  Copyright (C) 2002-2006 The Regents of the University of California.
+ *  Copyright (C) 2002-2007 The Regents of the University of California.
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Mark Grondona <grondona1@llnl.gov>,
  *    Christopher J. Morrone <morrone2@llnl.gov>, et. al.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -74,6 +75,9 @@ typedef struct sbatch_options {
 	int ntasks_per_socket; /* --ntasks-per-socket=n     */
 	int ntasks_per_core;   /* --ntasks-per-core=n	    */
 	cpu_bind_type_t cpu_bind_type; /* --cpu_bind=           */
+	char *cpu_bind;		/* binding map for map/mask_cpu */
+	mem_bind_type_t mem_bind_type; /* --mem_bind=		*/
+	char *mem_bind;		/* binding map for map/mask_mem	*/
 	bool extra_set;		/* true if extra node info explicitly set */
 	int  time_limit;	/* --time,   -t	(int minutes)	*/
 	char *time_limit_str;	/* --time,   -t (string)	*/
@@ -141,6 +145,10 @@ typedef struct sbatch_options {
 	int get_user_env_time;	/* --get-user-env[=timeout]	*/
 	int get_user_env_mode;	/* --get-user-env=[S|L]         */
 	char *wckey;            /* --wckey workload characterization key */
+	char *reservation;      /* --reservation */
+	int ckpt_interval;	/* --checkpoint (int minutes)   */
+	char *ckpt_interval_str;/* --checkpoint (string)        */
+	char *ckpt_dir;		/* --checkpoint-dir (string)    */
 } opt_t;
 
 extern opt_t opt;
diff --git a/src/sbatch/sbatch.c b/src/sbatch/sbatch.c
index 6fa685f7ede7cd20036dd600632d6cf5b284b65d..752a511798c81eb2fc4fb16593d3d1338540bf14 100644
--- a/src/sbatch/sbatch.c
+++ b/src/sbatch/sbatch.c
@@ -1,16 +1,15 @@
 /*****************************************************************************\
- *  sbatch.c - Submit a SLURM batch script.
- *
- *  $Id: sbatch.c 15808 2008-12-02 23:38:47Z da $
+ *  sbatch.c - Submit a SLURM batch script.
  *****************************************************************************
  *  Copyright (C) 2006-2007 The Regents of the University of California.
- *  Copyright (C) 2008 Lawrence Livermore National Security.
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Christopher J. Morrone <morrone2@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -37,6 +36,7 @@
 #include <unistd.h>
 #include <sys/types.h>
 #include <sys/stat.h>
+#include <sys/param.h>               /* MAXPATHLEN */
 #include <fcntl.h>
 
 #include <slurm/slurm.h>
@@ -46,17 +46,19 @@
 #include "src/common/slurm_rlimits_info.h"
 #include "src/common/xstring.h"
 #include "src/common/xmalloc.h"
+#include "src/common/plugstack.h"
 
 #include "src/sbatch/opt.h"
 
-#define MAX_RETRIES 3
+#define MAX_RETRIES 15
 
 static int   fill_job_desc_from_opts(job_desc_msg_t *desc);
 static void *get_script_buffer(const char *filename, int *size);
-static void  set_prio_process_env(void);
-static int   set_umask_env(void);
 static char *script_wrap(char *command_string);
-static int  _set_rlimit_env(void);
+static void  _set_prio_process_env(void);
+static int   _set_rlimit_env(void);
+static void  _set_submit_dir_env(void);
+static int   _set_umask_env(void);
 
 int main(int argc, char *argv[])
 {
@@ -69,6 +71,15 @@ int main(int argc, char *argv[])
 	int retries = 0;
 
 	log_init(xbasename(argv[0]), logopt, 0, NULL);
+
+	if (spank_init_allocator() < 0)
+		fatal("Failed to initialize plugin stack");
+
+	/* Be sure to call spank_fini when sbatch exits
+	 */
+	if (atexit((void (*) (void)) spank_fini) < 0)
+		error("Failed to register atexit handler for plugins: %m");
+
 	script_name = process_options_first_pass(argc, argv);
 	/* reinit log with new verbosity (if changed by command line) */
 	if (opt.verbose || opt.quiet) {
@@ -91,6 +102,9 @@ int main(int argc, char *argv[])
 		fatal("sbatch parameter parsing");
 	}
 
+	if (spank_init_post_opt() < 0)
+		fatal("Plugin stack post-option processing failed");
+
 	if (opt.get_user_env_time < 0) {
 		/* Moab does not propagate the user's resource limits, so
 		 * slurmd determines the values at the same time that it 
@@ -98,8 +112,9 @@ int main(int argc, char *argv[])
 		(void) _set_rlimit_env();
 	}
 
-	set_prio_process_env();
-	set_umask_env();
+	_set_prio_process_env();
+	_set_submit_dir_env();
+	_set_umask_env();
 	slurm_init_job_desc_msg(&desc);
 	if (fill_job_desc_from_opts(&desc) == -1) {
 		exit(2);
@@ -108,16 +123,27 @@ int main(int argc, char *argv[])
 	desc.script = (char *)script_body;
 
 	while (slurm_submit_batch_job(&desc, &resp) < 0) {
-		static char *msg = "Slurm job queue full, sleeping and retrying.";
-
-		if ((errno != ESLURM_ERROR_ON_DESC_TO_RECORD_COPY) ||
-		    (retries >= MAX_RETRIES)) {
+		static char *msg;
+		
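+		/* Only these errno values are treated as transient and
+		 * retried; msg == NULL aborts the submission below */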
+		if (errno == ESLURM_ERROR_ON_DESC_TO_RECORD_COPY)
+			msg = "Slurm job queue full, sleeping and retrying.";
+		else if (errno == ESLURM_NODES_BUSY) {
+			msg = "Job step creation temporarily disabled, "
+			      "retrying";
+		} else if (errno == EAGAIN) {
+			msg = "Slurm temporarily unable to accept job, "
+			      "sleeping and retrying.";
+		} else
+			msg = NULL;
+		if ((msg == NULL) || (retries >= MAX_RETRIES)) {
 			error("Batch job submission failed: %m");
 			exit(3);
 		}
 
 		if (retries)
 			debug(msg);
+		else if (errno == ESLURM_NODES_BUSY)
+			info(msg);	/* Not an error, powering up nodes */
 		else
 			error(msg);
 		sleep (++retries);
@@ -142,9 +168,8 @@ static int fill_job_desc_from_opts(job_desc_msg_t *desc)
 		desc->name = xstrdup(opt.job_name);
 	else
 		desc->name = xstrdup("sbatch");
-
-	if(opt.wckey)
- 		xstrfmtcat(desc->name, "\"%s", opt.wckey);
+	desc->reservation  = xstrdup(opt.reservation);
+	desc->wckey  = xstrdup(opt.wckey);
 
 	desc->req_nodes = opt.nodelist;
 	desc->exc_nodes = opt.exc_nodes;
@@ -161,9 +186,19 @@ static int fill_job_desc_from_opts(job_desc_msg_t *desc)
 	desc->group_id = opt.gid;
 	if (opt.dependency)
 		desc->dependency = xstrdup(opt.dependency);
-	desc->task_dist  = opt.distribution;
+
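+	/* Copy binding options only when explicitly set, so the defaults
+	 * from slurm_init_job_desc_msg() are preserved */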
+	if (opt.cpu_bind)
+		desc->cpu_bind       = opt.cpu_bind;
+	if (opt.cpu_bind_type)
+		desc->cpu_bind_type  = opt.cpu_bind_type;
+	if (opt.mem_bind)
+		desc->mem_bind       = opt.mem_bind;
+	if (opt.mem_bind_type)
+		desc->mem_bind_type  = opt.mem_bind_type;
 	if (opt.plane_size != NO_VAL)
-		desc->plane_size = opt.plane_size;
+		desc->plane_size     = opt.plane_size;
+	desc->task_dist  = opt.distribution;
+
 	desc->network = opt.network;
 	if (opt.nice)
 		desc->nice = NICE_OFFSET + opt.nice;
@@ -271,11 +306,31 @@ static int fill_job_desc_from_opts(job_desc_msg_t *desc)
 	if (opt.acctg_freq >= 0)
 		desc->acctg_freq = opt.acctg_freq;
 
+	desc->ckpt_dir = opt.ckpt_dir;
+	desc->ckpt_interval = (uint16_t)opt.ckpt_interval;
 	return 0;
 }
 
+/* Set SLURM_SUBMIT_DIR environment variable with current state */
+static void _set_submit_dir_env(void)
+{
+	char buf[MAXPATHLEN + 1];
+
+	if (getenv("SLURM_SUBMIT_DIR"))	/* use this value */
+		return;
+
+	if ((getcwd(buf, MAXPATHLEN)) == NULL)
+		fatal("getcwd failed: %m");
+
+	if (setenvf(NULL, "SLURM_SUBMIT_DIR", "%s", buf) < 0) {
+		error ("unable to set SLURM_SUBMIT_DIR in environment");
+		return;
+	}
+	debug ("propagating SUBMIT_DIR=%s", buf);
+}
+
 /* Set SLURM_UMASK environment variable with current state */
-static int set_umask_env(void)
+static int _set_umask_env(void)
 {
 	char mask_char[5];
 	mode_t mask;
@@ -297,13 +352,13 @@ static int set_umask_env(void)
 }
 
 /*
- * set_prio_process_env
+ * _set_prio_process_env
  *
  * Set the internal SLURM_PRIO_PROCESS environment variable to support
  * the propagation of the users nice value and the "PropagatePrioProcess"
  * config keyword.
  */
-static void  set_prio_process_env(void)
+static void  _set_prio_process_env(void)
 {
 	int retval;
 
diff --git a/src/sbcast/Makefile.am b/src/sbcast/Makefile.am
index bedbd116a9d51aeb4ec6b4724dfa0fc3d8f44701..48336a59641c7089df5798e5269eff1658570813 100644
--- a/src/sbcast/Makefile.am
+++ b/src/sbcast/Makefile.am
@@ -6,7 +6,7 @@ AUTOMAKE_OPTIONS = foreign
 INCLUDES = -I$(top_srcdir) $(BG_INCLUDES)
 bin_PROGRAMS = sbcast
 
-sbcast_LDADD = 	$(top_builddir)/src/api/libslurm.o -ldl
+sbcast_LDADD = 	$(top_builddir)/src/api/libslurm.o -ldl -lm
 
 noinst_HEADERS = sbcast.h
 sbcast_SOURCES = agent.c sbcast.c opts.c
diff --git a/src/sbcast/Makefile.in b/src/sbcast/Makefile.in
index 1c29a252e7f558f324a4c5fe67e82020b69eca81..457e25347814a0b42d0f27268cf1a5fb124231b3 100644
--- a/src/sbcast/Makefile.in
+++ b/src/sbcast/Makefile.in
@@ -47,14 +47,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -108,6 +112,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
@@ -269,7 +277,7 @@ top_builddir = @top_builddir@
 top_srcdir = @top_srcdir@
 AUTOMAKE_OPTIONS = foreign
 INCLUDES = -I$(top_srcdir) $(BG_INCLUDES)
-sbcast_LDADD = $(top_builddir)/src/api/libslurm.o -ldl
+sbcast_LDADD = $(top_builddir)/src/api/libslurm.o -ldl -lm
 noinst_HEADERS = sbcast.h
 sbcast_SOURCES = agent.c sbcast.c opts.c
 sbcast_LDFLAGS = -export-dynamic $(CMD_LDFLAGS)
diff --git a/src/sbcast/agent.c b/src/sbcast/agent.c
index 1610132edd0611625280e3f28ce2bfc173eda253..296794b0262558e4b5401d1de4aa1a997b04a5ef 100644
--- a/src/sbcast/agent.c
+++ b/src/sbcast/agent.c
@@ -5,10 +5,11 @@
  *  Copyright (C) 2008 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/sbcast/opts.c b/src/sbcast/opts.c
index d0b4525848fad49e4a06fbf04a782e87f3bc172f..083bd00d6b1f74a4f3b49b888d983e5fc69cfeee 100644
--- a/src/sbcast/opts.c
+++ b/src/sbcast/opts.c
@@ -5,10 +5,11 @@
  *  Copyright (C) 2008 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/sbcast/sbcast.c b/src/sbcast/sbcast.c
index 07f6e6ac24705a3151951d1d963c0a3d0d8735cf..c1e30ba098f318de8a4e253e1e28afc5635cb6ef 100644
--- a/src/sbcast/sbcast.c
+++ b/src/sbcast/sbcast.c
@@ -5,10 +5,11 @@
  *  Copyright (C) 2008 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -116,7 +117,7 @@ static void _get_job_info(void)
 	char *jobid_str;
 	uint32_t jobid;
 
-	jobid_str = getenv("SLURM_JOBID");
+	jobid_str = getenv("SLURM_JOB_ID");
 	if (!jobid_str) {
 		error("Command only valid from within SLURM job");
 		exit(1);
diff --git a/src/sbcast/sbcast.h b/src/sbcast/sbcast.h
index 3560c13f4742d25b5a977e56a1b9ca5074826edd..6e853df2bf4bd37166c86e48e55e16ed3574ba88 100644
--- a/src/sbcast/sbcast.h
+++ b/src/sbcast/sbcast.h
@@ -5,10 +5,11 @@
  *  Copyright (C) 2008 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/scancel/Makefile.in b/src/scancel/Makefile.in
index c35da3bf9100575035ab6518d8931702219c44de..a513700d652dafb875779df5481278d1fe88fbf1 100644
--- a/src/scancel/Makefile.in
+++ b/src/scancel/Makefile.in
@@ -46,14 +46,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -107,6 +111,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
diff --git a/src/scancel/opt.c b/src/scancel/opt.c
index fec38adbbc6ecf6b49f60a70b2ce3ae194335f99..68016a141e4ee0478f36d363bbd41c2a691c92a6 100644
--- a/src/scancel/opt.c
+++ b/src/scancel/opt.c
@@ -1,13 +1,15 @@
 /*****************************************************************************\
  *  opt.c - options processing for scancel
  *****************************************************************************
- *  Copyright (C) 2002 The Regents of the University of California.
+ *  Copyright (C) 2002-2007 The Regents of the University of California.
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Mark Grondona <grondona1@llnl.gov>, et. al.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -71,6 +73,7 @@
 #define OPT_LONG_HELP  0x100
 #define OPT_LONG_USAGE 0x101
 #define OPT_LONG_CTLD  0x102
+#define OPT_LONG_WCKEY 0x103
 
 #define SIZE(a) (sizeof(a)/sizeof(a[0]))
 
@@ -229,6 +232,8 @@ static void _opt_default()
 	opt.user_name	= NULL;
 	opt.user_id	= 0;
 	opt.verbose	= 0;
+	opt.wckey	= NULL;
+	opt.nodelist	= NULL;
 }
 
 /*
@@ -302,6 +307,10 @@ static void _opt_env()
 			error ("Unrecognized SCANCEL_VERBOSE value: %s",
 				val);
 	}
+
+	if ( (val=getenv("SCANCEL_WCKEY")) ) {
+		opt.wckey = xstrdup(val);
+	}
 }
 
 /*
@@ -323,12 +332,14 @@ static void _opt_args(int argc, char **argv)
 		{"user",        required_argument, 0, 'u'},
 		{"verbose",     no_argument,       0, 'v'},
 		{"version",     no_argument,       0, 'V'},
+		{"nodelist",    required_argument, 0, 'w'},
+		{"wckey",       required_argument, 0, OPT_LONG_WCKEY},
 		{"help",        no_argument,       0, OPT_LONG_HELP},
 		{"usage",       no_argument,       0, OPT_LONG_USAGE},
 		{NULL,          0,                 0, 0}
 	};
 
-	while((opt_char = getopt_long(argc, argv, "bin:p:qs:t:u:vV",
+	while((opt_char = getopt_long(argc, argv, "bin:p:qs:t:u:vVw:",
 			long_options, &option_index)) != -1) {
 		switch (opt_char) {
 			case (int)'?':
@@ -369,6 +380,12 @@ static void _opt_args(int argc, char **argv)
 			case (int)'V':
 				_print_version();
 				exit(0);
+			case (int)'w':
+				opt.nodelist = xstrdup(optarg);
+				break;
+			case OPT_LONG_WCKEY:
+				opt.wckey = xstrdup(optarg);
+				break;
 			case OPT_LONG_HELP:
 				_help();
 				exit(0);
@@ -454,7 +471,9 @@ _opt_verify(void)
 	    (opt.partition == NULL) &&
 	    (opt.state == JOB_END) &&
 	    (opt.user_name == NULL) &&
-	    (opt.job_cnt == 0)) {
+	    (opt.wckey == NULL) &&
+	    (opt.job_cnt == 0) &&
+	    (opt.nodelist == NULL)) {
 		error("No job identification provided");
 		verified = false;	/* no job specification */
 	}
@@ -478,6 +497,8 @@ static void _opt_list(void)
 	info("user_id        : %u", opt.user_id);
 	info("user_name      : %s", opt.user_name);
 	info("verbose        : %d", opt.verbose);
+	info("wckey          : %s", opt.wckey);
+	info("nodelist       : %s", opt.nodelist);
 
 	for (i=0; i<opt.job_cnt; i++) {
 		info("job_steps      : %u.%u ", opt.job_id[i], opt.step_id[i]);
@@ -488,7 +509,7 @@ static void _usage(void)
 {
 	printf("Usage: scancel [-n job_name] [-u user] [-p partition] [-q] [-s name | integer]\n");
 	printf("               [--batch] [-t PENDING | RUNNING | SUSPENDED] [--usage] [-v] [-V]\n");
-	printf("               [job_id[.step_id]]\n");
+	printf("               [-w hosts...] [job_id[.step_id]]\n");
 }
 
 static void _help(void)
@@ -501,12 +522,13 @@ static void _help(void)
 	printf("  -p, --partition=partition       name of job's partition\n");
 	printf("  -q, --quiet                     disable warnings\n");
 	printf("  -s, --signal=name | integer     signal to send to job, default is SIGKILL\n");
-	printf("  -t, --state=state               state of the jobs to be signaled\n");
-	printf("                                  valid options are either pending,\n");
-	printf("                                  running, or suspended\n");
-	printf("  -u, --user=user                 name or id of user to have jobs signaled\n");
+	printf("  -t, --state=states              states of jobs to be signaled,\n");
+	printf("                                  default is pending, running, and\n");
+	printf("                                  suspended\n");
+	printf("  -u, --user=user                 name or id of user to have jobs cancelled\n");
 	printf("  -v, --verbose                   verbosity level\n");
 	printf("  -V, --version                   output version information and exit\n");
+	printf("  -w, --nodelist                  cancel jobs using any of these nodes\n");
 	printf("\nHelp options:\n");
 	printf("  --help                          show this help message\n");
 	printf("  --usage                         display brief usage message\n");
diff --git a/src/scancel/scancel.c b/src/scancel/scancel.c
index 080a44742e89b42863cb088a0f3f6434665169c7..31d97e21074fc1aee8808123d707a3629a080720 100644
--- a/src/scancel/scancel.c
+++ b/src/scancel/scancel.c
@@ -2,12 +2,14 @@
  *  scancel - cancel specified job(s) and/or job step(s)
  *****************************************************************************
  *  Copyright (C) 2002-2007 The Regents of the University of California.
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -45,6 +47,7 @@
 #include <stdlib.h>
 #include <string.h>
 #include <signal.h>
+#include <pthread.h>
 
 #if HAVE_INTTYPES_H
 #  include <inttypes.h>
@@ -59,20 +62,34 @@
 #include "src/common/log.h"
 #include "src/common/xstring.h"
 #include "src/common/xmalloc.h"
+#include "src/common/hostlist.h"
 #include "src/scancel/scancel.h"
 
 #define MAX_CANCEL_RETRY 10
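+/* Maximum number of concurrent cancellation threads */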
+#define MAX_THREADS 20
+
 
 static void _cancel_jobs (void);
-static void _cancel_job_id (uint32_t job_id, uint16_t sig);
-static void _cancel_step_id (uint32_t job_id, uint32_t step_id, 
-			     uint16_t sig);
+static void *_cancel_job_id (void *cancel_info);
+static void *_cancel_step_id (void *cancel_info);
+
 static int  _confirmation (int i, uint32_t step_id);
 static void _filter_job_records (void);
 static void _load_job_records (void);
+static void _verify_job_ids (void);
 
 static job_info_msg_t * job_buffer_ptr = NULL;
 
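+/* Arguments passed to each cancellation thread.  The shared counter,
+ * mutex, and condition variable throttle how many threads may run
+ * concurrently (see MAX_THREADS). */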
+typedef struct job_cancel_info {
+	uint32_t job_id;
+	uint32_t step_id;
+	uint16_t sig;
+	int             *num_active_threads;
+	pthread_mutex_t *num_active_threads_lock;
+	pthread_cond_t  *num_active_threads_cond;
+} job_cancel_info_t;
+
+
 int
 main (int argc, char *argv[]) 
 {
@@ -85,12 +102,15 @@ main (int argc, char *argv[])
 		log_alter (log_opts, SYSLOG_FACILITY_DAEMON, NULL);
 	} 
 	
+	_load_job_records();
+	_verify_job_ids();
+
 	if ((opt.interactive) ||
 	    (opt.job_name) ||
 	    (opt.partition) ||
 	    (opt.state != JOB_END) ||
-	    (opt.user_name)) {
-		_load_job_records ();
+	    (opt.user_name) ||
+	    (opt.nodelist)) {
 		_filter_job_records ();
 	}
 	_cancel_jobs ();
@@ -114,6 +134,35 @@ _load_job_records (void)
 }
 
 
+static void
+_verify_job_ids (void)
+{
+	/* If a list of jobs was given, make sure each job is actually in
+	 * our list of job records. */
+	int i, j;
+	job_info_t *job_ptr = job_buffer_ptr->job_array;
+
+	for (j = 0; j < opt.job_cnt; j++ ) {
+		for (i = 0; i < job_buffer_ptr->record_count; i++) {
+			if (job_ptr[i].job_id == opt.job_id[j])
+				break;
+		}
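+		/* If the job id was not found, i == record_count here,
+		 * so the bound must be tested before reading job_ptr[i] */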
+		if (((i >= job_buffer_ptr->record_count) ||
+		     (job_ptr[i].job_state >= JOB_COMPLETE)) &&
+		     (opt.verbose >= 0)) {
+			if (opt.step_id[j] == SLURM_BATCH_SCRIPT)
+				error("Kill job error on job id %u: %s", 
+				      opt.job_id[j], 
+				      slurm_strerror(ESLURM_INVALID_JOB_ID));
+			else
+				error("Kill job error on job step id %u.%u: %s",
+				      opt.job_id[j], opt.step_id[j],
+				      slurm_strerror(ESLURM_INVALID_JOB_ID));
+		}
+	}
+}
+
+
 /* _filter_job_records - filtering job information per user specification */
 static void 
 _filter_job_records (void)
@@ -133,23 +182,16 @@ _filter_job_records (void)
 			continue;
 		}
 
-		if (opt.job_name != NULL) {
-			char *quote = NULL;
-			int set = 0;
-			if ((quote = strchr(job_ptr[i].name, (int) '\"'))) 
-				/* take out the wckey */
-				*quote = '\0';
-			else 
-				quote = job_ptr[i].name;
-			
-			if(strcmp(job_ptr[i].name, opt.job_name) != 0) {
-				job_ptr[i].job_id = 0;
-				if(set)
-					*quote = '\"';	
-				continue;
-			}
-			if(set)
-				*quote = '\"';					
+		if (opt.job_name != NULL &&
+		    (strcmp(job_ptr[i].name, opt.job_name) != 0)) {
+			job_ptr[i].job_id = 0;
+			continue;
+		}
+
+		if (opt.wckey != NULL &&
+		    (strcmp(job_ptr[i].wckey, opt.wckey) != 0)) {
+			job_ptr[i].job_id = 0;
+			continue;
 		}
 
 		if ((opt.partition != NULL) &&
@@ -170,6 +212,28 @@ _filter_job_records (void)
 			continue;
 		}
 
+		if (opt.nodelist != NULL) {
+			/* If nodelist contains a '/', treat it as a file name */
+			if (strchr(opt.nodelist, '/') != NULL) {
+				char *reallist;
+				reallist = slurm_read_hostfile(opt.nodelist,
+							       NO_VAL);
+				if (reallist) {
+					xfree(opt.nodelist);
+					opt.nodelist = reallist;
+				}
+			}
+
+			hostset_t hs = hostset_create(job_ptr[i].nodes);
+			if (!hostset_intersects(hs, opt.nodelist)) {
+				job_ptr[i].job_id = 0;
+				hostset_destroy(hs);
+				continue;
+			} else {
+				hostset_destroy(hs);
+			}
+		}
+
 		if (opt.job_cnt == 0)
 			continue;
 		for (j = 0; j < opt.job_cnt; j++) {
@@ -188,62 +252,138 @@ _filter_job_records (void)
 static void
 _cancel_jobs (void)
 {
-	int i, j;
+	int i, j, err;
 	job_info_t *job_ptr = NULL;
+	pthread_attr_t  attr;
+	job_cancel_info_t *cancel_info;
+	pthread_t  dummy;
+	int num_active_threads = 0;
+	pthread_mutex_t  num_active_threads_lock;
+	pthread_cond_t   num_active_threads_cond;
+
+	slurm_attr_init(&attr);
+	if (pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED))
+		error("pthread_attr_setdetachstate error %m");
+
+	slurm_mutex_init(&num_active_threads_lock);
+
+	if (pthread_cond_init(&num_active_threads_cond, NULL))
+		error("pthread_cond_init error %m");
+
+	job_ptr = job_buffer_ptr->job_array ;
+
+	/* Spawn a thread to cancel each job or job step marked for
+	 * cancellation */
+	for (i = 0; i < job_buffer_ptr->record_count; i++) {
+		if (job_ptr[i].job_id == 0) 
+			continue;
 
-	if (opt.job_cnt && opt.interactive) {	/* confirm cancel */
-		job_ptr = job_buffer_ptr->job_array ;
-		for (j = 0; j < opt.job_cnt; j++ ) {
-			for (i = 0; i < job_buffer_ptr->record_count; i++) {
-				if (job_ptr[i].job_id != opt.job_id[j]) 
+		/* If cancelling a list of jobs, see if the current job 
+		 * included a step id */
+		if (opt.job_cnt) {
+			for (j = 0; j < opt.job_cnt; j++ ) {
+				if (job_ptr[i].job_id != opt.job_id[j])
 					continue;
+
 				if (opt.interactive && 
 				    (_confirmation(i, opt.step_id[j]) == 0))
+					continue;
+
+				cancel_info = 
+					(job_cancel_info_t *) 
+					xmalloc(sizeof(job_cancel_info_t));
+				cancel_info->job_id  = job_ptr[i].job_id;
+				cancel_info->sig     = opt.signal;
+				cancel_info->num_active_threads = 
+					&num_active_threads;
+				cancel_info->num_active_threads_lock = 
+					&num_active_threads_lock;
+				cancel_info->num_active_threads_cond = 
+					&num_active_threads_cond;
+
+				pthread_mutex_lock( &num_active_threads_lock );
+				num_active_threads++;
+				while (num_active_threads > MAX_THREADS) {
+					pthread_cond_wait(&num_active_threads_cond,
+							  &num_active_threads_lock);
+				}
+				pthread_mutex_unlock( &num_active_threads_lock );
+
+				if (opt.step_id[j] == SLURM_BATCH_SCRIPT) {
+					err = pthread_create(&dummy, &attr, 
+							     _cancel_job_id,
+							     cancel_info);
+					if (err)
+						_cancel_job_id(cancel_info);
 					break;
-				if (opt.step_id[j] == SLURM_BATCH_SCRIPT)
-					_cancel_job_id (opt.job_id[j], 
-							opt.signal);
-				else
-					_cancel_step_id (opt.job_id[j], 
-					                opt.step_id[j],
-							opt.signal);
-				break;
+				} else {
+					cancel_info->step_id = opt.step_id[j];
+					err = pthread_create(&dummy, &attr, 
+							     _cancel_step_id,
+							     cancel_info);
+					if (err)
+						_cancel_step_id(cancel_info);
+					/* Don't break here.  Keep looping in 
+					 * case other steps from the same job 
+					 * are cancelled. */
+				}
 			}
-			if (i >= job_buffer_ptr->record_count)
-				fprintf (stderr, "Job %u not found\n", 
-				         opt.job_id[j]);
-		}
-
-	} else if (opt.job_cnt) {	/* delete specific jobs */
-		for (j = 0; j < opt.job_cnt; j++ ) {
-			if (opt.step_id[j] == SLURM_BATCH_SCRIPT)
-				_cancel_job_id (opt.job_id[j], 
-						opt.signal);
-			else
-				_cancel_step_id (opt.job_id[j], 
-				                opt.step_id[j], 
-						opt.signal);
-		}
-
-	} else {		/* delete all jobs per filtering */
-		job_ptr = job_buffer_ptr->job_array ;
-		for (i = 0; i < job_buffer_ptr->record_count; i++) {
-			if (job_ptr[i].job_id == 0) 
-				continue;
+		} else {
 			if (opt.interactive && 
 			    (_confirmation(i, SLURM_BATCH_SCRIPT) == 0))
 				continue;
-			_cancel_job_id (job_ptr[i].job_id, opt.signal);
+
+			cancel_info = 
+				(job_cancel_info_t *)
+				xmalloc(sizeof(job_cancel_info_t));
+			cancel_info->job_id  = job_ptr[i].job_id;
+			cancel_info->sig     = opt.signal;
+			cancel_info->num_active_threads = &num_active_threads;
+			cancel_info->num_active_threads_lock = 
+				&num_active_threads_lock;
+			cancel_info->num_active_threads_cond = 
+				&num_active_threads_cond;
+
+			pthread_mutex_lock( &num_active_threads_lock );
+			num_active_threads++;
+			while (num_active_threads > MAX_THREADS) {
+				pthread_cond_wait( &num_active_threads_cond,
+						   &num_active_threads_lock );
+			}
+			pthread_mutex_unlock( &num_active_threads_lock );
+
+			err = pthread_create(&dummy, &attr, 
+					     _cancel_job_id,
+					     cancel_info);
+			if (err)
+				_cancel_job_id(cancel_info);
 		}
 	}
+
+	/* Wait for any spawned threads that have not finished */
+	pthread_mutex_lock( &num_active_threads_lock );
+	while (num_active_threads > 0) {
+		pthread_cond_wait( &num_active_threads_cond,
+				   &num_active_threads_lock );
+	}
+	pthread_mutex_unlock( &num_active_threads_lock );
+
+	slurm_attr_destroy(&attr);
+	slurm_mutex_destroy(&num_active_threads_lock);
+	if (pthread_cond_destroy(&num_active_threads_cond))
+		error("pthread_cond_destroy error %m");
 }
 
-static void
-_cancel_job_id (uint32_t job_id, uint16_t sig)
+static void *
+_cancel_job_id (void *ci)
 {
 	int error_code = SLURM_SUCCESS, i;
 	bool sig_set = true;
 
+	job_cancel_info_t *cancel_info = (job_cancel_info_t *)ci;
+	uint32_t job_id = cancel_info->job_id;
+	uint16_t sig    = cancel_info->sig;
+
 	if (sig == (uint16_t)-1) {
 		sig = SIGKILL;
 		sig_set = false;
@@ -281,12 +421,27 @@ _cancel_job_id (uint32_t job_id, uint16_t sig)
 			error("Kill job error on job id %u: %s", 
 				job_id, slurm_strerror(slurm_get_errno()));
 	}
+
+	/* Purposely free the struct passed in here, so the caller doesn't have
+	 * to keep track of it, but don't destroy the mutex and condition 
+	 * variables contained. */ 
+	pthread_mutex_lock(   cancel_info->num_active_threads_lock );
+	(*(cancel_info->num_active_threads))--;
+	pthread_cond_signal(  cancel_info->num_active_threads_cond );
+	pthread_mutex_unlock( cancel_info->num_active_threads_lock );
+
+	xfree(cancel_info);
+	return NULL;
 }
 
-static void
-_cancel_step_id (uint32_t job_id, uint32_t step_id, uint16_t sig)
+static void *
+_cancel_step_id (void *ci)
 {
 	int error_code = SLURM_SUCCESS, i;
+	job_cancel_info_t *cancel_info = (job_cancel_info_t *)ci;
+	uint32_t job_id  = cancel_info->job_id;
+	uint32_t step_id = cancel_info->step_id;
+	uint16_t sig     = cancel_info->sig;
 
 	if (sig == (uint16_t)-1)
 		sig = SIGKILL;
@@ -320,6 +475,17 @@ _cancel_step_id (uint32_t job_id, uint32_t step_id, uint16_t sig)
 		 		job_id, step_id, 
 				slurm_strerror(slurm_get_errno()));
 	}
+
+	/* Purposely free the struct passed in here, so the caller doesn't have
+	 * to keep track of it, but don't destroy the mutex and condition 
+	 * variables contained. */ 
+	pthread_mutex_lock(   cancel_info->num_active_threads_lock );
+	(*(cancel_info->num_active_threads))--;
+	pthread_cond_signal(  cancel_info->num_active_threads_cond );
+	pthread_mutex_unlock( cancel_info->num_active_threads_lock );
+
+	xfree(cancel_info);
+	return NULL;
 }
 
 /* _confirmation - Confirm job cancel request interactively */
diff --git a/src/scancel/scancel.h b/src/scancel/scancel.h
index 333b78b8543abbeab79bc488dfc249119a508dd2..8e8905ee54d7506d323d7c9b6316a4e624f0bc7e 100644
--- a/src/scancel/scancel.h
+++ b/src/scancel/scancel.h
@@ -4,10 +4,11 @@
  *  Copyright (C) 2002 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette<jette1@llnl.gov>, et. al.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -57,6 +58,8 @@ typedef struct scancel_options {
 	uint16_t job_cnt;	/* count of job_id's specified	*/
 	uint32_t *job_id;	/* list of job_id's		*/
 	uint32_t *step_id;	/* list of job step id's	*/
+	char *wckey;		/* --wckey			*/
+	char *nodelist;		/* --nodelist, -w		*/
 } opt_t;
 
 opt_t opt;
diff --git a/src/scontrol/Makefile.am b/src/scontrol/Makefile.am
index 31b51399c886401b5ce008d4e8feb827f885418f..7e5907dbb0e2053ee4913c1aa674154f662b8df5 100644
--- a/src/scontrol/Makefile.am
+++ b/src/scontrol/Makefile.am
@@ -7,16 +7,18 @@ INCLUDES = -I$(top_srcdir)
 bin_PROGRAMS = scontrol
 
 scontrol_SOURCES =	\
+	create_res.c	\
 	info_job.c	\
 	info_node.c	\
 	info_part.c	\
+	info_res.c	\
 	scontrol.c	\
 	scontrol.h	\
 	update_job.c	\
 	update_node.c	\
 	update_part.c
 
-convenience_libs = $(top_builddir)/src/api/libslurm.o -ldl
+convenience_libs = $(top_builddir)/src/api/libslurm.o -ldl -lm
 
 
 scontrol_LDADD = \
diff --git a/src/scontrol/Makefile.in b/src/scontrol/Makefile.in
index 644cb23a23d6b885bcf31bc4ae2ce7870290e93c..e958dece92f3baeecb8d2737d32efcc9a944dc95 100644
--- a/src/scontrol/Makefile.in
+++ b/src/scontrol/Makefile.in
@@ -44,14 +44,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -71,9 +75,10 @@ CONFIG_CLEAN_FILES =
 am__installdirs = "$(DESTDIR)$(bindir)"
 binPROGRAMS_INSTALL = $(INSTALL_PROGRAM)
 PROGRAMS = $(bin_PROGRAMS)
-am_scontrol_OBJECTS = info_job.$(OBJEXT) info_node.$(OBJEXT) \
-	info_part.$(OBJEXT) scontrol.$(OBJEXT) update_job.$(OBJEXT) \
-	update_node.$(OBJEXT) update_part.$(OBJEXT)
+am_scontrol_OBJECTS = create_res.$(OBJEXT) info_job.$(OBJEXT) \
+	info_node.$(OBJEXT) info_part.$(OBJEXT) info_res.$(OBJEXT) \
+	scontrol.$(OBJEXT) update_job.$(OBJEXT) update_node.$(OBJEXT) \
+	update_part.$(OBJEXT)
 scontrol_OBJECTS = $(am_scontrol_OBJECTS)
 am__DEPENDENCIES_1 = $(top_builddir)/src/api/libslurm.o
 am__DEPENDENCIES_2 =
@@ -108,6 +113,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
@@ -270,16 +279,18 @@ top_srcdir = @top_srcdir@
 AUTOMAKE_OPTIONS = foreign
 INCLUDES = -I$(top_srcdir)
 scontrol_SOURCES = \
+	create_res.c	\
 	info_job.c	\
 	info_node.c	\
 	info_part.c	\
+	info_res.c	\
 	scontrol.c	\
 	scontrol.h	\
 	update_job.c	\
 	update_node.c	\
 	update_part.c
 
-convenience_libs = $(top_builddir)/src/api/libslurm.o -ldl
+convenience_libs = $(top_builddir)/src/api/libslurm.o -ldl -lm
 scontrol_LDADD = \
 	$(convenience_libs) \
 	$(READLINE_LIBS)
@@ -356,9 +367,11 @@ mostlyclean-compile:
 distclean-compile:
 	-rm -f *.tab.c
 
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/create_res.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/info_job.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/info_node.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/info_part.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/info_res.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/scontrol.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/update_job.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/update_node.Po@am__quote@
diff --git a/src/scontrol/create_res.c b/src/scontrol/create_res.c
new file mode 100644
index 0000000000000000000000000000000000000000..4e1df350f6c29a3a9bf1d3942004077126e597b6
--- /dev/null
+++ b/src/scontrol/create_res.c
@@ -0,0 +1,400 @@
+/*****************************************************************************\
+ *  create_res.c - reservation creation function for scontrol.
+ *****************************************************************************
+ *  Copyright (C) 2009 Lawrence Livermore National Security.
+ *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+ *  Written by David Bremer <dbremer@llnl.gov>
+ *  CODE-OCEC-09-009. All rights reserved.
+ *  
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
+ *  
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission 
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and 
+ *  distribute linked combinations including the two. You must obey the GNU 
+ *  General Public License in all respects for all of the code used other than 
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this 
+ *  exception to your version of the file(s), but you are not obligated to do 
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in 
+ *  the program, then also delete it here.
+ *  
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *  
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#include "src/scontrol/scontrol.h"
+#include "src/slurmctld/reservation.h"
+
+
+/*
+ *  _process_plus_minus is used to convert a string like
+ *       Users+=a,b,c
+ *  to   Users=+a,+b,+c
+ */
+
+static char * _process_plus_minus(char plus_or_minus, char *src)
+{
+	int num_commas = 0;
+	int ii;
+	int srclen = strlen(src);
+	char *dst, *ret;
+
+	for (ii=0; ii<srclen; ii++) {
+		if (src[ii] == ',')
+			num_commas++;
+	}
+	ret = dst = xmalloc(srclen + 2 + num_commas);
+
+	*dst++ = plus_or_minus;
+	for (ii=0; ii<srclen; ii++) {
+		if (*src == ',') {
+			*dst++ = *src++;
+			*dst++ = plus_or_minus;
+		} else {
+			*dst++ = *src++;
+		}
+	}
+	*dst = '\0';
+
+	return ret;
+}
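
For illustration, the same rewrite as a self-contained program, with plain
malloc() standing in for SLURM's xmalloc(); given '+' and "alice,bob,carol"
it prints "+alice,+bob,+carol".

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static char *process_plus_minus(char sign, const char *src)
    {
        size_t srclen = strlen(src), commas = 0, i;
        char *dst, *ret;

        for (i = 0; i < srclen; i++)
            if (src[i] == ',')
                commas++;
        /* one sign per name, plus the terminating NUL */
        ret = dst = malloc(srclen + commas + 2);
        if (!ret)
            return NULL;

        *dst++ = sign;
        for (i = 0; i < srclen; i++) {
            *dst++ = src[i];
            if (src[i] == ',')
                *dst++ = sign;  /* re-apply the sign after each comma */
        }
        *dst = '\0';
        return ret;
    }

    int main(void)
    {
        char *s = process_plus_minus('+', "alice,bob,carol");
        printf("%s\n", s);  /* +alice,+bob,+carol */
        free(s);
        return 0;
    }
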
+
+
+/*
+ *  _parse_flags  is used to parse the Flags= option.  It handles
+ *  daily, weekly, and maint, optionally preceded by + or -, 
+ *  separated by a comma but no spaces.
+ */
+static uint32_t _parse_flags(const char *flagstr, const char *msg)
+{
+	int flip;
+	uint32_t outflags = 0;
+	const char *curr = flagstr;
+	int taglen = 0;
+
+	while (*curr != '\0') {
+		flip = 0;
+		if (*curr == '+') {
+			curr++;
+		} else if (*curr == '-') {
+			flip = 1;
+			curr++;
+		}
+		taglen = 0;
+		while (curr[taglen] != ',' && curr[taglen] != '\0')
+			taglen++;
+
+		if (strncasecmp(curr, "Maintenance", MAX(taglen,1)) == 0) {
+			curr += taglen;
+			if (flip)
+				outflags |= RESERVE_FLAG_NO_MAINT;
+			else 
+				outflags |= RESERVE_FLAG_MAINT;
+		} else if (strncasecmp(curr, "Daily", MAX(taglen,1)) == 0) {
+			curr += taglen;
+			if (flip)
+				outflags |= RESERVE_FLAG_NO_DAILY;
+			else 
+				outflags |= RESERVE_FLAG_DAILY;
+		} else if (strncasecmp(curr, "Weekly", MAX(taglen,1)) == 0) {
+			curr += taglen;
+			if (flip)
+				outflags |= RESERVE_FLAG_NO_WEEKLY;
+			else 
+				outflags |= RESERVE_FLAG_WEEKLY;
+		} else {
+			error("Error parsing flags %s.  %s", flagstr, msg);
+			return 0xffffffff;
+		}
+
+		if (*curr == ',') {
+			curr++;
+		}
+	}
+	return outflags;
+}
+
+
+/* 
+ * scontrol_parse_res_options   parse options for creating or updating a 
+ *                              reservation
+ * IN argc - count of arguments
+ * IN argv - list of arguments
+ * IN msg  - a string to append to any error message
+ * OUT resv_msg_ptr - struct holding reservation parameters
+ * OUT free_user_str - bool indicating that resv_msg_ptr->users should be freed 
+ * OUT free_acct_str - bool indicating that resv_msg_ptr->accounts should be freed 
+ * RET 0 on success, -1 on err and prints message
+ */
+extern int
+scontrol_parse_res_options(int argc, char *argv[], const char *msg, 
+			   resv_desc_msg_t  *resv_msg_ptr, 
+			   int *free_user_str, int *free_acct_str)
+{
+	int i;
+	int duration = -3;   /* -1 == INFINITE, -2 == error, -3 == not set */
+
+	*free_user_str = 0;
+	*free_acct_str = 0;
+
+	for (i=0; i<argc; i++) {
+		char *tag = argv[i];
+		int taglen = 0;
+		char plus_minus = '\0';
+
+		char *val = strchr(argv[i], '=');
+		if (val)
+			taglen = val - argv[i];
+
+		if (!val && strncasecmp(argv[i], "res", 3) == 0) {
+			continue;
+		} else if (!val || taglen == 0) {
+			exit_code = 1;
+			error("Unknown parameter %s.  %s", argv[i], msg);
+			return -1;
+		}
+		if (val[-1] == '+' || val[-1] == '-') {
+			plus_minus = val[-1];
+			taglen--;
+		}
+		val++;
+
+		if (strncasecmp(tag, "ReservationName", MAX(taglen, 1)) == 0) {
+			resv_msg_ptr->name = val;
+
+		} else if (strncasecmp(tag, "StartTime", MAX(taglen, 1)) == 0) {
+			time_t  t = parse_time(val, 0);
+			if (t == 0) {
+				exit_code = 1;
+				error("Invalid start time %s.  %s", 
+				      argv[i], msg);
+				return -1;
+			}
+			resv_msg_ptr->start_time = t;
+
+		} else if (strncasecmp(tag, "EndTime", MAX(taglen, 1)) == 0) {
+			time_t  t = parse_time(val, 0);
+			if (t == 0) {
+				exit_code = 1;
+				error("Invalid end time %s.  %s", argv[i], msg);
+				return -1;
+			}
+			resv_msg_ptr->end_time = t;
+
+		} else if (strncasecmp(tag, "Duration", MAX(taglen, 1)) == 0) {
+			/* -1 == INFINITE, -2 == error, -3 == not set */
+			duration = time_str2mins(val);
+			if (duration < 0 && duration != INFINITE) {
+				exit_code = 1;
+				error("Invalid duration %s.  %s", argv[i], msg);
+				return -1;
+			}
+			resv_msg_ptr->duration = (uint32_t)duration;
+
+		} else if (strncasecmp(tag, "Flags", MAX(taglen, 2)) == 0) {
+			uint32_t f;
+			if (plus_minus) {
+				char *tmp =
+					_process_plus_minus(plus_minus, val);
+				f = _parse_flags(tmp, msg);
+				xfree(tmp);
+			} else {
+				f = _parse_flags(val, msg);
+			}
+			if (f == 0xffffffff) {
+				return -1;
+			} else {
+				resv_msg_ptr->flags = f;
+			}
+		} else if (strncasecmp(tag, "NodeCnt", MAX(taglen,5)) == 0 || 
+			   strncasecmp(tag, "NodeCount", MAX(taglen,5)) == 0) {
+			char *endptr = NULL;
+			resv_msg_ptr->node_cnt = strtol(val, &endptr, 10);
+
+			if (endptr == NULL || *endptr != '\0' || 
+                            *val == '\0') {
+				exit_code = 1;
+				error("Invalid node count %s.  %s", 
+				      argv[i], msg);
+				return -1;
+			}
+		} else if (strncasecmp(tag, "Nodes", MAX(taglen, 5)) == 0) {
+			resv_msg_ptr->node_list = val;
+
+		} else if (strncasecmp(tag, "Features", MAX(taglen, 2)) == 0) {
+			resv_msg_ptr->features = val;
+
+		} else if (strncasecmp(tag, "PartitionName", MAX(taglen, 1)) == 0) {
+			resv_msg_ptr->partition = val;
+
+		} else if (strncasecmp(tag, "Users", MAX(taglen, 1)) == 0) {
+			if (plus_minus) {
+				resv_msg_ptr->users = 
+					_process_plus_minus(plus_minus, val);
+				*free_user_str = 1;
+			} else {
+				resv_msg_ptr->users = val;
+			}
+		} else if (strncasecmp(tag, "Accounts", MAX(taglen, 1)) == 0) {
+			if (plus_minus) {
+				resv_msg_ptr->accounts = 
+					_process_plus_minus(plus_minus, val);
+				*free_acct_str = 1;
+			} else {
+				resv_msg_ptr->accounts = val;
+			}
+		} else if (strncasecmp(tag, "res", 3) == 0) {
+			continue;
+		} else {
+			exit_code = 1;
+			error("Unknown parameter %s.  %s", argv[i], msg);
+			return -1;
+		}
+	}
+	return 0;
+}
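
The parser above relies throughout on one matching idiom: strncasecmp(tag,
keyword, MAX(taglen, N)) accepts any case-insensitive prefix of keyword that
is at least N characters long, so N is chosen as the shortest unambiguous
abbreviation (e.g. 5 to separate "Nodes" from "NodeCnt"). A standalone
demonstration, with MAX() defined locally since SLURM takes it from its own
headers:

    #include <stdio.h>
    #include <string.h>
    #include <strings.h>

    #define MAX(a,b) (((a) > (b)) ? (a) : (b))

    static int matches(const char *tag, const char *keyword, int min_len)
    {
        int taglen = strlen(tag);
        return strncasecmp(tag, keyword, MAX(taglen, min_len)) == 0;
    }

    int main(void)
    {
        printf("%d\n", matches("star",   "StartTime", 1)); /* 1: long enough prefix */
        printf("%d\n", matches("nodes",  "Nodes",     5)); /* 1: full keyword */
        printf("%d\n", matches("node",   "Nodes",     5)); /* 0: too short to tell from NodeCnt */
        printf("%d\n", matches("startx", "StartTime", 1)); /* 0: not a prefix */
        return 0;
    }
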
+
+
+/* 
+ * scontrol_update_res - update the slurm reservation configuration per the 
+ *     supplied arguments 
+ * IN argc - count of arguments
+ * IN argv - list of arguments
+ * RET 0 if no slurm error, errno otherwise. parsing error prints 
+ *     error message and returns 0.
+ */
+extern int
+scontrol_update_res(int argc, char *argv[])
+{
+	resv_desc_msg_t   resv_msg;
+	int err, ret = 0;
+	int free_user_str = 0, free_acct_str = 0;
+
+	slurm_init_resv_desc_msg (&resv_msg);
+	err = scontrol_parse_res_options(argc, argv, "No reservation update.",
+					 &resv_msg, &free_user_str, 
+					 &free_acct_str);
+	if (err)
+		goto SCONTROL_UPDATE_RES_CLEANUP;
+
+	if (resv_msg.name == NULL) {
+		exit_code = 1;
+		error("Reservation must be given.  No reservation update.");
+		goto SCONTROL_UPDATE_RES_CLEANUP;
+	}
+
+	err = slurm_update_reservation(&resv_msg);
+	if (err) {
+		exit_code = 1;
+		slurm_perror("Error updating the reservation");
+		ret = slurm_get_errno();
+	} else {
+		printf("Reservation updated.\n");
+	}
+
+SCONTROL_UPDATE_RES_CLEANUP:
+	if (free_user_str)
+		xfree(resv_msg.users);
+	if (free_acct_str)
+		xfree(resv_msg.accounts);
+	return ret;
+}
+
+
+
+/* 
+ * scontrol_create_res - create the slurm reservation configuration per the 
+ *     supplied arguments 
+ * IN argc - count of arguments
+ * IN argv - list of arguments
+ * RET 0 if no slurm error, errno otherwise. parsing error prints 
+ *     error message and returns 0.
+ */
+extern int
+scontrol_create_res(int argc, char *argv[])
+{
+	resv_desc_msg_t   resv_msg;
+	char *new_res_name = NULL;
+	int free_user_str = 0, free_acct_str = 0;
+	int err, ret = 0;
+
+	slurm_init_resv_desc_msg (&resv_msg);
+	err = scontrol_parse_res_options(argc, argv, "No reservation created.", 
+					 &resv_msg, &free_user_str, &free_acct_str);
+	if (err)
+		goto SCONTROL_CREATE_RES_CLEANUP;
+
+	if (resv_msg.start_time == (time_t)NO_VAL) {
+		exit_code = 1;
+		error("A start time must be given.  No reservation created.");
+		goto SCONTROL_CREATE_RES_CLEANUP;
+	}
+	if (resv_msg.end_time == (time_t)NO_VAL && 
+	    resv_msg.duration == (uint32_t)NO_VAL) {
+		exit_code = 1;
+		error("An end time or duration must be given.  "
+		      "No reservation created.");
+		goto SCONTROL_CREATE_RES_CLEANUP;
+	}
+	if (resv_msg.end_time != (time_t)NO_VAL && 
+	    resv_msg.duration != (uint32_t)NO_VAL && 
+            resv_msg.start_time + resv_msg.duration*60 != resv_msg.end_time) {
+		exit_code = 1;
+		error("StartTime + Duration does not equal EndTime.  "
+		      "No reservation created.");
+		goto SCONTROL_CREATE_RES_CLEANUP;
+	}
+	if (resv_msg.start_time > resv_msg.end_time && 
+	    resv_msg.end_time != (time_t)NO_VAL) {
+		exit_code = 1;
+		error("Start time cannot be after end time.  "
+		      "No reservation created.");
+		goto SCONTROL_CREATE_RES_CLEANUP;
+	}
+	if (resv_msg.node_cnt == NO_VAL && 
+	    (resv_msg.node_list == NULL || resv_msg.node_list[0] == '\0')) {
+		exit_code = 1;
+		error("Either Nodes or NodeCnt must be specified.  "
+		      "No reservation created.");
+		goto SCONTROL_CREATE_RES_CLEANUP;
+	}
+	if ((resv_msg.users == NULL    || resv_msg.users[0] == '\0') && 
+	    (resv_msg.accounts == NULL || resv_msg.accounts[0] == '\0')) {
+		exit_code = 1;
+		error("Either Users or Accounts must be specified.  "
+		      "No reservation created.");
+		goto SCONTROL_CREATE_RES_CLEANUP;
+	}
+
+	new_res_name = slurm_create_reservation(&resv_msg);
+	if (!new_res_name) {
+		exit_code = 1;
+		slurm_perror("Error creating the reservation");
+		ret = slurm_get_errno();
+	} else {
+		printf("Reservation created: %s\n", new_res_name);
+		free(new_res_name);
+	}
+
+SCONTROL_CREATE_RES_CLEANUP:
+	if (free_user_str)  
+		xfree(resv_msg.users);
+	if (free_acct_str)  
+		xfree(resv_msg.accounts);
+	return ret;
+}
diff --git a/src/scontrol/info_job.c b/src/scontrol/info_job.c
index cb8b944ff7e7eb0254807d8b75615ce2e0d118a0..41553593450bc276f9c9fc2c703ec522f199b2ba 100644
--- a/src/scontrol/info_job.c
+++ b/src/scontrol/info_job.c
@@ -5,10 +5,11 @@
  *  Copyright (C) 2008 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/scontrol/info_node.c b/src/scontrol/info_node.c
index 78acfbc05a1ed55c66c23ce94731019c64ac93b2..29e117e346406588148c7d641de23cf4454fb693 100644
--- a/src/scontrol/info_node.c
+++ b/src/scontrol/info_node.c
@@ -1,13 +1,15 @@
 /*****************************************************************************\
  *  info_node.c - node information functions for scontrol.
  *****************************************************************************
- *  Copyright (C) 2002-2006 The Regents of the University of California.
+ *  Copyright (C) 2002-2007 The Regents of the University of California.
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -183,3 +185,55 @@ scontrol_print_node_list (char *node_list)
 	return;
 }
 
+/*
+ * scontrol_print_topo - print the switch topology above the specified node
+ * IN node_list - NULL to print all topology information
+ */
+extern void	scontrol_print_topo (char *node_list)
+{
+	static topo_info_response_msg_t *topo_info_msg = NULL;
+	int i, match, match_cnt = 0;
+	hostset_t hs;
+
+	if ((topo_info_msg == NULL) &&
+	    slurm_load_topo(&topo_info_msg)) {
+		slurm_perror ("slurm_load_topo error");
+		return;
+	}
+
+	if ((node_list == NULL) || (node_list[0] == '\0')) {
+		slurm_print_topo_info_msg(stdout, topo_info_msg, one_liner);
+		return;
+	}
+
+	/* Search for matching switch name */
+	for (i=0; i<topo_info_msg->record_count; i++) {
+		if (strcmp(topo_info_msg->topo_array[i].name, node_list))
+			continue;
+		slurm_print_topo_record(stdout, &topo_info_msg->topo_array[i], 
+					one_liner);
+		return;
+	}
+
+	/* Search for matching node name */
+	for (i=0; i<topo_info_msg->record_count; i++) {
+		if ((topo_info_msg->topo_array[i].nodes == NULL) ||
+		    (topo_info_msg->topo_array[i].nodes[0] == '\0')) 
+			continue;
+		hs = hostset_create(topo_info_msg->topo_array[i].nodes);
+		if (hs == NULL)
+			fatal("hostset_create: memory allocation failure");
+		match = hostset_within(hs, node_list);
+		hostset_destroy(hs);
+		if (!match)
+			continue;
+		match_cnt++;
+		slurm_print_topo_record(stdout, &topo_info_msg->topo_array[i], 
+					one_liner);
+	}
+
+	if (match_cnt == 0) {
+		error("Topology information contains no switch or "
+		      "node named %s", node_list);
+	}
+}
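
scontrol_print_topo() first tries its argument as a switch name and then as a
node name, using hostset_within() to test whether the node falls inside each
switch's node range. The fragment below isolates that containment test; it
assumes SLURM's hostlist API from src/common/hostlist.h and would need to be
built in-tree:

    #include <stdio.h>
    #include "src/common/hostlist.h"

    int main(void)
    {
        hostset_t hs = hostset_create("tux[0-15]");
        if (hs == NULL)
            return 1;
        printf("tux3:  %d\n", hostset_within(hs, "tux3"));   /* 1 */
        printf("tux42: %d\n", hostset_within(hs, "tux42"));  /* 0 */
        hostset_destroy(hs);
        return 0;
    }
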
diff --git a/src/scontrol/info_part.c b/src/scontrol/info_part.c
index 8c9e060696813ffe363deeef7c8066dcd9b40435..e3bbe2591d0fe5e320160fc4e83c0ceced53ac78 100644
--- a/src/scontrol/info_part.c
+++ b/src/scontrol/info_part.c
@@ -4,10 +4,11 @@
  *  Copyright (C) 2002-2006 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -79,7 +80,7 @@ scontrol_load_partitions (partition_info_msg_t **part_buffer_pptr)
 }
 
 /*
- * scontrol_print_par - print the specified partition's information
+ * scontrol_print_part - print the specified partition's information
  * IN partition_name - NULL to print information about all partition 
  */
 extern void 
diff --git a/src/scontrol/info_res.c b/src/scontrol/info_res.c
new file mode 100644
index 0000000000000000000000000000000000000000..afe56ebc8c9682e16e0fd4cf39d5805bbaf89a4b
--- /dev/null
+++ b/src/scontrol/info_res.c
@@ -0,0 +1,140 @@
+/*****************************************************************************\
+ *  info_res.c - reservation information functions for scontrol.
+ *****************************************************************************
+ *  Copyright (C) 2009 Lawrence Livermore National Security.
+ *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+ *  Written by David Bremer <dbremer@llnl.gov>
+ *  CODE-OCEC-09-009. All rights reserved.
+ *  
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
+ *  
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission 
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and 
+ *  distribute linked combinations including the two. You must obey the GNU 
+ *  General Public License in all respects for all of the code used other than 
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this 
+ *  exception to your version of the file(s), but you are not obligated to do 
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in 
+ *  the program, then also delete it here.
+ *  
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *  
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#include "scontrol.h"
+
+/* Load current reservation table information into *res_buffer_pptr */
+extern int 
+scontrol_load_reservations(reserve_info_msg_t **res_buffer_pptr)
+{
+	int error_code;
+	static reserve_info_msg_t *old_res_info_ptr = NULL;
+	reserve_info_msg_t *res_info_ptr = NULL;
+
+	if (old_res_info_ptr) {
+		error_code = slurm_load_reservations (
+						old_res_info_ptr->last_update,
+						&res_info_ptr);
+		if (error_code == SLURM_SUCCESS) {
+			slurm_free_reservation_info_msg (old_res_info_ptr);
+
+		} else if (slurm_get_errno () == SLURM_NO_CHANGE_IN_DATA) {
+			res_info_ptr = old_res_info_ptr;
+			error_code = SLURM_SUCCESS;
+			if (quiet_flag == -1)
+				printf ("slurm_load_reservations: no change in data\n");
+		}
+	}
+	else
+		error_code = slurm_load_reservations((time_t) NULL,
+						     &res_info_ptr);
+
+	if (error_code == SLURM_SUCCESS) {
+		old_res_info_ptr = res_info_ptr;
+		*res_buffer_pptr = res_info_ptr;
+	}
+
+	return error_code;
+}
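
scontrol_load_reservations() follows the caching pattern of the other
scontrol_load_* helpers: keep the previous snapshot in a static pointer, hand
its last_update timestamp back to the controller, and reuse the old copy when
SLURM_NO_CHANGE_IN_DATA comes back. A generic, self-contained sketch of the
pattern, with stubs standing in for the slurm_load_reservations() machinery:

    #include <stdio.h>
    #include <stdlib.h>
    #include <time.h>

    typedef struct { time_t last_update; int payload; } snapshot_t;
    enum { OK = 0, NO_CHANGE = 1 };

    /* Stub loader: pretend the server has nothing newer than time 0. */
    static int load(time_t since, snapshot_t **out)
    {
        if (since > 0)
            return NO_CHANGE;
        *out = malloc(sizeof(**out));
        (*out)->last_update = time(NULL);
        (*out)->payload = 42;
        return OK;
    }

    static snapshot_t *cached_load(void)
    {
        static snapshot_t *old = NULL;
        snapshot_t *fresh = NULL;
        int rc;

        if (old) {
            rc = load(old->last_update, &fresh);
            if (rc == OK)
                free(old);      /* replaced by newer data */
            else if (rc == NO_CHANGE)
                fresh = old;    /* reuse the cached copy */
        } else
            rc = load((time_t) 0, &fresh);

        if (fresh)
            old = fresh;
        return fresh;
    }

    int main(void)
    {
        snapshot_t *a = cached_load();
        snapshot_t *b = cached_load();  /* second call hits the cache */
        printf("same snapshot reused: %d\n", a == b);  /* 1 */
        return 0;
    }
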
+
+/*
+ * scontrol_print_res - print the specified reservation's information
+ * IN reservation_name - NULL to print information about all reservations
+ */
+extern void 
+scontrol_print_res (char *reservation_name) 
+{
+	int error_code, i, print_cnt = 0;
+	reserve_info_msg_t *res_info_ptr = NULL;
+	reserve_info_t *res_ptr = NULL;
+
+	error_code = scontrol_load_reservations(&res_info_ptr);
+	if (error_code) {
+		exit_code = 1;
+		if (quiet_flag != 1)
+			slurm_perror ("slurm_load_reservations error");
+		return;
+	}
+
+	if (quiet_flag == -1) {
+		char time_str[32];
+		slurm_make_time_str ((time_t *)&res_info_ptr->last_update, 
+			       time_str, sizeof(time_str));
+		printf ("last_update_time=%s, records=%d\n", 
+			time_str, res_info_ptr->record_count);
+	}
+
+	res_ptr = res_info_ptr->reservation_array;
+	for (i = 0; i < res_info_ptr->record_count; i++) {
+		if (reservation_name && 
+		    strcmp (reservation_name, res_ptr[i].name) != 0)
+			continue;
+		print_cnt++;
+		slurm_print_reservation_info (stdout, &res_ptr[i], 
+		                              one_liner);
+		if (reservation_name)
+			break;
+	}
+
+	if (print_cnt == 0) {
+		if (reservation_name) {
+			exit_code = 1;
+			if (quiet_flag != 1)
+				printf ("Reservation %s not found\n", 
+				        reservation_name);
+		} else if (quiet_flag != 1)
+			printf ("No reservations in the system\n");
+	}
+}
diff --git a/src/scontrol/scontrol.c b/src/scontrol/scontrol.c
index 8118836f10d6f8c6d67bbe438c20c90766e320f1..84d7045b2b254be995b276e1c0c553f7332bdbf1 100644
--- a/src/scontrol/scontrol.c
+++ b/src/scontrol/scontrol.c
@@ -3,14 +3,15 @@
  *	provides interface to read, write, update, and configurations.
  *****************************************************************************
  *  Copyright (C) 2002-2007 The Regents of the University of California.
- *  Copyright (C) 2008 Lawrence Livermore National Security.
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
  *  Portions Copyright (C) 2008 Vijay Ramasubramanian.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -51,7 +52,9 @@ int one_liner;		/* one record per line if =1 */
 int quiet_flag;		/* quiet=1, verbose=-1, normal=0 */
 int verbosity;		/* count of "-v" options */
 
+static void	_create_it (int argc, char *argv[]);
 static void	_delete_it (int argc, char *argv[]);
+static void	_show_it (int argc, char *argv[]);
 static int	_get_command (int *argc, char *argv[]);
 static void     _ping_slurmctld(char *control_machine, char *backup_controller);
 static void	_print_config (char *config_param);
@@ -457,19 +460,29 @@ static int
 _process_command (int argc, char *argv[]) 
 {
 	int error_code = 0;
+	char *tag = argv[0];
+	int taglen = 0;
 
 	if (argc < 1) {
 		exit_code = 1;
 		if (quiet_flag == -1)
 			fprintf(stderr, "no input");
+		return 0;
+	} else if (tag)
+		taglen = strlen(tag);
+	else {
+		if (quiet_flag == -1)
+			fprintf(stderr, "input problem");
+		return 0;
 	}
-	else if (strncasecmp (argv[0], "abort", 5) == 0) {
+
+	if (strncasecmp (tag, "abort", MAX(taglen, 5)) == 0) {
 		/* require full command name */
 		if (argc > 2) {
 			exit_code = 1;
 			fprintf (stderr,
 				 "too many arguments for keyword:%s\n", 
-				 argv[0]);
+				 tag);
 		}
 		error_code = slurm_shutdown (1);
 		if (error_code) {
@@ -478,92 +491,101 @@ _process_command (int argc, char *argv[])
 				slurm_perror ("slurm_shutdown error");
 		}
 	}
-	else if (strncasecmp (argv[0], "all", 3) == 0)
+	else if (strncasecmp (tag, "all", MAX(taglen, 2)) == 0)
 		all_flag = 1;
-	else if (strncasecmp (argv[0], "completing", 3) == 0) {
+	else if (strncasecmp (tag, "completing", MAX(taglen, 2)) == 0) {
 		if (argc > 1) {
 			exit_code = 1;
 			fprintf (stderr, 
 				 "too many arguments for keyword:%s\n", 
-				 argv[0]);
+				 tag);
 		}
 		scontrol_print_completing();
 	}
-	else if (strncasecmp (argv[0], "exit", 1) == 0) {
+	else if (strncasecmp (tag, "create", MAX(taglen, 2)) == 0) {
+		if (argc < 2) {
+			exit_code = 1;
+			fprintf (stderr, "too few arguments for %s keyword\n",
+				 tag);
+			return 0;
+		}		
+		_create_it ((argc - 1), &argv[1]);
+	}
+	else if (strncasecmp (tag, "exit", MAX(taglen, 1)) == 0) {
 		if (argc > 1) {
 			exit_code = 1;
 			fprintf (stderr, 
 				 "too many arguments for keyword:%s\n", 
-				 argv[0]);
+				 tag);
 		}
 		exit_flag = 1;
 	}
-	else if (strncasecmp (argv[0], "help", 2) == 0) {
+	else if (strncasecmp (tag, "help", MAX(taglen, 2)) == 0) {
 		if (argc > 1) {
 			exit_code = 1;
 			fprintf (stderr, 
 				 "too many arguments for keyword:%s\n",
-				 argv[0]);
+				 tag);
 		}
 		_usage ();
 	}
-	else if (strncasecmp (argv[0], "hide", 2) == 0)
+	else if (strncasecmp (tag, "hide", MAX(taglen, 2)) == 0)
 		all_flag = 0;
-	else if (strncasecmp (argv[0], "oneliner", 1) == 0) {
+	else if (strncasecmp (tag, "oneliner", MAX(taglen, 1)) == 0) {
 		if (argc > 1) {
 			exit_code = 1;
 			fprintf (stderr, 
 				 "too many arguments for keyword:%s\n",
-				 argv[0]);
+				 tag);
 		}
 		one_liner = 1;
 	}
-	else if (strncasecmp (argv[0], "pidinfo", 3) == 0) {
+	else if (strncasecmp (tag, "pidinfo", MAX(taglen, 3)) == 0) {
 		if (argc > 2) {
 			exit_code = 1;
 			fprintf (stderr, 
 				 "too many arguments for keyword:%s\n", 
-				 argv[0]);
+				 tag);
 		} else if (argc < 2) {
 			exit_code = 1;
 			fprintf (stderr, 
 				 "missing argument for keyword:%s\n", 
-				 argv[0]);
+				 tag);
 		} else
 			scontrol_pid_info ((pid_t) atol (argv[1]) );
 	}
-	else if (strncasecmp (argv[0], "ping", 3) == 0) {
+	else if (strncasecmp (tag, "ping", MAX(taglen, 3)) == 0) {
 		if (argc > 1) {
 			exit_code = 1;
 			fprintf (stderr, 
 				 "too many arguments for keyword:%s\n",
-				 argv[0]);
+				 tag);
 		}
 		_print_ping ();
 	}
-	else if ((strncasecmp (argv[0], "\\q", 2) == 0) ||
-		 (strncasecmp (argv[0], "quiet", 4) == 0)) {
+	else if ((strncasecmp (tag, "\\q", 2) == 0) ||
+		 (strncasecmp (tag, "quiet", MAX(taglen, 4)) == 0)) {
 		if (argc > 1) {
 			exit_code = 1;
 			fprintf (stderr, "too many arguments for keyword:%s\n",
-				 argv[0]);
+				 tag);
 		}
 		quiet_flag = 1;
 	}
-	else if (strncasecmp (argv[0], "quit", 4) == 0) {
+	else if (strncasecmp (tag, "quit", MAX(taglen, 4)) == 0) {
 		if (argc > 1) {
 			exit_code = 1;
 			fprintf (stderr, 
 				 "too many arguments for keyword:%s\n", 
-				 argv[0]);
+				 tag);
 		}
 		exit_flag = 1;
 	}
-	else if (strncasecmp (argv[0], "reconfigure", 3) == 0) {
+	else if (strncasecmp (tag, "reconfigure", MAX(taglen, 3)) == 0) {
 		if (argc > 2) {
 			exit_code = 1;
 			fprintf (stderr, "too many arguments for keyword:%s\n",
-			         argv[0]);
+			         tag);
 		}
 		error_code = slurm_reconfigure ();
 		if (error_code) {
@@ -572,43 +594,44 @@ _process_command (int argc, char *argv[])
 				slurm_perror ("slurm_reconfigure error");
 		}
 	}
-	else if (strncasecmp (argv[0], "checkpoint", 5) == 0) {
-		if (argc > 3) {
+	else if (strncasecmp (tag, "checkpoint", MAX(taglen, 2)) == 0) {
+		if (argc > 5) {
 			exit_code = 1;
 			if (quiet_flag != 1)
 				fprintf(stderr, 
 				        "too many arguments for keyword:%s\n", 
-				        argv[0]);
+				        tag);
 		}
 		else if (argc < 3) {
 			exit_code = 1;
 			if (quiet_flag != 1)
 				fprintf(stderr, 
 				        "too few arguments for keyword:%s\n", 
-				        argv[0]);
+				        tag);
 		}
 		else {
-			error_code = scontrol_checkpoint(argv[1], argv[2]);
+			error_code = scontrol_checkpoint(argv[1], argv[2], 
+							 argc - 3, &argv[3]);
 			if (error_code) {
 				exit_code = 1;
 				if (quiet_flag != 1)
-					slurm_perror ("slurm_checkpoint error");
+					slurm_perror ("scontrol_checkpoint error");
 			}
 		}
 	}
-	else if (strncasecmp (argv[0], "requeue", 3) == 0) {
+	else if (strncasecmp (tag, "requeue", MAX(taglen, 3)) == 0) {
 		if (argc > 2) {
 			exit_code = 1;
 			if (quiet_flag != 1)
 				fprintf(stderr,
 					"too many arguments for keyword:%s\n",
-					argv[0]);
+					tag);
 		} else if (argc < 2) {
 			exit_code = 1;
 			if (quiet_flag != 1)
 				fprintf(stderr,
 					"too few arguments for keyword:%s\n",
-					argv[0]);
+					tag);
 		} else {
 			error_code = scontrol_requeue(argv[1]);
 			if (error_code) {
@@ -619,21 +642,21 @@ _process_command (int argc, char *argv[])
 		}
 				
 	}
-	else if ((strncasecmp (argv[0], "suspend", 3) == 0)
-	||       (strncasecmp (argv[0], "resume", 3) == 0)) {
+	else if ((strncasecmp (tag, "suspend", MAX(taglen, 2)) == 0)
+	||       (strncasecmp (tag, "resume", MAX(taglen, 3)) == 0)) {
 		if (argc > 2) {
 			exit_code = 1;
 			if (quiet_flag != 1)
 				fprintf(stderr,
 					"too many arguments for keyword:%s\n",
-					argv[0]);
+					tag);
 		}
 		else if (argc < 2) {
 			exit_code = 1;
 			if (quiet_flag != 1)
 				fprintf(stderr,
 					"too few arguments for keyword:%s\n",
-					argv[0]);
+					tag);
 		} else {
 			error_code =scontrol_suspend(argv[0], argv[1]);
 			if (error_code) {
@@ -643,23 +666,26 @@ _process_command (int argc, char *argv[])
 			}
 		}
 	}
-	else if (strncasecmp (argv[0], "setdebug", 4) == 0) {
+	else if (strncasecmp (tag, "setdebug", MAX(taglen, 2)) == 0) {
 		if (argc > 2) {
 			exit_code = 1;
 			if (quiet_flag != 1)
-				fprintf(stderr, "too many arguments for keyword:%s\n",
-					argv[0]);
+				fprintf(stderr, 
+					"too many arguments for keyword:%s\n",
+					tag);
 		} else if (argc < 2) {
 			exit_code = 1;
 			if (quiet_flag != 1)
-				fprintf(stderr, "too few arguments for keyword:%s\n",
-					argv[0]);
+				fprintf(stderr, 
+					"too few arguments for keyword:%s\n",
+					tag);
 		} else {
 			int level = -1;
 			char *endptr;
 			char *levels[] = {
 				"quiet", "fatal", "error", "info", "verbose",
-				"debug", "debug2", "debug3", "debug4", "debug5", NULL};
+				"debug", "debug2", "debug3", "debug4", 
+				"debug5", NULL};
 			int index = 0;
 			while (levels[index]) {
 				if (strcasecmp(argv[1], levels[index]) == 0) {
@@ -669,13 +695,14 @@ _process_command (int argc, char *argv[])
 				index ++;
 			}
 			if (level == -1) {
-				level = (int)strtoul (argv[1], &endptr, 10);    /* effective levels: 0 - 9 */
+				/* effective levels: 0 - 9 */
+				level = (int)strtoul (argv[1], &endptr, 10);
 				if (*endptr != '\0' || level > 9) {
 					level = -1;
 					exit_code = 1;
 					if (quiet_flag != 1)
-						fprintf(stderr, "invalid debug level: %s\n",
-							argv[1]);
+						fprintf(stderr, "invalid debug "
+							"level: %s\n", argv[1]);
 				}
 			}
 			if (level != -1) {
@@ -688,91 +715,31 @@ _process_command (int argc, char *argv[])
 			}
 		}
 	}
-	else if (strncasecmp (argv[0], "show", 3) == 0) {
-		if (argc > 3) {
-			exit_code = 1;
-			if (quiet_flag != 1)
-				fprintf(stderr, 
-				        "too many arguments for keyword:%s\n", 
-				        argv[0]);
-		}
-		else if (argc < 2) {
-			exit_code = 1;
-			if (quiet_flag != 1)
-				fprintf(stderr, 
-				        "too few arguments for keyword:%s\n", 
-				        argv[0]);
-		}
-		else if (strncasecmp (argv[1], "config", 3) == 0) {
-			if (argc > 2)
-				_print_config (argv[2]);
-			else
-				_print_config (NULL);
-		}
-		else if (strncasecmp (argv[1], "daemons", 3) == 0) {
-			if (argc > 2) {
+	else if (strncasecmp (tag, "show", MAX(taglen, 3)) == 0) {
+		_show_it (argc, argv);
+	}
+	else if (strncasecmp (tag, "takeover", MAX(taglen, 8)) == 0) {
+		char *secondary = NULL;
+		slurm_ctl_conf_info_msg_t  *slurm_ctl_conf_ptr = NULL;
+
+		slurm_ctl_conf_ptr = slurm_conf_lock();
+		secondary = xstrdup(slurm_ctl_conf_ptr->backup_controller);
+		slurm_conf_unlock();
+
+		if ( secondary && secondary[0] != '\0' ) {
+			error_code = slurm_takeover();
+			if (error_code) {
 				exit_code = 1;
 				if (quiet_flag != 1)
-					fprintf(stderr,
-					        "too many arguments for keyword:%s\n", 
-					        argv[0]);
+					slurm_perror("slurm_takeover error");
 			}
-			_print_daemons ();
-		}
-		else if (strncasecmp (argv[1], "jobs", 3) == 0) {
-			if (argc > 2)
-				scontrol_print_job (argv[2]);
-			else
-				scontrol_print_job (NULL);
-		}
-		else if (strncasecmp (argv[1], "hostnames", 5) == 0) {
-			if (argc > 2)
-				scontrol_print_hosts(argv[2]);
-			else
-				scontrol_print_hosts(getenv("SLURM_NODELIST"));
-		}
-		else if (strncasecmp (argv[1], "hostlist", 5) == 0) {
-			if (argc != 3) {
-				exit_code = 1;
-				fprintf(stderr, "invalid encode argument\n");
-				_usage();
-			} else if (scontrol_encode_hostlist(argv[2]))
-				exit_code = 1;
-		}
-		else if (strncasecmp (argv[1], "nodes", 3) == 0) {
-			if (argc > 2)
-				scontrol_print_node_list (argv[2]);
-			else
-				scontrol_print_node_list (NULL);
-		}
-		else if (strncasecmp (argv[1], "partitions", 3) == 0) {
-			if (argc > 2)
-				scontrol_print_part (argv[2]);
-			else
-				scontrol_print_part (NULL);
-		}
-		else if (strncasecmp (argv[1], "slurmd", 6) == 0) {
-			if (argc > 2)
-				_print_slurmd(argv[2]);
-			else
-				_print_slurmd(NULL);
-		}
-		else if (strncasecmp (argv[1], "steps", 3) == 0) {
-			if (argc > 2)
-				scontrol_print_step (argv[2]);
-			else
-				scontrol_print_step (NULL);
+		} else {
+			fprintf(stderr, "slurm_takeover error: no backup "
+				"controller defined\n");
 		}
-		else {
-			exit_code = 1;
-			if (quiet_flag != 1)
-				fprintf (stderr,
-					 "invalid entity:%s for keyword:%s \n",
-					 argv[1], argv[0]);
-		}		
-
+		xfree(secondary);
 	}
-	else if (strncasecmp (argv[0], "shutdown", 8) == 0) {
+	else if (strncasecmp (tag, "shutdown", MAX(taglen, 8)) == 0) {
 		/* require full command name */
 		uint16_t options = 0;
 		if (argc == 2) {
@@ -790,7 +757,7 @@ _process_command (int argc, char *argv[])
 			exit_code = 1;
 			fprintf (stderr,
 				 "too many arguments for keyword:%s\n", 
-				 argv[0]);
+				 tag);
 		}
 		if (error_code == 0) {
 			error_code = slurm_shutdown(options);
@@ -801,59 +768,59 @@ _process_command (int argc, char *argv[])
 			}
 		}
 	}
-	else if (strncasecmp (argv[0], "update", 1) == 0) {
+	else if (strncasecmp (tag, "update", MAX(taglen, 1)) == 0) {
 		if (argc < 2) {
 			exit_code = 1;
 			fprintf (stderr, "too few arguments for %s keyword\n",
-				 argv[0]);
+				 tag);
 			return 0;
 		}		
 		_update_it ((argc - 1), &argv[1]);
 	}
-	else if (strncasecmp (argv[0], "delete", 3) == 0) {
+	else if (strncasecmp (tag, "delete", MAX(taglen, 1)) == 0) {
 		if (argc < 2) {
 			exit_code = 1;
 			fprintf (stderr, "too few arguments for %s keyword\n",
-				 argv[0]);
+				 tag);
 			return 0;
 		}
 		_delete_it ((argc - 1), &argv[1]);
 	}
-	else if (strncasecmp (argv[0], "verbose", 4) == 0) {
+	else if (strncasecmp (tag, "verbose", MAX(taglen, 4)) == 0) {
 		if (argc > 1) {
 			exit_code = 1;
 			fprintf (stderr,
 				 "too many arguments for %s keyword\n",
-				 argv[0]);
+				 tag);
 		}		
 		quiet_flag = -1;
 	}
-	else if (strncasecmp (argv[0], "version", 4) == 0) {
+	else if (strncasecmp (tag, "version", MAX(taglen, 4)) == 0) {
 		if (argc > 1) {
 			exit_code = 1;
 			fprintf (stderr,
 				 "too many arguments for %s keyword\n",
-				 argv[0]);
+				 tag);
 		}		
 		_print_version();
 	}
-	else if (strncasecmp (argv[0], "listpids", 8) == 0) {
+	else if (strncasecmp (tag, "listpids", MAX(taglen, 1)) == 0) {
 		if (argc > 3) {
 			exit_code = 1;
 			fprintf (stderr, 
 				 "too many arguments for keyword:%s\n", 
-				 argv[0]);
+				 tag);
 		} else {
 			scontrol_list_pids (argc == 1 ? NULL : argv[1],
 					    argc <= 2 ? NULL : argv[2]);
 		}
 	}
-	else if (strncasecmp (argv[0], "notify", 6) == 0) {
+	else if (strncasecmp (tag, "notify", MAX(taglen, 1)) == 0) {
 		if (argc < 3) {
 			exit_code = 1;
 			fprintf (stderr, 
 				 "too few arguments for keyword:%s\n", 
-				 argv[0]);
+				 tag);
 		} else if (scontrol_job_notify(argc-1, &argv[1])) {
 			exit_code = 1;
 			slurm_perror("job notify failure");
@@ -861,37 +828,210 @@ _process_command (int argc, char *argv[])
 	}
 	else {
 		exit_code = 1;
-		fprintf (stderr, "invalid keyword: %s\n", argv[0]);
+		fprintf (stderr, "invalid keyword: %s\n", tag);
 	}
 
 	return 0;
 }
 
+
 /* 
- * _delete_it - delete the slurm the specified slurm entity 
+ * _create_it - create a slurm configuration per the supplied arguments 
+ * IN argc - count of arguments
+ * IN argv - list of arguments
+ */
+static void
+_create_it (int argc, char *argv[]) 
+{
+	/* Scan for "res" first, anywhere in the args.  When creating
+	   a reservation there is a partition= option, which we don't
+	   want to mistake for a request to create a partition. */
+	int i, error_code = SLURM_SUCCESS;
+	for (i=0; i<argc; i++) {
+		char *tag = argv[i];
+		char *val = strchr(argv[i], '=');
+		int taglen;
+
+		if (val) {
+			taglen = val - argv[i];
+			val++;
+		} else {
+			taglen = strlen(tag);
+		}
+		if (!strncasecmp(tag, "ReservationName", MAX(taglen, 3))) {
+			error_code = scontrol_create_res(argc, argv);
+			break;
+		} else if (!strncasecmp(tag, "PartitionName", MAX(taglen, 3))) {
+			error_code = scontrol_create_part(argc, argv);
+			break;
+		}
+	}
+
+	if (i >= argc) {
+		exit_code = 1;
+		error("Invalid creation entity: %s", argv[0]);
+	} else if (error_code) 
+		exit_code = 1;
+}
+
+
+/* 
+ * _delete_it - delete the specified slurm entity 
  * IN argc - count of arguments
  * IN argv - list of arguments
  */
 static void
 _delete_it (int argc, char *argv[]) 
 {
-	delete_part_msg_t part_msg;
+	char *tag = NULL, *val = NULL;
+	int taglen = 0;
+
+	if (argc != 1) {
+		error("Only one option follows delete.  %d given.", argc);
+		exit_code = 1;
+		return;
+	}
+
+	tag = argv[0];
+	val = strchr(argv[0], '=');
+	if (val) {
+		taglen = val - argv[0];
+		val++;
+	} else {
+		error("Proper format is 'delete Partition=p'"
+		      " or 'delete Reservation=r'");
+		exit_code = 1;
+		return;
+	}
 
 	/* First identify the entity type to delete */
-	if (strncasecmp (argv[0], "PartitionName=", 14) == 0) {
-		part_msg.name = argv[0] + 14;
+	if (strncasecmp (tag, "PartitionName", MAX(taglen, 1)) == 0) {
+		delete_part_msg_t part_msg;
+		part_msg.name = val;
 		if (slurm_delete_partition(&part_msg)) {
 			char errmsg[64];
 			snprintf(errmsg, 64, "delete_partition %s", argv[0]);
 			slurm_perror(errmsg);
 		}
+	} else if (strncasecmp (tag, "ReservationName", MAX(taglen, 1)) == 0) {
+		reservation_name_msg_t   res_msg;
+		res_msg.name = val;
+		if (slurm_delete_reservation(&res_msg)) {
+			char errmsg[64];
+			snprintf(errmsg, 64, "delete_reservation %s", argv[0]);
+			slurm_perror(errmsg);
+		}
 	} else {
 		exit_code = 1;
-		fprintf(stderr, "Invalid deletion entity: %s\n", argv[1]);
+		fprintf(stderr, "Invalid deletion entity: %s\n", argv[0]);
 	}
 }
 
 
+/* 
+ * _show_it - print a description of the specified slurm entity 
+ * IN argc - count of arguments
+ * IN argv - list of arguments
+ */
+static void
+_show_it (int argc, char *argv[])
+{
+	char *tag = NULL, *val = NULL;
+	int taglen = 0;
+
+	if (argc > 3) {
+		exit_code = 1;
+		if (quiet_flag != 1)
+			fprintf(stderr, 
+			        "too many arguments for keyword:%s\n", 
+			        argv[0]);
+		return;
+	}
+	else if (argc < 2) {
+		exit_code = 1;
+		if (quiet_flag != 1)
+			fprintf(stderr, 
+			        "too few arguments for keyword:%s\n", argv[0]);
+		return;
+	}
+
+	tag = argv[1];
+	taglen = strlen(tag);
+	val = strchr(argv[1], '=');
+	if (val) {
+		taglen = val - argv[1];
+		val++;
+	} else if (argc == 3) {
+		val = argv[2];
+	} else {
+		val = NULL;
+	}
+
+	if (strncasecmp (tag, "config", MAX(taglen, 1)) == 0) {
+		_print_config (val);
+	}
+	else if (strncasecmp (tag, "daemons", MAX(taglen, 1)) == 0) {
+		if (val) {
+			exit_code = 1;
+			if (quiet_flag != 1)
+				fprintf(stderr,
+				        "too many arguments for keyword:%s\n", 
+				        argv[0]);
+		}
+		_print_daemons ();
+	}
+	else if (strncasecmp (tag, "jobs", MAX(taglen, 1)) == 0 ||
+		 strncasecmp (tag, "jobid", MAX(taglen, 1)) == 0 ) {
+		scontrol_print_job (val);
+	}
+	else if (strncasecmp (tag, "hostnames", MAX(taglen, 5)) == 0) {
+		if (val)
+			scontrol_print_hosts(val);
+		else
+			scontrol_print_hosts(getenv("SLURM_NODELIST"));
+	}
+	else if (strncasecmp (tag, "hostlist", MAX(taglen, 5)) == 0) {
+		if (!val) {
+			exit_code = 1;
+			fprintf(stderr, "invalid encode argument\n");
+			_usage();
+		} else if (scontrol_encode_hostlist(val))
+			exit_code = 1;
+	}
+	else if (strncasecmp (tag, "nodes", MAX(taglen, 1)) == 0) {
+		scontrol_print_node_list (val);
+	}
+	else if (strncasecmp (tag, "partitions", MAX(taglen, 1)) == 0 || 
+		 strncasecmp (tag, "partitionname", MAX(taglen, 1)) == 0) {
+		scontrol_print_part (val);
+	}
+	else if (strncasecmp (tag, "reservations", MAX(taglen, 1)) == 0 || 
+		 strncasecmp (tag, "reservationname", MAX(taglen, 1)) == 0) {
+		scontrol_print_res (val);
+	}
+	else if (strncasecmp (tag, "slurmd", MAX(taglen, 2)) == 0) {
+		_print_slurmd (val);
+	}
+	else if (strncasecmp (tag, "steps", MAX(taglen, 2)) == 0) {
+		scontrol_print_step (val);
+	}
+	else if (strncasecmp (tag, "topology", MAX(taglen, 1)) == 0) {
+		scontrol_print_topo (val);
+	}
+	else {
+		exit_code = 1;
+		if (quiet_flag != 1)
+			fprintf (stderr,
+				 "invalid entity:%s for keyword:%s \n",
+				 tag, argv[0]);
+	}		
+
+}
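
_show_it() accepts both spellings of an entity query, "show nodes=tux3" and
"show nodes tux3", by splitting on '=' first and only then falling back to
the next argument. A small illustrative sketch of that split (not SLURM's
code; unlike _show_it it modifies its input in place):

    #include <stdio.h>
    #include <string.h>

    static void split(int argc, char *argv[], char **tag, char **val)
    {
        char *eq = strchr(argv[0], '=');

        *tag = argv[0];
        if (eq) {
            *eq = '\0';          /* "nodes=tux3" -> "nodes", "tux3" */
            *val = eq + 1;
        } else if (argc == 2)
            *val = argv[1];      /* "nodes tux3" */
        else
            *val = NULL;         /* "nodes": show everything */
    }

    int main(void)
    {
        char buf[] = "nodes=tux3";
        char *args1[] = { buf };
        char *args2[] = { "nodes", "tux3" };
        char *tag, *val;

        split(1, args1, &tag, &val);
        printf("%s -> %s\n", tag, val);  /* nodes -> tux3 */
        split(2, args2, &tag, &val);
        printf("%s -> %s\n", tag, val);  /* nodes -> tux3 */
        return 0;
    }
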
+
+
 /* 
  * _update_it - update the slurm configuration per the supplied arguments 
  * IN argc - count of arguments
@@ -901,29 +1041,54 @@ static void
 _update_it (int argc, char *argv[]) 
 {
 	int i, error_code = SLURM_SUCCESS;
+	int nodetag=0, partag=0, jobtag=0;
+	int blocktag=0, subtag=0, restag=0;
 
 	/* First identify the entity to update */
 	for (i=0; i<argc; i++) {
-		if (strncasecmp (argv[i], "NodeName=", 9) == 0) {
-			error_code = scontrol_update_node (argc, argv);
-			break;
-		} else if (strncasecmp (argv[i], "PartitionName=", 14) == 0) {
-			error_code = scontrol_update_part (argc, argv);
-			break;
-		} else if (strncasecmp (argv[i], "JobId=", 6) == 0) {
-			error_code = scontrol_update_job (argc, argv);
-			break;
-		} else if (strncasecmp (argv[i], "BlockName=", 10) == 0) {
-			error_code = _update_bluegene_block (argc, argv);
-			break;
-		} else if (strncasecmp (argv[i], "SubBPName=", 10) == 0) {
-			error_code = _update_bluegene_subbp (argc, argv);
-			break;
+		char *tag = argv[i];
+		int taglen = 0;
+		char *val = strchr(argv[i], '=');
+		if (!val)
+			continue;
+		taglen = val - argv[i];
+		val++;
+
+		if (strncasecmp (tag, "NodeName", MAX(taglen, 5)) == 0) {
+			nodetag=1;
+		} else if (strncasecmp (tag, "PartitionName", MAX(taglen, 3)) == 0) {
+			partag=1;
+		} else if (strncasecmp (tag, "JobId", MAX(taglen, 3)) == 0) {
+			jobtag=1;
+		} else if (strncasecmp (tag, "BlockName", MAX(taglen, 3)) == 0) {
+			blocktag=1;
+		} else if (strncasecmp (tag, "SubBPName", MAX(taglen, 3)) == 0) {
+			subtag=1;
+		} else if (strncasecmp (tag, "ReservationName", MAX(taglen, 3)) == 0) {
+			restag=1;
 		}
-		
 	}
-	
-	if (i >= argc) {
+
+	/* The order of tests matters here.  An update job request can include
+	   partition and reservation tags, possibly before the jobid tag, but
+	   none of the other updates have a jobid tag, so check jobtag first.
+	   Likewise, check restag next, because reservations can have a 
+	   partition tag.  The order of the rest doesn't matter because there
+	   aren't any other duplicate tags.  */
+
+	if (jobtag)
+		error_code = scontrol_update_job (argc, argv);
+	else if (restag)
+		error_code = scontrol_update_res (argc, argv);
+	else if (nodetag)
+		error_code = scontrol_update_node (argc, argv);
+	else if (partag)
+		error_code = scontrol_update_part (argc, argv);
+	else if (blocktag)
+		error_code = _update_bluegene_block (argc, argv);
+	else if (subtag)
+		error_code = _update_bluegene_subbp (argc, argv);
+	else {
 		exit_code = 1;
 		fprintf(stderr, "No valid entity in update command\n");
 		fprintf(stderr, "Input line must include \"NodeName\", ");
@@ -931,9 +1096,11 @@ _update_it (int argc, char *argv[])
 		fprintf(stderr, "\"BlockName\", \"SubBPName\" "
 			"(i.e. bgl000[0-3]),");
 #endif
-		fprintf(stderr, "\"PartitionName\", or \"JobId\"\n");
+		fprintf(stderr, "\"PartitionName\", \"Reservation\", "
+				"or \"JobId\"\n");
 	}
-	else if (error_code) {
+
+	if (error_code) {
 		exit_code = 1;
 		slurm_perror ("slurm_update error");
 	}
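
As the comment above notes, one update line may carry several recognizable
tags, so the dispatch order matters: "update JobId=123 Partition=debug" must
reach scontrol_update_job() even though a partition tag is present. A toy
dispatcher making the precedence concrete (illustrative names only):

    #include <stdio.h>
    #include <strings.h>

    static const char *route(int argc, char *argv[])
    {
        int job = 0, res = 0, part = 0, i;

        for (i = 0; i < argc; i++) {
            if (!strncasecmp(argv[i], "JobId=", 6))
                job = 1;
            else if (!strncasecmp(argv[i], "ReservationName=", 16))
                res = 1;
            else if (!strncasecmp(argv[i], "PartitionName=", 14))
                part = 1;
        }
        if (job)  return "update_job";   /* checked first */
        if (res)  return "update_res";   /* reservations may carry a partition */
        if (part) return "update_part";
        return "error";
    }

    int main(void)
    {
        char *a[] = { "JobId=123", "Partition=debug" };
        char *b[] = { "ReservationName=maint", "PartitionName=debug" };

        printf("%s\n", route(2, a));  /* update_job */
        printf("%s\n", route(2, b));  /* update_res */
        return 0;
    }
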
@@ -959,12 +1126,26 @@ _update_bluegene_block (int argc, char *argv[])
 	part_msg.hidden = (uint16_t)INFINITE;
 
 	for (i=0; i<argc; i++) {
-		if (strncasecmp(argv[i], "BlockName=", 10) == 0)
-			part_msg.name = &argv[i][10];
-		else if (strncasecmp(argv[i], "State=", 6) == 0) {
-			if (strcasecmp(&argv[i][6], "ERROR") == 0)
+		char *tag = argv[i];
+		char *val = strchr(argv[i], '=');
+		int taglen = 0, vallen = 0;
+
+		if (val) {
+			taglen = val - argv[i];
+			val++;
+			vallen = strlen(val);
+		} else {
+			exit_code = 1;
+			error("Invalid input for BlueGene block update %s", argv[i]);
+			return 0;
+		}
+
+		if (strncasecmp(tag, "BlockName", MAX(taglen, 2)) == 0) {
+			part_msg.name = val;
+		} else if (strncasecmp(tag, "State", MAX(taglen, 2)) == 0) {
+			if (strncasecmp(val, "ERROR", MAX(vallen, 1)) == 0)
 				part_msg.state_up = 0;
-			else if (strcasecmp(&argv[i][6], "FREE") == 0)
+			else if (strncasecmp(val, "FREE", MAX(vallen, 1)) == 0)
 				part_msg.state_up = 1;
 			else {
 				exit_code = 1;
@@ -975,8 +1156,13 @@ _update_bluegene_block (int argc, char *argv[])
 				return 0;
 			}
 			update_cnt++;
+		} else {
+			exit_code = 1;
+			error("Invalid input for BlueGene block update %s", argv[i]);
+			return 0;
 		}
 	}
+
 	if(!part_msg.name) {
 		error("You didn't supply a name.");
 		return 0;
@@ -1012,12 +1198,27 @@ _update_bluegene_subbp (int argc, char *argv[])
 	part_msg.root_only = (uint16_t)INFINITE;
 
 	for (i=0; i<argc; i++) {
-		if (strncasecmp(argv[i], "SubBPName=", 10) == 0)
-			part_msg.name = &argv[i][10];
-		else if (strncasecmp(argv[i], "State=", 6) == 0) {
-			if (strcasecmp(&argv[i][6], "ERROR") == 0)
+		char *tag = argv[i];
+		char *val = strchr(argv[i], '=');
+		int taglen = 0, vallen = 0;
+
+		if (val) {
+			taglen = val - argv[i];
+			val++;
+			vallen = strlen(val);
+		} else {
+			exit_code = 1;
+			error("Invalid input for BlueGene SubBPName update %s",
+			      argv[i]);
+			return 0;
+		}
+
+		if (strncasecmp(tag, "SubBPName", MAX(taglen, 2)) == 0)
+			part_msg.name = val;
+		else if (strncasecmp(tag, "State", MAX(taglen, 2)) == 0) {
+			if (strncasecmp(val, "ERROR", MAX(vallen, 1)) == 0)
 				part_msg.state_up = 0;
-			else if (strcasecmp(&argv[i][6], "FREE") == 0)
+			else if (strncasecmp(val, "FREE", MAX(vallen, 1)) == 0)
 				part_msg.state_up = 1;
 			else {
 				exit_code = 1;
@@ -1028,8 +1229,14 @@ _update_bluegene_subbp (int argc, char *argv[])
 				return 0;
 			}
 			update_cnt++;
+		} else {
+			exit_code = 1;
+			error("Invalid input for BlueGene SubBPName update %s",
+			      argv[i]);
+			return 0;
 		}
 	}
+
 	if(!part_msg.name) {
 		error("You didn't supply a name.");
 		return 0;
@@ -1068,11 +1275,12 @@ scontrol [<OPTION>] [<COMMAND>]                                            \n\
                               generating a core file.                      \n\
      all                      display information about all partitions,    \n\
                               including hidden partitions.                 \n\
-     checkpoint <CH_OP><step> perform a checkpoint operation on identified \n\
-                              job step \n\
+     checkpoint <CH_OP> <ID>  perform a checkpoint operation on identified \n\
+                              job or job step \n\
      completing               display jobs in completing state along with  \n\
                               their completing or down nodes               \n\
-     delete <SPECIFICATIONS>  delete the specified partition, kill its jobs\n\
+     create <SPECIFICATIONS>  create a new partition or reservation        \n\
+     delete <SPECIFICATIONS>  delete the specified partition or reservation\n\
      exit                     terminate scontrol                           \n\
      help                     print this description of use.               \n\
      hide                     do not display information about hidden      \n\
@@ -1095,20 +1303,24 @@ scontrol [<OPTION>] [<COMMAND>]                                            \n\
      show <ENTITY> [<ID>]     display state of identified entity, default  \n\
                               is all records.                              \n\
      shutdown <OPTS>          shutdown slurm daemons                       \n\
+     takeover                 ask slurm backup controller to take over     \n\
+                              (the primary controller will be stopped)     \n\
      suspend <job_id>         suspend specified job                        \n\
      resume <job_id>          resume previously suspended job              \n\
-     update <SPECIFICATIONS>  update job, node, partition, or bluegene     \n\
-                              block/subbp configuration                    \n\
+     update <SPECIFICATIONS>  update job, node, partition, reservation, or \n\
+                              bluegene block/subbp configuration           \n\
      verbose                  enable detailed logging.                     \n\
      version                  display tool version number.                 \n\
      !!                       Repeat the last command entered.             \n\
                                                                            \n\
   <ENTITY> may be \"config\", \"daemons\", \"job\", \"node\", \"partition\"\n\
-           \"hostlist\", \"hostnames\", \"slurmd\",                        \n\
-           (for BlueGene only: \"block\", \"subbp\" or \"step\").          \n\
+       \"reservation\", \"hostlist\", \"hostnames\", \"slurmd\",           \n\
+       \"topology\", or \"step\"                                           \n\
+       (also for BlueGene only: \"block\" or \"subbp\").                  \n\
                                                                            \n\
   <ID> may be a configuration parameter name, job id, node name, partition \n\
-       name, job step id, or hostlist or pathname to a list of host names. \n\
+       name, reservation name, job step id, or hostlist or pathname to a   \n\
+       list of host names.                                                 \n\
                                                                            \n\
   <HOSTLIST> may either be a comma separated list of host names or the     \n\
        absolute pathname of a file (with leading '/' containing host names \n\
@@ -1122,7 +1334,7 @@ scontrol [<OPTION>] [<COMMAND>]                                            \n\
        otherwise all slurm daemons are shutdown                            \n\
                                                                            \n\
   Node names may be specified using simple range expressions,              \n\
-  (e.g. \"lx[10-20]\" corresponsds to lx10, lx11, lx12, ...)               \n\
+  (e.g. \"lx[10-20]\" corresponds to lx10, lx11, lx12, ...)                \n\
   The job step id is the job id followed by a period and the step id.      \n\
                                                                            \n\
   <SPECIFICATIONS> are specified in the same format as the configuration   \n\
@@ -1133,6 +1345,8 @@ scontrol [<OPTION>] [<COMMAND>]                                            \n\
                                                                            \n\
   <CH_OP> identifies checkpoint operations and may be \"able\", \"disable\",\n\
   \"enable\", \"create\", \"vacate\", \"restart\", or \"error\".           \n\
+  Additional options include \"ImageDir=<dir>\", \"MaxWait=<seconds>\", and\n\
+  \"StickToNodes\".                                                        \n\
                                                                            \n\
   All commands and options are case-insensitive, although node names and   \n\
   partition names tests are case-sensitive (node names \"LX\" and \"lx\"   \n\
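
The checkpoint options advertised in the help text above are parsed by the
new _parse_checkpoint_args()/_parse_restart_args() helpers in update_job.c
below.  A sketch of how they combine on the command line (the job id and
directory are illustrative, not taken from the patch):

        scontrol checkpoint create 1234 MaxWait=120 ImageDir=/tmp/ckpt
        scontrol checkpoint restart 1234 ImageDir=/tmp/ckpt StickToNodes

MaxWait applies to create and vacate, while StickToNodes is honored only by
restart.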
diff --git a/src/scontrol/scontrol.h b/src/scontrol/scontrol.h
index 3fd1d289736780c86813396c74705a571e0ab513..d6b4251e4e2e9d845117558ba8334ecc31ede96c 100644
--- a/src/scontrol/scontrol.h
+++ b/src/scontrol/scontrol.h
@@ -2,12 +2,14 @@
  *  scontrol.h - definitions for all scontrol modules
  *****************************************************************************
  *  Copyright (C) 2002-2007 The Regents of the University of California.
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -97,31 +99,37 @@ extern int input_words;	/* number of words of input permitted */
 extern int one_liner;	/* one record per line if =1 */
 extern int quiet_flag;	/* quiet=1, verbose=-1, normal=0 */
 
-extern int	scontrol_checkpoint(char *op, char *job_step_id_str);
+extern int	scontrol_checkpoint(char *op, char *job_step_id_str, int argc, 
+				    char **argv);
 extern int	scontrol_encode_hostlist(char *hostlist);
 extern int	scontrol_job_notify(int argc, char *argv[]);
 extern int 	scontrol_load_jobs (job_info_msg_t ** job_buffer_pptr);
 extern int 	scontrol_load_nodes (node_info_msg_t ** node_buffer_pptr, 
-			uint16_t show_flags);
+				     uint16_t show_flags);
 extern int 	scontrol_load_partitions (partition_info_msg_t 
-			**part_info_pptr);
+					  **part_info_pptr);
 extern void	scontrol_pid_info(pid_t job_pid);
 extern void	scontrol_print_completing (void);
 extern void	scontrol_print_completing_job(job_info_t *job_ptr, 
-				node_info_msg_t *node_info_msg);
+					      node_info_msg_t *node_info_msg);
 extern void	scontrol_print_job (char * job_id_str);
 extern void	scontrol_print_hosts (char * node_list);
 extern void	scontrol_print_node (char *node_name, 
-			node_info_msg_t *node_info_ptr);
+				     node_info_msg_t *node_info_ptr);
 extern void	scontrol_print_node_list (char *node_list);
 extern void	scontrol_print_part (char *partition_name);
+extern void	scontrol_print_res (char *reservation_name);
 extern void	scontrol_print_step (char *job_step_id_str);
+extern void	scontrol_print_topo (char *node_list);
 extern int	scontrol_requeue(char *job_step_id_str);
 extern int	scontrol_suspend(char *op, char *job_id_str);
 extern int	scontrol_update_job (int argc, char *argv[]);
 extern int	scontrol_update_node (int argc, char *argv[]);
 extern int	scontrol_update_part (int argc, char *argv[]);
+extern int	scontrol_update_res (int argc, char *argv[]);
 extern void     scontrol_list_pids(const char *jobid_str,
 				   const char *node_name);
+extern int	scontrol_create_part(int argc, char *argv[]);
+extern int	scontrol_create_res(int argc, char *argv[]);
 
 #endif
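
The widened scontrol_checkpoint() prototype threads the remaining
command-line words through to the checkpoint operation.  A hypothetical
call site matching the new signature (the option strings are illustrative):

        char *opts[] = { "MaxWait=120", "ImageDir=/tmp/ckpt" };
        int rc = scontrol_checkpoint("create", "1234.0", 2, opts);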
diff --git a/src/scontrol/update_job.c b/src/scontrol/update_job.c
index 27df3af0c6003f429b935675c3cf99eb4cd32ed6..29e95aeb342538a677516182d4104eb80d818590 100644
--- a/src/scontrol/update_job.c
+++ b/src/scontrol/update_job.c
@@ -2,13 +2,14 @@
  *  update_job.c - update job functions for scontrol.
  *****************************************************************************
  *  Copyright (C) 2002-2007 The Regents of the University of California.
- *  Copyright (C) 2008 Lawrence Livermore National Security.
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -38,22 +39,32 @@
 
 #include "scontrol.h"
 
+static int _parse_checkpoint_args(int argc, char **argv,
+				  uint16_t *max_wait, char **image_dir);
+static int _parse_restart_args(int argc, char **argv,
+			       uint16_t *stick, char **image_dir);
+
 /* 
  * scontrol_checkpoint - perform some checkpoint/resume operation
  * IN op - checkpoint operation
  * IN job_step_id_str - either a job name (for all steps of the given job) or 
  *			a step name: "<jid>.<step_id>"
+ * IN argc - argument count
+ * IN argv - arguments of the operation
  * RET 0 if no slurm error, errno otherwise. parsing error prints 
  *			error message and returns 0
  */
 extern int 
-scontrol_checkpoint(char *op, char *job_step_id_str)
+scontrol_checkpoint(char *op, char *job_step_id_str, int argc, char *argv[])
 {
 	int rc = SLURM_SUCCESS;
 	uint32_t job_id = 0, step_id = 0, step_id_set = 0;
 	char *next_str;
 	uint32_t ckpt_errno;
 	char *ckpt_strerror = NULL;
+	int oplen = strlen(op);
+	uint16_t max_wait = CKPT_WAIT, stick = 0;
+	char *image_dir = NULL;
 
 	if (job_step_id_str) {
 		job_id = (uint32_t) strtol (job_step_id_str, &next_str, 10);
@@ -71,7 +82,7 @@ scontrol_checkpoint(char *op, char *job_step_id_str)
 		return 0;
 	}
 
-	if (strncasecmp(op, "able", 2) == 0) {
+	if (strncasecmp(op, "able", MAX(oplen, 1)) == 0) {
 		time_t start_time;
 		rc = slurm_checkpoint_able (job_id, step_id, &start_time);
 		if (rc == SLURM_SUCCESS) {
@@ -89,7 +100,7 @@ scontrol_checkpoint(char *op, char *job_step_id_str)
 			rc = SLURM_SUCCESS;	/* not real error */
 		}
 	}
-	else if (strncasecmp(op, "complete", 3) == 0) {
+	else if (strncasecmp(op, "complete", MAX(oplen, 2)) == 0) {
 		/* Undocumented option used for testing purposes */
 		static uint32_t error_code = 1;
 		char error_msg[64];
@@ -97,17 +108,29 @@ scontrol_checkpoint(char *op, char *job_step_id_str)
 		rc = slurm_checkpoint_complete(job_id, step_id, (time_t) 0,
 			error_code++, error_msg);
 	}
-	else if (strncasecmp(op, "disable", 3) == 0)
+	else if (strncasecmp(op, "disable", MAX(oplen, 1)) == 0)
 		rc = slurm_checkpoint_disable (job_id, step_id);
-	else if (strncasecmp(op, "enable", 2) == 0)
+	else if (strncasecmp(op, "enable", MAX(oplen, 2)) == 0)
 		rc = slurm_checkpoint_enable (job_id, step_id);
-	else if (strncasecmp(op, "create", 2) == 0)
-		rc = slurm_checkpoint_create (job_id, step_id, CKPT_WAIT);
-	else if (strncasecmp(op, "vacate", 2) == 0)
-		rc = slurm_checkpoint_vacate (job_id, step_id, CKPT_WAIT);
-	else if (strncasecmp(op, "restart", 2) == 0)
-		rc = slurm_checkpoint_restart (job_id, step_id);
-	else if (strncasecmp(op, "error", 2) == 0) {
+	else if (strncasecmp(op, "create", MAX(oplen, 2)) == 0) {
+		if (_parse_checkpoint_args(argc, argv, &max_wait, &image_dir)) {
+			return 0;
+		}
+		rc = slurm_checkpoint_create (job_id, step_id, max_wait, image_dir);
+
+	} else if (strncasecmp(op, "vacate", MAX(oplen, 2)) == 0) {
+		if (_parse_checkpoint_args(argc, argv, &max_wait, &image_dir)) {
+			return 0;
+		}
+		rc = slurm_checkpoint_vacate (job_id, step_id, max_wait, image_dir);
+
+	} else if (strncasecmp(op, "restart", MAX(oplen, 2)) == 0) {
+		if (_parse_restart_args(argc, argv, &stick, &image_dir)) {
+			return 0;
+		}
+		rc = slurm_checkpoint_restart (job_id, step_id, stick, image_dir);
+
+	} else if (strncasecmp(op, "error", MAX(oplen, 2)) == 0) {
 		rc = slurm_checkpoint_error (job_id, step_id, 
 			&ckpt_errno, &ckpt_strerror);
 		if (rc == SLURM_SUCCESS) {
@@ -124,6 +147,47 @@ scontrol_checkpoint(char *op, char *job_step_id_str)
 	return rc;
 }
 
+static int
+_parse_checkpoint_args(int argc, char **argv, uint16_t *max_wait, char **image_dir)
+{
+	int i;
+	
+	for (i=0; i< argc; i++) {
+		if (strncasecmp(argv[i], "MaxWait=", 8) == 0) {
+			*max_wait = (uint16_t) strtol(&argv[i][8], 
+						      (char **) NULL, 10);
+		} else if (strncasecmp(argv[i], "ImageDir=", 9) == 0) {
+			*image_dir = &argv[i][9];
+		} else {
+			exit_code = 1;
+			error("Invalid input: %s", argv[i]);
+			error("Request aborted");
+			return -1;
+		}
+	}
+	return 0;
+}
+
+static int
+_parse_restart_args(int argc, char **argv, uint16_t *stick, char **image_dir)
+{
+	int i;
+	
+	for (i=0; i< argc; i++) {
+		if (strncasecmp(argv[i], "StickToNodes", 5) == 0) {
+			*stick = 1;
+		} else if (strncasecmp(argv[i], "ImageDir=", 9) == 0) {
+			*image_dir = &argv[i][9];
+		} else {
+			exit_code = 1;
+			error("Invalid input: %s", argv[i]);
+			error("Request aborted");
+			return -1;
+		}
+	}
+	return 0;
+}
+
 /*
  * scontrol_suspend - perform some suspend/resume operation
  * IN op - suspend/resume operation
@@ -151,7 +215,7 @@ scontrol_suspend(char *op, char *job_id_str)
 		return 0;
 	}
 
-	if (strncasecmp(op, "suspend", 3) == 0)
+	if (strncasecmp(op, "suspend", MAX(strlen(op), 2)) == 0)
 		rc = slurm_suspend (job_id);
 	else
 		rc = slurm_resume (job_id);
@@ -201,21 +265,42 @@ extern int
 scontrol_update_job (int argc, char *argv[]) 
 {
 	int i, update_cnt = 0;
+	char *tag, *val;
+	int taglen, vallen;
 	job_desc_msg_t job_msg;
 
 	slurm_init_job_desc_msg (&job_msg);	
 
 	for (i=0; i<argc; i++) {
-		if (strncasecmp(argv[i], "JobId=", 6) == 0)
-			job_msg.job_id = 
-				(uint32_t) strtol(&argv[i][6], 
-						 (char **) NULL, 10);
-		else if (strncasecmp(argv[i], "Comment=", 8) == 0) {
-			job_msg.comment = &argv[i][8];
+		tag = argv[i];
+		val = strchr(argv[i], '=');
+		if (val) {
+			taglen = val - argv[i];
+			val++;
+			vallen = strlen(val);
+		} else if (strncasecmp(tag, "Nice", MAX(strlen(tag), 2)) == 0) {
+			/* "Nice" is the only tag that might not have an 
+			   equal sign, so it is handled specially. */
+			job_msg.nice = NICE_OFFSET + 100;
 			update_cnt++;
+			continue;
+		} else {
+			exit_code = 1;
+			fprintf (stderr, "Invalid input: %s\n", argv[i]);
+			fprintf (stderr, "Request aborted\n");
+			return -1;
 		}
-		else if (strncasecmp(argv[i], "TimeLimit=", 10) == 0) {
-			int time_limit = time_str2mins(&argv[i][10]);
+
+		if (strncasecmp(tag, "JobId", MAX(taglen, 1)) == 0) {
+			job_msg.job_id = 
+				(uint32_t) strtol(val, (char **) NULL, 10);
+		}
+		else if (strncasecmp(tag, "Comment", MAX(taglen, 3)) == 0) {
+			job_msg.comment = val;
+			update_cnt++;
+		} 
+		else if (strncasecmp(tag, "TimeLimit", MAX(taglen, 2)) == 0) {
+			int time_limit = time_str2mins(val);
 			if ((time_limit < 0) && (time_limit != INFINITE)) {
 				error("Invalid TimeLimit value");
 				exit_code = 1;
@@ -224,15 +309,14 @@ scontrol_update_job (int argc, char *argv[])
 			job_msg.time_limit = time_limit;
 			update_cnt++;
 		}
-		else if (strncasecmp(argv[i], "Priority=", 9) == 0) {
+		else if (strncasecmp(tag, "Priority", MAX(taglen, 2)) == 0) {
 			job_msg.priority = 
-				(uint32_t) strtoll(&argv[i][9], 
-						(char **) NULL, 10);
+				(uint32_t) strtoll(val, (char **) NULL, 10);
 			update_cnt++;
 		}
-		else if (strncasecmp(argv[i], "Nice=", 5) == 0) {
+		else if (strncasecmp(tag, "Nice", MAX(taglen, 2)) == 0) {
 			int nice;
-			nice = strtoll(&argv[i][5], (char **) NULL, 10);
+			nice = strtoll(val, (char **) NULL, 10);
 			if (abs(nice) > NICE_OFFSET) {
 				error("Invalid nice value, must be between "
 					"-%d and %d", NICE_OFFSET, NICE_OFFSET);
@@ -242,29 +326,21 @@ scontrol_update_job (int argc, char *argv[])
 			job_msg.nice = NICE_OFFSET + nice;
 			update_cnt++;
 		}
-		else if (strncasecmp(argv[i], "Nice", 4) == 0) {
-			job_msg.nice = NICE_OFFSET + 100;
-			update_cnt++;
-		}		
-		else if (strncasecmp(argv[i], "ReqProcs=", 9) == 0) {
+		else if (strncasecmp(tag, "ReqProcs", MAX(taglen, 4)) == 0) {
 			job_msg.num_procs = 
-				(uint32_t) strtol(&argv[i][9], 
-						(char **) NULL, 10);
+				(uint32_t) strtol(val, (char **) NULL, 10);
 			update_cnt++;
 		}
-		else if (strncasecmp(argv[i], "Requeue=", 8) == 0) {
+		else if (strncasecmp(tag, "Requeue", MAX(taglen, 4)) == 0) {
 			job_msg.requeue = 
-				(uint16_t) strtol(&argv[i][8], 
-						(char **) NULL, 10);
+				(uint16_t) strtol(val, (char **) NULL, 10);
 			update_cnt++;
 		}
 		/* MinNodes was replaced by ReqNodes in SLURM version 1.2 */
-		else if ((strncasecmp(argv[i], "MinNodes=", 9) == 0) ||
-		         (strncasecmp(argv[i], "ReqNodes=", 9) == 0)) {
+		else if ((strncasecmp(tag, "MinNodes", MAX(taglen, 4)) == 0) ||
+		         (strncasecmp(tag, "ReqNodes", MAX(taglen, 8)) == 0)) {
 			char *tmp;
-			job_msg.min_nodes = 
-				(uint32_t) strtol(&argv[i][9],
-						 &tmp, 10);
+			job_msg.min_nodes = (uint32_t) strtol(val, &tmp, 10);
 			if (tmp[0] == '-') {
 				job_msg.max_nodes = (uint32_t)
 					strtol(&tmp[1], (char **) NULL, 10);
@@ -273,133 +349,136 @@ scontrol_update_job (int argc, char *argv[])
 						"minimum value (%u < %u)",
 						job_msg.max_nodes,
 						job_msg.min_nodes);
+					exit_code = 1;
+					return 0;
 				}
 			}
 			update_cnt++;
 		}
-		else if (strncasecmp(argv[i], "ReqSockets=", 11) == 0) {
+		else if (strncasecmp(tag, "ReqSockets", MAX(taglen, 4)) == 0) {
 			job_msg.min_sockets = 
-				(uint16_t) strtol(&argv[i][11],
-						 (char **) NULL, 10);
+				(uint16_t) strtol(val, (char **) NULL, 10);
 			update_cnt++;
 		}
-		else if (strncasecmp(argv[i], "ReqCores=", 9) == 0) {
+		else if (strncasecmp(tag, "ReqCores", MAX(taglen, 4)) == 0) {
 			job_msg.min_cores = 
-				(uint16_t) strtol(&argv[i][9],
-						 (char **) NULL, 10);
+				(uint16_t) strtol(val, (char **) NULL, 10);
 			update_cnt++;
 		}
-                else if (strncasecmp(argv[i], "TasksPerNode=", 13) == 0) {
+                else if (strncasecmp(tag, "TasksPerNode", MAX(taglen, 2))==0) {
                         job_msg.ntasks_per_node =
-                                (uint16_t) strtol(&argv[i][13],
-                                                 (char **) NULL, 10);
+                                (uint16_t) strtol(val, (char **) NULL, 10);
                         update_cnt++;
                 }
-		else if (strncasecmp(argv[i], "ReqThreads=", 11) == 0) {
+		else if (strncasecmp(tag, "ReqThreads", MAX(taglen, 4)) == 0) {
 			job_msg.min_threads = 
-				(uint16_t) strtol(&argv[i][11],
-						 (char **) NULL, 10);
+				(uint16_t) strtol(val, (char **) NULL, 10);
 			update_cnt++;
 		}
-		else if (strncasecmp(argv[i], "MinProcs=", 9) == 0) {
+		else if (strncasecmp(tag, "MinProcs", MAX(taglen, 4)) == 0) {
 			job_msg.job_min_procs = 
-				(uint32_t) strtol(&argv[i][9], 
-						(char **) NULL, 10);
+				(uint32_t) strtol(val, (char **) NULL, 10);
 			update_cnt++;
 		}
-		else if (strncasecmp(argv[i], "MinSockets=", 11) == 0) {
+		else if (strncasecmp(tag, "MinSockets", MAX(taglen, 4)) == 0) {
 			job_msg.job_min_sockets = 
-				(uint16_t) strtol(&argv[i][11], 
-						(char **) NULL, 10);
+				(uint16_t) strtol(val, (char **) NULL, 10);
 			update_cnt++;
 		}
-		else if (strncasecmp(argv[i], "MinCores=", 9) == 0) {
+		else if (strncasecmp(tag, "MinCores", MAX(taglen, 4)) == 0) {
 			job_msg.job_min_cores = 
-				(uint16_t) strtol(&argv[i][9], 
-						(char **) NULL, 10);
+				(uint16_t) strtol(val, (char **) NULL, 10);
 			update_cnt++;
 		}
-		else if (strncasecmp(argv[i], "MinThreads=", 11) == 0) {
+		else if (strncasecmp(tag, "MinThreads", MAX(taglen, 5)) == 0) {
 			job_msg.job_min_threads = 
-				(uint16_t) strtol(&argv[i][11], 
-						(char **) NULL, 10);
+				(uint16_t) strtol(val, (char **) NULL, 10);
 			update_cnt++;
 		}
-		else if (strncasecmp(argv[i], "MinMemoryNode=", 14) == 0) {
+		else if (strncasecmp(tag, "MinMemoryNode", 
+				     MAX(taglen, 10)) == 0) {
 			job_msg.job_min_memory = 
-				(uint32_t) strtol(&argv[i][14], 
-						(char **) NULL, 10);
+				(uint32_t) strtol(val, (char **) NULL, 10);
 			update_cnt++;
 		}
-		else if (strncasecmp(argv[i], "MinMemoryCPU=", 13) == 0) {
+		else if (strncasecmp(tag, "MinMemoryCPU", 
+				     MAX(taglen, 10)) == 0) {
 			job_msg.job_min_memory =
-				(uint32_t) strtol(&argv[i][13],
-						(char **) NULL, 10);
+				(uint32_t) strtol(val, (char **) NULL, 10);
 			job_msg.job_min_memory |= MEM_PER_CPU;
 			update_cnt++;
 		}
-		else if (strncasecmp(argv[i], "MinTmpDisk=", 11) == 0) {
+		else if (strncasecmp(tag, "MinTmpDisk", MAX(taglen, 5)) == 0) {
 			job_msg.job_min_tmp_disk = 
-				(uint32_t) strtol(&argv[i][11], 
-						(char **) NULL, 10);
+				(uint32_t) strtol(val, (char **) NULL, 10);
+			update_cnt++;
+		}
+		else if (strncasecmp(tag, "PartitionName", 
+				     MAX(taglen, 2)) == 0) {
+			job_msg.partition = val;
+			update_cnt++;
+		}
+		else if (strncasecmp(tag, "ReservationName", 
+				     MAX(taglen, 3)) == 0) {
+			job_msg.reservation = val;
 			update_cnt++;
 		}
-		else if (strncasecmp(argv[i], "Partition=", 10) == 0) {
-			job_msg.partition = &argv[i][10];
+		else if (strncasecmp(tag, "Name", MAX(taglen, 2)) == 0) {
+			job_msg.name = val;
 			update_cnt++;
 		}
-		else if (strncasecmp(argv[i], "Name=", 5) == 0) {
-			job_msg.name = &argv[i][5];
+		else if (strncasecmp(tag, "WCKey", MAX(taglen, 1)) == 0) {
+			job_msg.wckey = val;
 			update_cnt++;
 		}
-		else if (strncasecmp(argv[i], "Shared=", 7) == 0) {
-			if (strcasecmp(&argv[i][7], "YES") == 0)
+		else if (strncasecmp(tag, "Shared", MAX(taglen, 2)) == 0) {
+			if (strncasecmp(val, "YES", MAX(vallen, 1)) == 0)
 				job_msg.shared = 1;
-			else if (strcasecmp(&argv[i][7], "NO") == 0)
+			else if (strncasecmp(val, "NO", MAX(vallen, 1)) == 0)
 				job_msg.shared = 0;
 			else
 				job_msg.shared = 
-					(uint16_t) strtol(&argv[i][7], 
+					(uint16_t) strtol(val, 
 							(char **) NULL, 10);
 			update_cnt++;
 		}
-		else if (strncasecmp(argv[i], "Contiguous=", 11) == 0) {
-			if (strcasecmp(&argv[i][11], "YES") == 0)
+		else if (strncasecmp(tag, "Contiguous", MAX(taglen, 3)) == 0) {
+			if (strncasecmp(val, "YES", MAX(vallen, 1)) == 0)
 				job_msg.contiguous = 1;
-			else if (strcasecmp(&argv[i][11], "NO") == 0)
+			else if (strncasecmp(val, "NO", MAX(vallen, 1)) == 0)
 				job_msg.contiguous = 0;
 			else
 				job_msg.contiguous = 
-					(uint16_t) strtol(&argv[i][11], 
+					(uint16_t) strtol(val, 
 							(char **) NULL, 10);
 			update_cnt++;
 		}
-		else if (strncasecmp(argv[i], "ExcNodeList=", 12) == 0) {
-			job_msg.exc_nodes = &argv[i][12];
+		else if (strncasecmp(tag, "ExcNodeList", MAX(taglen, 1)) == 0) {
+			job_msg.exc_nodes = val;
 			update_cnt++;
 		}
-		else if (strncasecmp(argv[i], "ReqNodeList=", 12) == 0) {
-			job_msg.req_nodes = &argv[i][12];
+		else if (strncasecmp(tag, "ReqNodeList", MAX(taglen, 8)) == 0) {
+			job_msg.req_nodes = val;
 			update_cnt++;
 		}
-		else if (strncasecmp(argv[i], "Features=", 9) == 0) {
-			job_msg.features = &argv[i][9];
+		else if (strncasecmp(tag, "Features", MAX(taglen, 1)) == 0) {
+			job_msg.features = val;
 			update_cnt++;
 		}
-		else if (strncasecmp(argv[i], "Account=", 8) == 0) {
-			job_msg.account = &argv[i][8];
+		else if (strncasecmp(tag, "Account", MAX(taglen, 1)) == 0) {
+			job_msg.account = val;
 			update_cnt++;
 		}
-		else if (strncasecmp(argv[i], "Dependency=", 11) == 0) {
-			job_msg.dependency = &argv[i][11];
+		else if (strncasecmp(tag, "Dependency", MAX(taglen, 1)) == 0) {
+			job_msg.dependency = val;
 			update_cnt++;
 		}
 #ifdef HAVE_BG
-		else if (strncasecmp(argv[i], "Geometry=", 9) == 0) {
+		else if (strncasecmp(tag, "Geometry", MAX(taglen, 1)) == 0) {
 			char* token, *delimiter = ",x", *next_ptr;
 			int j, rc = 0;
 			uint16_t geo[SYSTEM_DIMENSIONS];
-			char* geometry_tmp = xstrdup(&argv[i][9]);
+			char* geometry_tmp = xstrdup(val);
 			char* original_ptr = geometry_tmp;
 			token = strtok_r(geometry_tmp, delimiter, &next_ptr);
 			for (j=0; j<SYSTEM_DIMENSIONS; j++) {
@@ -435,25 +514,25 @@ scontrol_update_job (int argc, char *argv[])
 			}
 		}
 
-		else if (strncasecmp(argv[i], "Rotate=", 7) == 0) {
+		else if (strncasecmp(tag, "Rotate", MAX(taglen, 2)) == 0) {
 			uint16_t rotate;
-			if (strcasecmp(&argv[i][7], "yes") == 0)
+			if (strncasecmp(val, "YES", MAX(vallen, 1)) == 0)
 				rotate = 1;
-			else if (strcasecmp(&argv[i][7], "no") == 0)
+			else if (strncasecmp(val, "NO", MAX(vallen, 1)) == 0)
 				rotate = 0;
 			else
-				rotate = (uint16_t) strtol(&argv[i][7], 
+				rotate = (uint16_t) strtol(val, 
 							   (char **) NULL, 10);
 			job_msg.rotate = rotate;
 			update_cnt++;
 		}
 #endif
-		else if (strncasecmp(argv[i], "Licenses=", 9) == 0) {
-			job_msg.licenses = &argv[i][9];
+		else if (strncasecmp(tag, "Licenses", MAX(taglen, 1)) == 0) {
+			job_msg.licenses = val;
 			update_cnt++;
 		}
-		else if (strncasecmp(argv[i], "StartTime=", 10) == 0) {
-			job_msg.begin_time = parse_time(&argv[i][10], 0);
+		else if (strncasecmp(tag, "StartTime", MAX(taglen, 2)) == 0) {
+			job_msg.begin_time = parse_time(val, 0);
 			update_cnt++;
 		}
 		else {
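
Every keyword in scontrol_update_job() now goes through the same two-step
parse: split the argument at the first '=', then match the tag by unique
prefix.  strncasecmp() is given MAX(taglen, N) characters, so a tag may be
abbreviated to any prefix of at least N letters while shorter, ambiguous
prefixes fail to match.  A self-contained sketch of the idiom (the helper
name is illustrative, not part of the patch):

        #include <strings.h>
        #define MAX(a,b) ((a) > (b) ? (a) : (b))

        /* Nonzero if "tag" (taglen chars) is a prefix of "full" at
         * least "min" characters long. */
        static int tag_match(const char *tag, int taglen,
                             const char *full, int min)
        {
                return strncasecmp(tag, full, MAX(taglen, min)) == 0;
        }

        /* tag_match("Time", 4, "TimeLimit", 2) matches;
         * tag_match("T",    1, "TimeLimit", 2) does not (too short). */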
diff --git a/src/scontrol/update_node.c b/src/scontrol/update_node.c
index 7c97237981bbe3c2c59141545be3576b943baa23..44454db0a942496ff1e421425c4ca5157df3cf8e 100644
--- a/src/scontrol/update_node.c
+++ b/src/scontrol/update_node.c
@@ -4,10 +4,11 @@
  *  Copyright (C) 2002-2007 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -49,30 +50,76 @@ extern int
 scontrol_update_node (int argc, char *argv[]) 
 {
 	int i, j, k, rc = 0, update_cnt = 0;
+
 	uint16_t state_val;
 	update_node_msg_t node_msg;
 	char *reason_str = NULL;
 	char *user_name;
+	char *tag, *val;
+	int taglen, vallen;
 
-	node_msg.node_names = NULL;
-	node_msg.features = NULL;
-	node_msg.reason = NULL;
-	node_msg.node_state = (uint16_t) NO_VAL;
+	slurm_init_update_node_msg(&node_msg);
 	for (i=0; i<argc; i++) {
-		if (strncasecmp(argv[i], "NodeName=", 9) == 0)
-			node_msg.node_names = &argv[i][9];
-		else if (strncasecmp(argv[i], "Features=", 9) == 0) {
-			node_msg.features = &argv[i][9];
+		tag = argv[i];
+		val = strchr(argv[i], '=');
+		if (val) {
+			taglen = val - argv[i];
+			val++;
+			vallen = strlen(val);
+		} else {
+			exit_code = 1;
+			error("Invalid input: %s  Request aborted", argv[i]);
+			return -1;
+		}
+		if (strncasecmp(tag, "NodeName", MAX(taglen, 1)) == 0)
+			node_msg.node_names = val;
+		else if (strncasecmp(tag, "Features", MAX(taglen, 1)) == 0) {
+			node_msg.features = val;
+			update_cnt++;
+		} else if (strncasecmp(tag, "Weight", MAX(taglen,1)) == 0) {
+			/* Logic borrowed from function _handle_uint32 */
+			char *endptr;
+			unsigned long num;
+			errno = 0;
+			num = strtoul(val, &endptr, 0);
+			if ((endptr[0] == 'k') || (endptr[0] == 'K')) {
+				num *= 1024;
+				endptr++;
+			}
+			if ((num == 0 && errno == EINVAL) ||
+			    (*endptr != '\0')) {
+				if ((strcasecmp(val, "UNLIMITED") == 0) ||
+				    (strcasecmp(val, "INFINITE")  == 0)) {
+					num = (uint32_t) INFINITE;
+				} else {
+					error("Weight value (%s) is not a "
+					      "valid number", val);
+					break;
+				}
+			} else if (errno == ERANGE) {
+				error("Weight value (%s) is out of range", 
+				      val);
+				break;
+			} else if (val[0] == '-') {
+				error("Weight value (%s) is less than zero", 
+				      val);
+				break;
+			} else if (num > 0xfffffff0) {
+				error("Weight value (%s) is greater than %u",
+					val, 0xfffffff0);
+				break;
+			}
+			node_msg.weight = num;
 			update_cnt++;
-		} else if (strncasecmp(argv[i], "Reason=", 7) == 0) {
+		} else if (strncasecmp(tag, "Reason", MAX(taglen, 1)) == 0) {
 			char time_buf[64], time_str[32];
 			time_t now;
-			int len = strlen(&argv[i][7]);
+			int len = strlen(val);
 			reason_str = xmalloc(len+1);
-			if (argv[i][7] == '"')
-				strcpy(reason_str, &argv[i][8]);
+			if (*val == '"')
+				strcpy(reason_str, val+1);
 			else
-				strcpy(reason_str, &argv[i][7]);
+				strcpy(reason_str, val);
 
 			len = strlen(reason_str) - 1;
 			if ((len >= 0) && (reason_str[len] == '"'))
@@ -89,35 +136,46 @@ scontrol_update_node (int argc, char *argv[])
 			}
 			now = time(NULL);
 			slurm_make_time_str(&now, time_str, sizeof(time_str));
-			snprintf(time_buf, sizeof(time_buf), "@%s]", time_str); 
+			snprintf(time_buf, sizeof(time_buf), "@%s]", time_str);
 			xstrcat(reason_str, time_buf);
 				
 			node_msg.reason = reason_str;
 			update_cnt++;
 		}
-		else if (strncasecmp(argv[i], "State=NoResp", 12) == 0) {
-			node_msg.node_state = NODE_STATE_NO_RESPOND;
-			update_cnt++;
-		}
-		else if (strncasecmp(argv[i], "State=DRAIN", 11) == 0) {
-			node_msg.node_state = NODE_STATE_DRAIN;
-			update_cnt++;
-		}
-		else if (strncasecmp(argv[i], "State=FAIL", 10) == 0) {
-			node_msg.node_state = NODE_STATE_FAIL;
-			update_cnt++;
-		}
-		else if (strncasecmp(argv[i], "State=RES", 9) == 0) {
-			node_msg.node_state = NODE_RESUME;
-			update_cnt++;
-		}
-		else if (strncasecmp(argv[i], "State=", 6) == 0) {
-			state_val = (uint16_t) NO_VAL;
-			for (j = 0; j <= NODE_STATE_END; j++) {
-				if (strcasecmp (node_state_string(j), 
-				                &argv[i][6]) == 0) {
-					state_val = (uint16_t) j;
-					break;
+		else if (strncasecmp(tag, "State", MAX(taglen, 1)) == 0) {
+			if (strncasecmp(val, "NoResp", 
+				        MAX(vallen, 3)) == 0) {
+				node_msg.node_state = NODE_STATE_NO_RESPOND;
+				update_cnt++;
+			} else if (strncasecmp(val, "DRAIN", 
+				   MAX(vallen, 3)) == 0) {
+				node_msg.node_state = NODE_STATE_DRAIN;
+				update_cnt++;
+			} else if (strncasecmp(val, "FAIL", 
+				   MAX(vallen, 3)) == 0) {
+				node_msg.node_state = NODE_STATE_FAIL;
+				update_cnt++;
+			} else if (strncasecmp(val, "RESUME", 
+				   MAX(vallen, 3)) == 0) {
+				node_msg.node_state = NODE_RESUME;
+				update_cnt++;
+			} else if (strncasecmp(val, "POWER_DOWN", 
+				   MAX(vallen, 7)) == 0) {
+				node_msg.node_state = NODE_STATE_POWER_SAVE;
+				update_cnt++;
+			} else if (strncasecmp(val, "POWER_UP", 
+				   MAX(vallen, 7)) == 0) {
+				node_msg.node_state = NODE_STATE_POWER_UP;
+				update_cnt++;
+			} else {
+				state_val = (uint16_t) NO_VAL;
+				for (j = 0; j < NODE_STATE_END; j++) {
+					if (strncasecmp (node_state_string(j), 
+							 val, 
+							 MAX(vallen, 3)) == 0) {
+						state_val = (uint16_t) j;
+						break;
+					}
 				}
 				if (j == NODE_STATE_END) {
 					exit_code = 1;
@@ -125,21 +183,24 @@ scontrol_update_node (int argc, char *argv[])
 						argv[i]);
 					fprintf (stderr, "Request aborted\n");
 					fprintf (stderr, "Valid states are: ");
-					fprintf (stderr, "NoResp DRAIN FAIL RESUME ");
+					fprintf (stderr,
+						 "NoResp DRAIN FAIL RESUME "
+						 "POWER_DOWN POWER_UP ");
 					for (k = 0; k < NODE_STATE_END; k++) {
 						fprintf (stderr, "%s ", 
 						         node_state_string(k));
 					}
 					fprintf (stderr, "\n");
-					fprintf (stderr, "Not all states are valid given a "
-						 "node's prior state\n");
+					fprintf (stderr, 
+						 "Not all states are valid "
+						 "given a node's prior "
+						 "state\n");
 					goto done;
 				}
+				node_msg.node_state = state_val;
+				update_cnt++;
 			}
-			node_msg.node_state = state_val;
-			update_cnt++;
-		}
-		else {
+		} else {
 			exit_code = 1;
 			fprintf (stderr, "Invalid input: %s\n", argv[i]);
 			fprintf (stderr, "Request aborted\n");
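
The new Weight= handling accepts an optional k/K multiplier and rejects
anything that does not parse cleanly.  A reduced sketch of the same
strtoul()-based validation, outside the scontrol message plumbing (the
function name is illustrative):

        #include <errno.h>
        #include <stdlib.h>

        /* Parse "123", "0x10", or "4k"; return 0 on success. */
        static int parse_weight(const char *val, unsigned long *out)
        {
                char *endptr;
                unsigned long num;

                errno = 0;
                num = strtoul(val, &endptr, 0);
                if ((endptr[0] == 'k') || (endptr[0] == 'K')) {
                        num *= 1024;
                        endptr++;
                }
                if ((*endptr != '\0') || (errno == ERANGE) ||
                    (val[0] == '-'))
                        return -1;	/* not a valid weight */
                *out = num;
                return 0;
        }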
diff --git a/src/scontrol/update_part.c b/src/scontrol/update_part.c
index 6ec952c928b547703225e54abff50da33d4a012f..dddb971e1c03e8bf6acb59d0b9a40f691d310eff 100644
--- a/src/scontrol/update_part.c
+++ b/src/scontrol/update_part.c
@@ -4,10 +4,11 @@
  *  Copyright (C) 2002-2006 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -38,157 +39,226 @@
 #include "src/common/proc_args.h"
 #include "src/scontrol/scontrol.h"
 
-
-/* 
- * scontrol_update_part - update the slurm partition configuration per the 
- *	supplied arguments 
- * IN argc - count of arguments
- * IN argv - list of arguments
- * RET 0 if no slurm error, errno otherwise. parsing error prints 
- *			error message and returns 0
- */
 extern int
-scontrol_update_part (int argc, char *argv[]) 
+scontrol_parse_part_options (int argc, char *argv[], int *update_cnt_ptr, 
+			     update_part_msg_t *part_msg_ptr) 
 {
-	int i, min, max, update_cnt = 0;
-	update_part_msg_t part_msg;
+	int i, min, max;
+	char *tag, *val;
+	int taglen, vallen;
+
+	if (!update_cnt_ptr) {
+		error("scontrol_parse_part_options internal error, "
+		      "update_cnt_ptr == NULL");
+		exit_code = 1;
+		return -1; 
+	}
+	if (!part_msg_ptr) {
+		error("scontrol_parse_part_options internal error, "
+		      "part_msg_ptr == NULL");
+		exit_code = 1;
+		return -1; 
+	}
 
-	slurm_init_part_desc_msg ( &part_msg );
 	for (i=0; i<argc; i++) {
-		if (strncasecmp(argv[i], "PartitionName=", 14) == 0)
-			part_msg.name = &argv[i][14];
-		else if (strncasecmp(argv[i], "MaxTime=", 8) == 0) {
-			int max_time = time_str2mins(&argv[i][8]);
+		tag = argv[i];
+		val = strchr(argv[i], '=');
+		if (val) {
+			taglen = val - argv[i];
+			val++;
+			vallen = strlen(val);
+		} else {
+			exit_code = 1;
+			error("Invalid input: %s  Request aborted", argv[i]);
+			return -1;
+		}
+
+		if (strncasecmp(tag, "PartitionName", MAX(taglen, 2)) == 0) {
+			part_msg_ptr->name = val;
+			(*update_cnt_ptr)++;
+		} else if (strncasecmp(tag, "MaxTime", MAX(taglen, 4)) == 0) {
+			int max_time = time_str2mins(val);
 			if ((max_time < 0) && (max_time != INFINITE)) {
 				exit_code = 1;
 				error("Invalid input %s", argv[i]);
-				return 0;
+				return -1;
+			}
+			part_msg_ptr->max_time = max_time;
+			(*update_cnt_ptr)++;
+		}
+		else if (strncasecmp(tag, "DefaultTime", MAX(taglen, 8)) == 0) {
+			int default_time = time_str2mins(val);
+			if ((default_time < 0) && (default_time != INFINITE)) {
+				exit_code = 1;
+				error("Invalid input %s", argv[i]);
+				return -1;
 			}
-			part_msg.max_time = max_time;
-			update_cnt++;
+			part_msg_ptr->default_time = default_time;
+			(*update_cnt_ptr)++;
 		}
-		else if (strncasecmp(argv[i], "MaxNodes=", 9) == 0) {
-			if ((strcasecmp(&argv[i][9],"UNLIMITED") == 0) ||
-			    (strcasecmp(&argv[i][8],"INFINITE") == 0))
-				part_msg.max_nodes = (uint32_t) INFINITE;
+		else if (strncasecmp(tag, "MaxNodes", MAX(taglen, 4)) == 0) {
+			if ((strcasecmp(val,"UNLIMITED") == 0) ||
+			    (strcasecmp(val,"INFINITE") == 0))
+				part_msg_ptr->max_nodes = (uint32_t) INFINITE;
 			else {
 				min = 1;
-				get_resource_arg_range(&argv[i][9],
+				get_resource_arg_range(val,
 					"MaxNodes", &min, &max, true);
-				part_msg.max_nodes = min;
+				part_msg_ptr->max_nodes = min;
 			}
-			update_cnt++;
+			(*update_cnt_ptr)++;
 		}
-		else if (strncasecmp(argv[i], "MinNodes=", 9) == 0) {
+		else if (strncasecmp(tag, "MinNodes", MAX(taglen, 2)) == 0) {
 			min = 1;
-			get_resource_arg_range(&argv[i][9],
+			get_resource_arg_range(val,
 				"MinNodes", &min, &max, true);
-			part_msg.min_nodes = min;
-			update_cnt++;
+			part_msg_ptr->min_nodes = min;
+			(*update_cnt_ptr)++;
 		}
-		else if (strncasecmp(argv[i], "Default=", 8) == 0) {
-			if (strcasecmp(&argv[i][8], "NO") == 0)
-				part_msg.default_part = 0;
-			else if (strcasecmp(&argv[i][8], "YES") == 0)
-				part_msg.default_part = 1;
+		else if (strncasecmp(tag, "Default", MAX(taglen, 7)) == 0) {
+			if (strncasecmp(val, "NO", MAX(vallen, 1)) == 0)
+				part_msg_ptr->default_part = 0;
+			else if (strncasecmp(val, "YES", MAX(vallen, 1)) == 0)
+				part_msg_ptr->default_part = 1;
 			else {
 				exit_code = 1;
 				error("Invalid input: %s", argv[i]);
 				error("Acceptable Default values "
 					"are YES and NO");
-				return 0;
+				return -1;
 			}
-			update_cnt++;
+			(*update_cnt_ptr)++;
 		}
-		else if (strncasecmp(argv[i], "Hidden=", 4) == 0) {
-			if (strcasecmp(&argv[i][7], "NO") == 0)
-				part_msg.hidden = 0;
-			else if (strcasecmp(&argv[i][7], "YES") == 0)
-				part_msg.hidden = 1;
+		else if (strncasecmp(tag, "Hidden", MAX(taglen, 1)) == 0) {
+			if (strncasecmp(val, "NO", MAX(vallen, 1)) == 0)
+				part_msg_ptr->hidden = 0;
+			else if (strncasecmp(val, "YES", MAX(vallen, 1)) == 0)
+				part_msg_ptr->hidden = 1;
 			else {
 				exit_code = 1;
 				error("Invalid input: %s", argv[i]);
 				error("Acceptable Hidden values "
 					"are YES and NO");
-				return 0;
+				return -1;
 			}
-			update_cnt++;
+			(*update_cnt_ptr)++;
 		}
-		else if (strncasecmp(argv[i], "RootOnly=", 4) == 0) {
-			if (strcasecmp(&argv[i][9], "NO") == 0)
-				part_msg.root_only = 0;
-			else if (strcasecmp(&argv[i][9], "YES") == 0)
-				part_msg.root_only = 1;
+		else if (strncasecmp(tag, "RootOnly", MAX(taglen, 1)) == 0) {
+			if (strncasecmp(val, "NO", MAX(vallen, 1)) == 0)
+				part_msg_ptr->root_only = 0;
+			else if (strncasecmp(val, "YES", MAX(vallen, 1)) == 0)
+				part_msg_ptr->root_only = 1;
 			else {
 				exit_code = 1;
 				error("Invalid input: %s", argv[i]);
 				error("Acceptable RootOnly values "
 					"are YES and NO");
-				return 0;
+				return -1;
 			}
-			update_cnt++;
+			(*update_cnt_ptr)++;
 		}
-		else if (strncasecmp(argv[i], "Shared=", 7) == 0) {
-			if (strncasecmp(&argv[i][7], "NO", 2) == 0) {
-				part_msg.max_share = 1;
-			} else if (strncasecmp(&argv[i][7], "EXCLUSIVE", 9) == 0) {
-				part_msg.max_share = 0;
-			} else if (strncasecmp(&argv[i][7], "YES:", 4) == 0) {
-				part_msg.max_share = (uint16_t) strtol(&argv[i][11], 
-					(char **) NULL, 10);
-			} else if (strncasecmp(&argv[i][7], "YES", 3) == 0) {
-				part_msg.max_share = (uint16_t) 4;
-			} else if (strncasecmp(&argv[i][7], "FORCE:", 6) == 0) {
-				part_msg.max_share = (uint16_t) strtol(&argv[i][13],
-					(char **) NULL, 10) | SHARED_FORCE;
-			} else if (strncasecmp(&argv[i][7], "FORCE", 5) == 0) {
-				part_msg.max_share = (uint16_t) 4 |
-					SHARED_FORCE;
+		else if (strncasecmp(tag, "Shared", MAX(taglen, 2)) == 0) {
+			char *colon_pos = strchr(val, ':');
+			if (colon_pos) {
+				*colon_pos = '\0';
+				vallen = strlen(val);
+			}
+			if (strncasecmp(val, "NO", MAX(vallen, 1)) == 0) {
+				part_msg_ptr->max_share = 1;
+
+			} else if (strncasecmp(val, "EXCLUSIVE", MAX(vallen, 1)) == 0) {
+				part_msg_ptr->max_share = 0;
+
+			} else if (strncasecmp(val, "YES", MAX(vallen, 1)) == 0) {
+				if (colon_pos) {
+					part_msg_ptr->max_share = (uint16_t) strtol(colon_pos+1, 
+						(char **) NULL, 10);
+				} else {
+					part_msg_ptr->max_share = (uint16_t) 4;
+				}
+			} else if (strncasecmp(val, "FORCE", MAX(vallen, 1)) == 0) {
+				if (colon_pos) {
+					part_msg_ptr->max_share = (uint16_t) strtol(colon_pos+1,
+						(char **) NULL, 10) | SHARED_FORCE;
+				} else {
+					part_msg_ptr->max_share = (uint16_t) 4 |
+						SHARED_FORCE;
+				}
 			} else {
 				exit_code = 1;
 				error("Invalid input: %s", argv[i]);
 				error("Acceptable Shared values are "
 					"NO, EXCLUSIVE, YES:#, and FORCE:#");
-				return 0;
+				return -1;
 			}
-			update_cnt++;
+			(*update_cnt_ptr)++;
 		}
-		else if (strncasecmp(argv[i], "Priority=", 9) == 0) {
-			part_msg.priority = (uint16_t) strtol(&argv[i][9], 
+		else if (strncasecmp(tag, "Priority", MAX(taglen, 2)) == 0) {
+			part_msg_ptr->priority = (uint16_t) strtol(val, 
 					(char **) NULL, 10);
-			update_cnt++;
+			(*update_cnt_ptr)++;
 		}
-		else if (strncasecmp(argv[i], "State=", 6) == 0) {
-			if (strcasecmp(&argv[i][6], "DOWN") == 0)
-				part_msg.state_up = 0;
-			else if (strcasecmp(&argv[i][6], "UP") == 0)
-				part_msg.state_up = 1;
+		else if (strncasecmp(tag, "State", MAX(taglen, 2)) == 0) {
+			if (strncasecmp(val, "DOWN", MAX(vallen, 1)) == 0)
+				part_msg_ptr->state_up = 0;
+			else if (strncasecmp(val, "UP", MAX(vallen, 1)) == 0)
+				part_msg_ptr->state_up = 1;
 			else {
 				exit_code = 1;
 				error("Invalid input: %s", argv[i]);
 				error("Acceptable State values "
 					"are UP and DOWN");
-				return 0;
+				return -1;
 			}
-			update_cnt++;
+			(*update_cnt_ptr)++;
 		}
-		else if (strncasecmp(argv[i], "Nodes=", 6) == 0) {
-			part_msg.nodes = &argv[i][6];
-			update_cnt++;
+		else if (strncasecmp(tag, "Nodes", MAX(taglen, 1)) == 0) {
+			part_msg_ptr->nodes = val;
+			(*update_cnt_ptr)++;
 		}
-		else if (strncasecmp(argv[i], "AllowGroups=", 12) == 0) {
-			part_msg.allow_groups = &argv[i][12];
-			update_cnt++;
+		else if (strncasecmp(tag, "AllowGroups", MAX(taglen, 1)) == 0) {
+			part_msg_ptr->allow_groups = val;
+			(*update_cnt_ptr)++;
+		}
+		else if (strncasecmp(tag, "AllocNodes", MAX(taglen, 1)) == 0) {
+			part_msg_ptr->allow_alloc_nodes = val;
+			(*update_cnt_ptr)++;
 		}
 		else {
 			exit_code = 1;
 			error("Invalid input: %s", argv[i]);
 			error("Request aborted");
-			return 0;
+			return -1;
 		}
 	}
+	return 0;
+}
 
-	if (update_cnt == 0) {
+
+
+/* 
+ * scontrol_update_part - update the slurm partition configuration per the 
+ *	supplied arguments 
+ * IN argc - count of arguments
+ * IN argv - list of arguments
+ * RET 0 if no slurm error, errno otherwise. parsing error prints 
+ *			error message and returns 0
+ */
+extern int
+scontrol_update_part (int argc, char *argv[]) 
+{
+	int update_cnt = 0;
+	update_part_msg_t part_msg;
+
+	slurm_init_part_desc_msg ( &part_msg );
+	if (scontrol_parse_part_options (argc, argv, &update_cnt, &part_msg))
+		return 0;
+
+	if (part_msg.name == NULL) {
+		exit_code = 1;
+		error("PartitionName must be given.");
+		return 0;
+	}
+	if (update_cnt <= 1) {
 		exit_code = 1;
 		error("No changes specified");
 		return 0;
@@ -200,3 +270,49 @@ scontrol_update_part (int argc, char *argv[])
 	} else
 		return 0;
 }
+
+
+
+/* 
+ * scontrol_create_part - create a slurm partition configuration per the 
+ *	supplied arguments 
+ * IN argc - count of arguments
+ * IN argv - list of arguments
+ * RET 0 if no slurm error, errno otherwise. parsing error prints 
+ *			error message and returns 0
+ */
+extern int
+scontrol_create_part (int argc, char *argv[]) 
+{
+	int update_cnt = 0;
+	update_part_msg_t part_msg;
+
+	slurm_init_part_desc_msg ( &part_msg );
+	if (scontrol_parse_part_options (argc, argv, &update_cnt, &part_msg))
+		return 0;
+
+	if (part_msg.name == NULL) {
+		exit_code = 1;
+		error("PartitionName must be given.");
+		return 0;
+	}
+	if (update_cnt == 0) {
+		exit_code = 1;
+		error("No parameters specified");
+		return 0;
+	}
+
+	if (slurm_create_partition(&part_msg)) {
+		exit_code = 1;
+		slurm_perror("Error creating the partition");
+		return slurm_get_errno ();
+	} else
+		return 0;
+}
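
Shared= is the one partition option whose value carries its own count, so
scontrol_parse_part_options() cuts the value at the colon before prefix
matching it.  The essential shape of that split (SHARED_FORCE comes from
the SLURM headers; the fragment is illustrative):

        char *colon_pos = strchr(val, ':');
        if (colon_pos) {
                *colon_pos = '\0';	/* keyword now ends at the colon */
                vallen = strlen(val);
        }
        if (strncasecmp(val, "FORCE", MAX(vallen, 1)) == 0)
                max_share = (uint16_t) (colon_pos ?
                        strtol(colon_pos + 1, NULL, 10) : 4) | SHARED_FORCE;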
diff --git a/src/sinfo/Makefile.in b/src/sinfo/Makefile.in
index 02b1853358d022de2a5217a1c5f6292d863aa0c1..bf4dd0cf9b0c810f42d30a70021a5da272a16664 100644
--- a/src/sinfo/Makefile.in
+++ b/src/sinfo/Makefile.in
@@ -47,14 +47,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -109,6 +113,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
diff --git a/src/sinfo/opts.c b/src/sinfo/opts.c
index c46efbf2603d35f78a6f9bdb389672554af70c4c..e3b71d7fe9e776572c853f7107d11ee950a6a12a 100644
--- a/src/sinfo/opts.c
+++ b/src/sinfo/opts.c
@@ -2,13 +2,14 @@
  *  opts.c - sinfo command line option processing functions
  *****************************************************************************
  *  Copyright (C) 2002-2007 The Regents of the University of California.
- *  Copyright (C) 2008 Lawrence Livermore National Security.
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Joey Ekstrom <ekstrom1@llnl.gov>, Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -238,7 +239,7 @@ extern void parse_command_line(int argc, char *argv[])
 		} else if (params.list_reasons) {
 			params.format = params.long_output ?  
 			  "%50R %6t %N" : 
-			  "%35R %N";
+			  "%50R %N";
 
 		} else if ((env_val = getenv ("SINFO_FORMAT"))) {
 			params.format = xstrdup(env_val);
@@ -364,16 +365,18 @@ _node_state_list (void)
 	all_states = xstrdup (node_state_string_compact (0));
 	for (i = 1; i < NODE_STATE_END; i++) {
 		xstrcat (all_states, ",");
-		xstrcat (all_states, node_state_string_compact(i));
+		xstrcat (all_states, node_state_string(i));
 	}
 
-	xstrcat (all_states, ",");
-	xstrcat (all_states, 
-		node_state_string_compact(NODE_STATE_DRAIN));
-
-	xstrcat (all_states, ",");
-	xstrcat (all_states, 
-		node_state_string_compact(NODE_STATE_COMPLETING));
+	xstrcat(all_states, ",DRAIN,DRAINED,DRAINING,NO_RESPOND");
+	xstrcat(all_states, ",");
+	xstrcat(all_states, node_state_string(NODE_STATE_COMPLETING));
+	xstrcat(all_states, ",");
+	xstrcat(all_states, node_state_string(NODE_STATE_POWER_SAVE));
+	xstrcat(all_states, ",");
+	xstrcat(all_states, node_state_string(NODE_STATE_FAIL));
+	xstrcat(all_states, ",");
+	xstrcat(all_states, node_state_string(NODE_STATE_MAINT));
 
 	for (i = 0; i < strlen (all_states); i++)
 		all_states[i] = tolower (all_states[i]);
@@ -387,8 +390,8 @@ _node_state_equal (int i, const char *str)
 {
 	int len = strlen (str);
 
-	if (  (strncasecmp (node_state_string_compact(i), str, len) == 0) 
-	   || (strncasecmp (node_state_string(i),         str, len) == 0)) 
+	if ((strncasecmp(node_state_string_compact(i), str, len) == 0) ||
+	    (strncasecmp(node_state_string(i),         str, len) == 0)) 
 		return (true);
 	return (false);
 }
@@ -403,16 +406,30 @@ static int
 _node_state_id (char *str)
 {	
 	int i;
+	int len = strlen (str);
+
 	for (i = 0; i < NODE_STATE_END; i++) {
 		if (_node_state_equal (i, str))
 			return (i);
 	}
 
-	if  (_node_state_equal (NODE_STATE_DRAIN, str))
+	if (strncasecmp("DRAIN", str, len) == 0)
 		return NODE_STATE_DRAIN;
-
+	if (strncasecmp("DRAINED", str, len) == 0)
+		return NODE_STATE_DRAIN | NODE_STATE_IDLE;
+	if ((strncasecmp("DRAINING", str, len) == 0) ||
+	    (strncasecmp("DRNG", str, len) == 0))
+		return NODE_STATE_DRAIN | NODE_STATE_ALLOCATED;
 	if (_node_state_equal (NODE_STATE_COMPLETING, str))
 		return NODE_STATE_COMPLETING;
+	if (strncasecmp("NO_RESPOND", str, len) == 0)
+		return NODE_STATE_NO_RESPOND;
+	if (_node_state_equal (NODE_STATE_POWER_SAVE, str))
+		return NODE_STATE_POWER_SAVE;
+	if (_node_state_equal (NODE_STATE_FAIL, str))
+		return NODE_STATE_FAIL;
+	if (_node_state_equal (NODE_STATE_MAINT, str))
+		return NODE_STATE_MAINT;
 
 	return (-1);
 }
@@ -509,6 +526,12 @@ _parse_format( char* format )
 					field_size, 
 					right_justify, 
 					suffix );
+		} else if (field[0] == 'L') {
+			params.match_flags.default_time_flag = true;
+			format_add_default_time( params.format_list, 
+					field_size, 
+					right_justify, 
+					suffix );
 		} else if (field[0] == 'm') {
 			params.match_flags.memory_flag = true;
 			format_add_memory( params.format_list, 
@@ -547,9 +570,14 @@ _parse_format( char* format )
 		} else if (field[0] == 's') {
 			params.match_flags.job_size_flag = true;
 			format_add_size( params.format_list, 
-					field_size, 
-					right_justify, 
-					suffix );
+					 field_size, 
+					 right_justify, 
+					 suffix );
+		} else if (field[0] == 'S') {
+			format_add_alloc_nodes( params.format_list, 
+						field_size, 
+						right_justify, 
+						suffix );
 		} else if (field[0] == 't') {
 			params.match_flags.state_flag = true;
 			format_add_state_compact( params.format_list, 
@@ -687,6 +715,8 @@ void _print_options( void )
 	printf("bg_flag         = %s\n", params.bg_flag ? "true" : "false");
 	printf("cpus_flag       = %s\n", params.match_flags.cpus_flag ?
 			"true" : "false");
+	printf("default_time_flag = %s\n", params.match_flags.default_time_flag ?
+					"true" : "false");
 	printf("disk_flag       = %s\n", params.match_flags.disk_flag ?
 			"true" : "false");
 	printf("features_flag   = %s\n", params.match_flags.features_flag ?
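
DRAINED and DRAINING are not base states: they are the DRAIN flag combined
with an idle or an allocated base state, which is why _node_state_id()
returns composite values for them:

        uint16_t drained  = NODE_STATE_DRAIN | NODE_STATE_IDLE;
        uint16_t draining = NODE_STATE_DRAIN | NODE_STATE_ALLOCATED;

The new 'L' and 'S' format letters print DefaultTime and AllocNodes, so an
invocation such as sinfo -o "%9P %10L %S" (field widths illustrative) shows
both new columns.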
diff --git a/src/sinfo/print.c b/src/sinfo/print.c
index fc0ae3c7317778a0a4aa89338e3f0cb3d6783ad6..1dfac41961119ea69decbfa825a4769b9bda9538 100644
--- a/src/sinfo/print.c
+++ b/src/sinfo/print.c
@@ -2,13 +2,15 @@
  *  print.c - sinfo print job functions
  *****************************************************************************
  *  Copyright (C) 2002-2007 The Regents of the University of California.
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Joey Ekstrom <ekstrom1@llnl.gov> and 
  *  Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *   
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -485,6 +487,25 @@ int _print_groups(sinfo_data_t * sinfo_data, int width,
 	return SLURM_SUCCESS;
 }
 
+int _print_alloc_nodes(sinfo_data_t * sinfo_data, int width,
+		       bool right_justify, char *suffix)
+{
+	if (sinfo_data) {
+		if (sinfo_data->part_info == NULL)
+			_print_str("n/a", width, right_justify, true);
+		else if (sinfo_data->part_info->allow_alloc_nodes)
+			_print_str(sinfo_data->part_info->allow_alloc_nodes, 
+				   width, right_justify, true);
+		else
+			_print_str("all", width, right_justify, true);
+	} else
+		_print_str("ALLOCNODES", width, right_justify, true);
+	
+	if (suffix)
+		printf("%s", suffix);
+	return SLURM_SUCCESS;
+}
+
 int _print_memory(sinfo_data_t * sinfo_data, int width,
 			bool right_justify, char *suffix)
 {
@@ -805,6 +826,26 @@ int _print_time(sinfo_data_t * sinfo_data, int width,
 	return SLURM_SUCCESS;
 }
 
+int _print_default_time(sinfo_data_t * sinfo_data, int width,
+				bool right_justify, char *suffix)
+{
+	if (sinfo_data) {
+		if ((sinfo_data->part_info == NULL) ||
+		    (sinfo_data->part_info->default_time == NO_VAL))	      
+			_print_str("n/a", width, right_justify, true);
+		else if (sinfo_data->part_info->default_time == INFINITE)
+			_print_str("infinite", width, right_justify, true);
+		else
+			_print_secs((sinfo_data->part_info->default_time * 60L),
+					width, right_justify, true);
+	} else
+		_print_str("DEFAULTTIME", width, right_justify, true);
+
+	if (suffix)
+		printf("%s", suffix);
+	return SLURM_SUCCESS;
+}
+
 int _print_weight(sinfo_data_t * sinfo_data, int width,
 			bool right_justify, char *suffix)
 {
diff --git a/src/sinfo/print.h b/src/sinfo/print.h
index a7d01c8d580afe06a6f3670937a14824eb6eaf83..542dd27c6766e27e43bfbd1adfabaa7cc8656f06 100644
--- a/src/sinfo/print.h
+++ b/src/sinfo/print.h
@@ -4,10 +4,11 @@
  *  Copyright (C) 2002-2006 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Joey Ekstrom <ekstrom1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -116,8 +117,12 @@ int  print_sinfo_list(List sinfo_list);
 	format_add_function(list,wid,right,suffix,_print_state_long)
 #define format_add_time(list,wid,right,suffix) \
 	format_add_function(list,wid,right,suffix,_print_time)
+#define format_add_default_time(list,wid,right,suffix) \
+	format_add_function(list,wid,right,suffix,_print_default_time)
 #define format_add_weight(list,wid,right,suffix) \
 	format_add_function(list,wid,right,suffix,_print_weight)
+#define format_add_alloc_nodes(list,wid,right,suffix) \
+	format_add_function(list,wid,right,suffix,_print_alloc_nodes)
 
 /*****************************************************************************
  * Print Field Functions
@@ -173,7 +178,11 @@ int _print_state_long(sinfo_data_t * sinfo_data, int width,
 			bool right_justify, char *suffix);
 int _print_time(sinfo_data_t * sinfo_data, int width, 
 			bool right_justify, char *suffix);
+int _print_default_time(sinfo_data_t * sinfo_data, int width, 
+			bool right_justify, char *suffix);
 int _print_weight(sinfo_data_t * sinfo_data, int width,
 			bool right_justify, char *suffix);
+int _print_alloc_nodes(sinfo_data_t * sinfo_data, int width,
+		       bool right_justify, char *suffix);
 
 #endif
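
Each format_add_* macro expands to format_add_function(), which appends one
print-field record pairing the column's print routine with its width,
justification, and suffix.  The record is roughly of this shape (abridged
from memory; the authoritative definition lives in print.h):

        typedef struct sinfo_format {
                int (*function)(sinfo_data_t *, int, bool, char *);
                uint32_t width;		/* field width */
                bool right_justify;	/* pad on the left if true */
                char *suffix;		/* printed after the field */
        } sinfo_format_t;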
diff --git a/src/sinfo/sinfo.c b/src/sinfo/sinfo.c
index 83bfac9a6a9711d00cc0188e6b9d00e5a653898c..d5f9af698ec0365c8df0f23e16dadb5673565c17 100644
--- a/src/sinfo/sinfo.c
+++ b/src/sinfo/sinfo.c
@@ -5,10 +5,11 @@
  *  Copyright (C) 2008 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Joey Ekstrom <ekstrom1@llnl.gov>, Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -49,6 +50,7 @@
 
 #ifdef HAVE_BG			     
 # include "src/plugins/select/bluegene/wrap_rm_api.h"
+# include "src/plugins/select/bluegene/plugin/bluegene.h"
 #endif
 
 /********************
@@ -341,7 +343,7 @@ static int _build_sinfo_data(List sinfo_list,
 	int i=0;
 	bg_info_record_t *bg_info_record = NULL;
 	int node_scaling = partition_msg->partition_array[0].node_scaling;
-	char *slurm_user = xstrdup(slurmctld_conf.slurm_user_name);
+	int block_error = 0;
 
 	for (i=0; i<node_msg->record_count; i++) {
 		node_ptr = &(node_msg->node_array[i]);
@@ -356,18 +358,29 @@ static int _build_sinfo_data(List sinfo_list,
 		node_ptr->threads = node_scaling;
 		node_ptr->cores = 0;
 		node_ptr->used_cpus = 0;
+		if((node_ptr->node_state & NODE_STATE_BASE) == NODE_STATE_DOWN) 
+			continue;
+
+		if(node_ptr->node_state & NODE_STATE_DRAIN) {
+			if(node_ptr->node_state & NODE_STATE_FAIL) {
+				node_ptr->node_state &= ~NODE_STATE_DRAIN;
+				node_ptr->node_state &= ~NODE_STATE_FAIL;
+			} else {
+				node_ptr->cores += node_scaling;
+			}
+		}
+		node_ptr->node_state |= NODE_STATE_IDLE;
 	}
 
 	for (i=0; i<node_select_msg->record_count; i++) {
 		bg_info_record = &(node_select_msg->bg_info_array[i]);
 		
 		/* this block is idle we won't mark it */
-		if (bg_info_record->state != RM_PARTITION_ERROR
-		    && !strcmp(slurm_user, bg_info_record->owner_name))
+		if (bg_info_record->job_running == NO_JOB_RUNNING)
 			continue;
+
 		_update_nodes_for_bg(node_scaling, node_msg, bg_info_record);
 	}
-	xfree(slurm_user);
 
 #endif
 	/* by default every partition is shown, even if no nodes */
@@ -402,6 +415,14 @@ static int _build_sinfo_data(List sinfo_list,
 			if (params.filtering && _filter_out(node_ptr))
 				continue;
 #ifdef HAVE_BG
+			if((node_ptr->node_state & NODE_STATE_DRAIN) 
+			   && (node_ptr->node_state & NODE_STATE_FAIL)) {
+				node_ptr->node_state &= ~NODE_STATE_DRAIN;
+				node_ptr->node_state &= ~NODE_STATE_FAIL;
+				block_error = 1;
+			} else
+				block_error = 0;
+			node_ptr->threads = node_scaling;
 			for(i=0; i<3; i++) {
 				int norm = 0;
 				switch(i) {
@@ -457,6 +478,9 @@ static int _build_sinfo_data(List sinfo_list,
 						NODE_STATE_FLAGS;
 					node_ptr->node_state |= 
 						NODE_STATE_DRAIN;
+					if(block_error)
+						node_ptr->node_state
+							|= NODE_STATE_FAIL;
 					node_ptr->threads = node_ptr->cores;
 					break;
 				default:
@@ -529,7 +553,33 @@ static bool _filter_out(node_info_t *node_ptr)
 
 		iterator = list_iterator_create(params.state_list);
 		while ((node_state = list_next(iterator))) {
-			if (*node_state & NODE_STATE_FLAGS) {
+			if (*node_state == 
+			    (NODE_STATE_DRAIN | NODE_STATE_ALLOCATED)) {
+				/* We search for anything that gets mapped to
+				 * DRAINING in node_state_string */
+				if (!(node_ptr->node_state & NODE_STATE_DRAIN))
+					continue;
+				if (((node_ptr->node_state & NODE_STATE_BASE) ==
+				     NODE_STATE_ALLOCATED) ||
+				    (node_ptr->node_state & 
+				     NODE_STATE_COMPLETING)) {
+					match = true;
+					break;
+				}
+			} else if (*node_state == 
+				   (NODE_STATE_DRAIN | NODE_STATE_IDLE)) {
+				/* We search for anything that gets mapped to
+				 * DRAINED in node_state_string */
+				if (!(node_ptr->node_state & NODE_STATE_DRAIN))
+					continue;
+				if (((node_ptr->node_state & NODE_STATE_BASE) !=
+				     NODE_STATE_ALLOCATED) &&
+				    (!(node_ptr->node_state & 
+				       NODE_STATE_COMPLETING))) {
+					match = true;
+					break;
+				}
+			} else if (*node_state & NODE_STATE_FLAGS) {
 				if (*node_state & node_ptr->node_state) {
 					match = true;
 					break;
@@ -650,6 +700,10 @@ static bool _match_part_data(sinfo_data_t *sinfo_ptr,
 	    (part_ptr->max_nodes != sinfo_ptr->part_info->max_nodes))
 		return false;
 
+	if (params.match_flags.default_time_flag &&
+	    (part_ptr->default_time != sinfo_ptr->part_info->default_time))
+		return false;
+
 	if (params.match_flags.max_time_flag &&
 	    (part_ptr->max_time != sinfo_ptr->part_info->max_time))
 		return false;
@@ -781,36 +835,47 @@ static void _update_nodes_for_bg(int node_scaling,
 				 bg_info_record_t *bg_info_record)
 {
 	node_info_t *node_ptr = NULL;
-	hostlist_t hl;
-	char *node_name = NULL;
+	int j = 0;
 
 	/* we are using less than one node */
 	if(bg_info_record->conn_type == SELECT_SMALL) 
 		node_scaling = bg_info_record->node_cnt;
-       		   
-	hl = hostlist_create(bg_info_record->nodes);
-	while (1) {
-		if (node_name)
-			free(node_name);
-		node_name = hostlist_shift(hl);
-		if (!node_name)
-			break;
-		node_ptr = _find_node(node_name, node_msg);
-		if (!node_ptr)
-			continue;
-		/* cores is overloaded to be the cnodes in an error
-		 * state and used_cpus is overloaded to be the nodes in
-		 * use.  No block should be sent in here if it isn't
-		 * in use (that doesn't mean in a free state, it means
-		 * the user isn't slurm or the block is in an error state.  
-		 */
-		if(bg_info_record->state == RM_PARTITION_ERROR) 
-			node_ptr->cores += node_scaling;
-		else
-			node_ptr->used_cpus += node_scaling;
-	}
-	hostlist_destroy(hl);
 	
+	j = 0;
+	while(bg_info_record->bp_inx[j] >= 0) {
+		int i2 = 0;
+		for(i2 = bg_info_record->bp_inx[j];
+		    i2 <= bg_info_record->bp_inx[j+1];
+		    i2++) {
+			node_ptr = &(node_msg->node_array[i2]);
+			/* cores is overloaded to be the cnodes in
+			 * an error state and used_cpus is overloaded
+			 * to be the nodes in use.  No block should
+			 * be sent in here if it isn't in use (which
+			 * doesn't just mean not in a free state; it
+			 * means a user other than slurm owns it or
+			 * the block is in an error state).
+			 */
+			if((node_ptr->node_state & NODE_STATE_BASE) 
+			   == NODE_STATE_DOWN) 
+				continue;
+			
+			if(bg_info_record->state
+			   == RM_PARTITION_ERROR) {
+				node_ptr->cores += node_scaling;
+				node_ptr->node_state 
+					|= NODE_STATE_DRAIN;
+				node_ptr->node_state
+					|= NODE_STATE_FAIL;
+			} else if(bg_info_record->job_running
+				  > NO_JOB_RUNNING)  
+				node_ptr->used_cpus += node_scaling;
+			else
+				error("_update_nodes_for_bg: block neither "
+				      "in use nor in an error state");
+		}
+		j += 2;
+	}
 }
 #endif
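
The rewritten _update_nodes_for_bg() depends on the bp_inx convention used by the BlueGene block records: the array holds inclusive [start, end] pairs of node-table indices and is terminated by a negative value. A self-contained sketch of walking such an array:

#include <stdio.h>

/* visit every node index covered by inclusive [start, end] pairs,
 * e.g. {0, 3, 8, 8, -1} covers nodes 0..3 and node 8 */
static void walk_bp_inx(const int *bp_inx)
{
	int j = 0;
	while (bp_inx[j] >= 0) {
		int i;
		for (i = bp_inx[j]; i <= bp_inx[j + 1]; i++)
			printf("node index %d\n", i);
		j += 2;
	}
}

int main(void)
{
	const int bp_inx[] = {0, 3, 8, 8, -1};
	walk_bp_inx(bp_inx);
	return 0;
}
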
 
diff --git a/src/sinfo/sinfo.h b/src/sinfo/sinfo.h
index fbae0bded7a3e10d95037e5b0bb1cb6ec5ddebd7..79a0493bd48f2d6c71e4662f32050078c3e8bf75 100644
--- a/src/sinfo/sinfo.h
+++ b/src/sinfo/sinfo.h
@@ -1,15 +1,16 @@
 /****************************************************************************\
  *  sinfo.h - definitions used for sinfo data functions
  *
- *  $Id: sinfo.h 13672 2008-03-19 23:10:58Z jette $
+ *  $Id: sinfo.h 16616 2009-02-20 17:00:27Z jette $
  *****************************************************************************
  *  Copyright (C) 2002-2006 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Joey Ekstrom <ekstrom1@llnl.gov>, Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -123,6 +124,7 @@ struct sinfo_match_flags {
 	bool features_flag;
 	bool groups_flag;
 	bool job_size_flag;
+	bool default_time_flag;
 	bool max_time_flag;
 	bool memory_flag;
 	bool partition_flag;
diff --git a/src/sinfo/sort.c b/src/sinfo/sort.c
index d49b81a2a44a4cdac211067fc770e1af4848f7de..4eb53621cbf3ccae877d4fae1df0b3dbddc35456 100644
--- a/src/sinfo/sort.c
+++ b/src/sinfo/sort.c
@@ -5,10 +5,11 @@
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Joey Ekstrom <ekstrom1@llnl.gov>, 
  *             Morris Jette <jette1@llnl.gov>, et. al.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/slurmctld/Makefile.am b/src/slurmctld/Makefile.am
index 5ccac1a4f1ae9306b5829a275a6a3f6822c598be..03613a8219dd6073e79f0c43811f8b7d95f1331d 100644
--- a/src/slurmctld/Makefile.am
+++ b/src/slurmctld/Makefile.am
@@ -19,10 +19,9 @@ slurmctld_SOURCES = 	\
 	agent.c  	\
 	agent.h		\
 	backup.c	\
+	basil_interface.c \
+	basil_interface.h \
 	controller.c 	\
-	hilbert.c	\
-	hilbert.h	\
-	hilbert_slurm.c	\
 	job_mgr.c 	\
 	job_scheduler.c \
 	job_scheduler.h \
@@ -36,11 +35,15 @@ slurmctld_SOURCES = 	\
 	partition_mgr.c \
 	ping_nodes.c	\
 	ping_nodes.h	\
+	port_mgr.c	\
+	port_mgr.h	\
 	power_save.c	\
 	proc_req.c	\
 	proc_req.h	\
 	read_config.c	\
 	read_config.h	\
+	reservation.c	\
+	reservation.h	\
 	sched_plugin.c	\
 	sched_plugin.h	\
 	slurmctld.h	\
@@ -49,6 +52,8 @@ slurmctld_SOURCES = 	\
 	state_save.c	\
 	state_save.h	\
 	step_mgr.c	\
+	topo_plugin.c	\
+	topo_plugin.h	\
 	trigger_mgr.c	\
 	trigger_mgr.h
 
diff --git a/src/slurmctld/Makefile.in b/src/slurmctld/Makefile.in
index 598da32ea4cdc52ebc9821618fab71374b0c943e..daa4a42bfc1fcfcab3bcea98672c9813ea364bd4 100644
--- a/src/slurmctld/Makefile.in
+++ b/src/slurmctld/Makefile.in
@@ -45,14 +45,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -73,14 +77,15 @@ am__installdirs = "$(DESTDIR)$(sbindir)"
 sbinPROGRAMS_INSTALL = $(INSTALL_PROGRAM)
 PROGRAMS = $(sbin_PROGRAMS)
 am_slurmctld_OBJECTS = acct_policy.$(OBJEXT) agent.$(OBJEXT) \
-	backup.$(OBJEXT) controller.$(OBJEXT) hilbert.$(OBJEXT) \
-	hilbert_slurm.$(OBJEXT) job_mgr.$(OBJEXT) \
-	job_scheduler.$(OBJEXT) licenses.$(OBJEXT) locks.$(OBJEXT) \
-	node_mgr.$(OBJEXT) node_scheduler.$(OBJEXT) \
-	partition_mgr.$(OBJEXT) ping_nodes.$(OBJEXT) \
-	power_save.$(OBJEXT) proc_req.$(OBJEXT) read_config.$(OBJEXT) \
+	backup.$(OBJEXT) basil_interface.$(OBJEXT) \
+	controller.$(OBJEXT) job_mgr.$(OBJEXT) job_scheduler.$(OBJEXT) \
+	licenses.$(OBJEXT) locks.$(OBJEXT) node_mgr.$(OBJEXT) \
+	node_scheduler.$(OBJEXT) partition_mgr.$(OBJEXT) \
+	ping_nodes.$(OBJEXT) port_mgr.$(OBJEXT) power_save.$(OBJEXT) \
+	proc_req.$(OBJEXT) read_config.$(OBJEXT) reservation.$(OBJEXT) \
 	sched_plugin.$(OBJEXT) srun_comm.$(OBJEXT) \
-	state_save.$(OBJEXT) step_mgr.$(OBJEXT) trigger_mgr.$(OBJEXT)
+	state_save.$(OBJEXT) step_mgr.$(OBJEXT) topo_plugin.$(OBJEXT) \
+	trigger_mgr.$(OBJEXT)
 slurmctld_OBJECTS = $(am_slurmctld_OBJECTS)
 slurmctld_DEPENDENCIES = $(top_builddir)/src/common/libdaemonize.la \
 	$(top_builddir)/src/common/libcommon.o
@@ -114,6 +119,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
@@ -286,10 +295,9 @@ slurmctld_SOURCES = \
 	agent.c  	\
 	agent.h		\
 	backup.c	\
+	basil_interface.c \
+	basil_interface.h \
 	controller.c 	\
-	hilbert.c	\
-	hilbert.h	\
-	hilbert_slurm.c	\
 	job_mgr.c 	\
 	job_scheduler.c \
 	job_scheduler.h \
@@ -303,11 +311,15 @@ slurmctld_SOURCES = \
 	partition_mgr.c \
 	ping_nodes.c	\
 	ping_nodes.h	\
+	port_mgr.c	\
+	port_mgr.h	\
 	power_save.c	\
 	proc_req.c	\
 	proc_req.h	\
 	read_config.c	\
 	read_config.h	\
+	reservation.c	\
+	reservation.h	\
 	sched_plugin.c	\
 	sched_plugin.h	\
 	slurmctld.h	\
@@ -316,6 +328,8 @@ slurmctld_SOURCES = \
 	state_save.c	\
 	state_save.h	\
 	step_mgr.c	\
+	topo_plugin.c	\
+	topo_plugin.h	\
 	trigger_mgr.c	\
 	trigger_mgr.h
 
@@ -394,9 +408,8 @@ distclean-compile:
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/acct_policy.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/agent.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/backup.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/basil_interface.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/controller.Po@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/hilbert.Po@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/hilbert_slurm.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/job_mgr.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/job_scheduler.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/licenses.Po@am__quote@
@@ -405,13 +418,16 @@ distclean-compile:
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/node_scheduler.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/partition_mgr.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ping_nodes.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/port_mgr.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/power_save.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/proc_req.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/read_config.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/reservation.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sched_plugin.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/srun_comm.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/state_save.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/step_mgr.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/topo_plugin.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/trigger_mgr.Po@am__quote@
 
 .c.o:
diff --git a/src/slurmctld/acct_policy.c b/src/slurmctld/acct_policy.c
index 5471ca96dafd5b0667982c3c08a64a27be103e4a..8138a14c9c704b09d947b3df6f69e89c9f55af1d 100644
--- a/src/slurmctld/acct_policy.c
+++ b/src/slurmctld/acct_policy.c
@@ -4,10 +4,11 @@
  *  Copyright (C) 2008 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -57,6 +58,7 @@ static void _cancel_job(struct job_record *job_ptr)
 	job_ptr->job_state = JOB_FAILED;
 	job_ptr->exit_code = 1;
 	job_ptr->state_reason = FAIL_BANK_ACCOUNT;
+	xfree(job_ptr->state_desc);
 	job_ptr->start_time = job_ptr->end_time = now;
 	job_completion_logger(job_ptr);
 	delete_job_details(job_ptr);
@@ -155,7 +157,7 @@ extern void acct_policy_job_begin(struct job_record *job_ptr)
 		return;
 
 	slurm_mutex_lock(&assoc_mgr_association_lock);
-	assoc_ptr = job_ptr->assoc_ptr;
+	assoc_ptr = (acct_association_rec_t *)job_ptr->assoc_ptr;
 	while(assoc_ptr) {
 		assoc_ptr->used_jobs++;	
 		assoc_ptr->grp_used_cpus += job_ptr->total_procs;
@@ -179,7 +181,7 @@ extern void acct_policy_job_fini(struct job_record *job_ptr)
 		return;
 
 	slurm_mutex_lock(&assoc_mgr_association_lock);
-	assoc_ptr = job_ptr->assoc_ptr;
+	assoc_ptr = (acct_association_rec_t *)job_ptr->assoc_ptr;
 	while(assoc_ptr) {
 		if (assoc_ptr->used_jobs)
 			assoc_ptr->used_jobs--;
@@ -235,28 +237,43 @@ extern bool acct_policy_job_runnable(struct job_record *job_ptr)
 		return true;
 
 	/* clear old state reason */
-        if (job_ptr->state_reason == WAIT_ASSOC_LIMIT)
+        if ((job_ptr->state_reason == WAIT_ASSOC_JOB_LIMIT) ||
+	    (job_ptr->state_reason == WAIT_ASSOC_RESOURCE_LIMIT) ||
+	    (job_ptr->state_reason == WAIT_ASSOC_TIME_LIMIT))
                 job_ptr->state_reason = WAIT_NO_REASON;
 
 	slurm_mutex_lock(&assoc_mgr_association_lock);
 	assoc_ptr = job_ptr->assoc_ptr;
 	while(assoc_ptr) {
+		uint64_t usage_mins =
+			(uint64_t)(assoc_ptr->usage_raw / 60.0);
+		uint32_t wall_mins = assoc_ptr->grp_used_wall / 60;
 #if _DEBUG
 		info("acct_job_limits: %u of %u", 
 		     assoc_ptr->used_jobs, assoc_ptr->max_jobs);
 #endif		
-		/* NOTE: We can't enforce assoc_ptr->grp_cpu_mins at this
-		 * time because we aren't keeping track of how long
-		 * jobs have been running yet */
-
-		/* NOTE: We can't enforce assoc_ptr->grp_cpus at this
-		 * time because we don't have access to a CPU count for the job
-		 * due to how all of the job's specifications interact */
+		if ((assoc_ptr->grp_cpu_mins != (uint64_t)NO_VAL)
+		    && (assoc_ptr->grp_cpu_mins != (uint64_t)INFINITE)
+		    && (usage_mins >= assoc_ptr->grp_cpu_mins)) {
+			job_ptr->state_reason = WAIT_ASSOC_JOB_LIMIT;
+			xfree(job_ptr->state_desc);
+			debug2("job %u being held, "
+			       "assoc %u is at or exceeds "
+			       "group max cpu minutes limit %llu "
+			       "with %Lf for account %s",
+			       job_ptr->job_id, assoc_ptr->id,
+			       assoc_ptr->grp_cpu_mins, 
+			       assoc_ptr->usage_raw, assoc_ptr->acct);	
+			
+			rc = false;
+			goto end_it;
+		}
 
 		if ((assoc_ptr->grp_jobs != NO_VAL) &&
 		    (assoc_ptr->grp_jobs != INFINITE) &&
 		    (assoc_ptr->used_jobs >= assoc_ptr->grp_jobs)) {
-			job_ptr->state_reason = WAIT_ASSOC_LIMIT;
+			job_ptr->state_reason = WAIT_ASSOC_JOB_LIMIT;
+			xfree(job_ptr->state_desc);
 			debug2("job %u being held, "
 			       "assoc %u is at or exceeds "
 			       "group max jobs limit %u with %u for account %s",
@@ -282,7 +299,9 @@ extern bool acct_policy_job_runnable(struct job_record *job_ptr)
 			} else if ((assoc_ptr->grp_used_nodes + 
 				    job_ptr->details->min_nodes) > 
 				   assoc_ptr->grp_nodes) {
-				job_ptr->state_reason = WAIT_ASSOC_LIMIT;
+				job_ptr->state_reason = 
+					WAIT_ASSOC_RESOURCE_LIMIT;
+				xfree(job_ptr->state_desc);
 				debug2("job %u being held, "
 				       "assoc %u is at or exceeds "
 				       "group max node limit %u "
@@ -299,27 +318,22 @@ extern bool acct_policy_job_runnable(struct job_record *job_ptr)
 		}
 
 		/* we don't need to check submit_jobs here */
-		
-		/* FIX ME: Once we start tracking time of running jobs
-		 * we will need to update the amount of time we have
-		 * used and check against that here.  When we start
-		 * keeping track of time we will also need to come up
-		 * with a way to refresh the time. 
-		 */
-		if ((assoc_ptr->grp_wall != NO_VAL) &&
-		    (assoc_ptr->grp_wall != INFINITE)) {
-			time_limit = assoc_ptr->grp_wall;
-			if ((job_ptr->time_limit != NO_VAL) &&
-			    (job_ptr->time_limit > time_limit)) {
-				info("job %u being cancelled, "
-				     "time limit %u exceeds group "
-				     "time limit %u for account %s",
-				     job_ptr->job_id, job_ptr->time_limit, 
-				     time_limit, assoc_ptr->acct);
-				_cancel_job(job_ptr);
-				rc = false;
-				goto end_it;
-			}
+
+		if ((assoc_ptr->grp_wall != NO_VAL) 
+		    && (assoc_ptr->grp_wall != INFINITE)
+		    && (wall_mins >= assoc_ptr->grp_wall)) {
+			job_ptr->state_reason = WAIT_ASSOC_JOB_LIMIT;
+			xfree(job_ptr->state_desc);
+			debug2("job %u being held, "
+			       "assoc %u is at or exceeds "
+			       "group wall limit %u "
+			       "with %u for account %s",
+			       job_ptr->job_id, assoc_ptr->id,
+			       assoc_ptr->grp_wall, 
+			       wall_mins, assoc_ptr->acct);
+			       
+			rc = false;
+			goto end_it;
 		}
 
 		
@@ -343,7 +357,8 @@ extern bool acct_policy_job_runnable(struct job_record *job_ptr)
 		if ((assoc_ptr->max_jobs != NO_VAL) &&
 		    (assoc_ptr->max_jobs != INFINITE) &&
 		    (assoc_ptr->used_jobs >= assoc_ptr->max_jobs)) {
-			job_ptr->state_reason = WAIT_ASSOC_LIMIT;
+			job_ptr->state_reason = WAIT_ASSOC_JOB_LIMIT;
+			xfree(job_ptr->state_desc);
 			debug2("job %u being held, "
 			       "assoc %u is at or exceeds "
 			       "max jobs limit %u with %u for account %s",
@@ -403,10 +418,10 @@ end_it:
 extern void acct_policy_update_running_job_usage(struct job_record *job_ptr)
 {
 	acct_association_rec_t *assoc_ptr;
+
 	slurm_mutex_lock(&assoc_mgr_association_lock);
 	assoc_ptr = job_ptr->assoc_ptr;
 	while(assoc_ptr) {
-
 		assoc_ptr = assoc_ptr->parent_assoc_ptr;
 	}
 	slurm_mutex_unlock(&assoc_mgr_association_lock);
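
Every limit test added to acct_policy_job_runnable() follows one pattern: a limit is enforced only when it is neither NO_VAL (unset) nor INFINITE (disabled), and raw usage kept in seconds is converted to minutes before the comparison. A minimal sketch, with stand-in sentinel values (the real ones come from slurm/slurm.h):

#include <stdint.h>

#define NO_VAL   0xfffffffe	/* stand-in: limit not set */
#define INFINITE 0xffffffff	/* stand-in: limit disabled */

/* return 1 if accrued CPU minutes meet or exceed grp_cpu_mins */
static int over_grp_cpu_mins(long double usage_raw, uint64_t grp_cpu_mins)
{
	uint64_t usage_mins = (uint64_t)(usage_raw / 60.0);

	if ((grp_cpu_mins == (uint64_t)NO_VAL) ||
	    (grp_cpu_mins == (uint64_t)INFINITE))
		return 0;	/* nothing to enforce */
	return (usage_mins >= grp_cpu_mins);
}
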
diff --git a/src/slurmctld/acct_policy.h b/src/slurmctld/acct_policy.h
index 94b72c4cb75a0c53d0d5f2708ae65700c79c9869..4bd31614c967f836ea78ea8c88451ee05d3c6faf 100644
--- a/src/slurmctld/acct_policy.h
+++ b/src/slurmctld/acct_policy.h
@@ -4,10 +4,11 @@
  *  Copyright (C) 2008 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette@llnl.gov> et. al.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/slurmctld/agent.c b/src/slurmctld/agent.c
index 6675bf05099723434ce5ac86f25aefe45cd02a89..915c2fd762bd9be0c384868fda0257cec3107ea9 100644
--- a/src/slurmctld/agent.c
+++ b/src/slurmctld/agent.c
@@ -3,14 +3,15 @@
  *	logic could be placed for broadcast communications.
  *****************************************************************************
  *  Copyright (C) 2002-2007 The Regents of the University of California.
- *  Copyright (C) 2008 Lawrence Livermore National Security.
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>, et. al.
  *  Derived from pdsh written by Jim Garlick <garlick1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -153,6 +154,8 @@ typedef struct task_info {
 
 typedef struct queued_request {
 	agent_arg_t* agent_arg_ptr;	/* The queued request */
+	time_t       first_attempt;	/* Time of first check for batch 
+					 * launch RPC *only* */
 	time_t       last_attempt;	/* Time of last xmit attempt */
 } queued_request_t;
 
@@ -162,6 +165,7 @@ typedef struct mail_info {
 } mail_info_t;
 
 static void _sig_handler(int dummy);
+static int  _batch_launch_defer(queued_request_t *queued_req_ptr);
 static inline int _comm_err(char *node_name);
 static void _list_delete_retry(void *retry_entry);
 static agent_info_t *_make_agent_info(agent_arg_t *agent_arg_ptr);
@@ -384,13 +388,14 @@ static agent_info_t *_make_agent_info(agent_arg_t *agent_arg_ptr)
 	agent_info_ptr->msg_type       = agent_arg_ptr->msg_type;
 	agent_info_ptr->msg_args_pptr  = &agent_arg_ptr->msg_args;
 	
-	if ((agent_arg_ptr->msg_type != REQUEST_SHUTDOWN)
-	&&  (agent_arg_ptr->msg_type != REQUEST_RECONFIGURE)
-	&&  (agent_arg_ptr->msg_type != SRUN_EXEC)
-	&&  (agent_arg_ptr->msg_type != SRUN_TIMEOUT)
-	&&  (agent_arg_ptr->msg_type != SRUN_NODE_FAIL)
-	&&  (agent_arg_ptr->msg_type != SRUN_USER_MSG)
-	&&  (agent_arg_ptr->msg_type != SRUN_JOB_COMPLETE)) {
+	if ((agent_arg_ptr->msg_type != REQUEST_SHUTDOWN)	&&
+	    (agent_arg_ptr->msg_type != REQUEST_RECONFIGURE)	&&
+	    (agent_arg_ptr->msg_type != SRUN_EXEC)		&&
+	    (agent_arg_ptr->msg_type != SRUN_TIMEOUT)		&&
+	    (agent_arg_ptr->msg_type != SRUN_NODE_FAIL)		&&
+	    (agent_arg_ptr->msg_type != SRUN_USER_MSG)		&&
+	    (agent_arg_ptr->msg_type != SRUN_STEP_MISSING)	&&
+	    (agent_arg_ptr->msg_type != SRUN_JOB_COMPLETE)) {
 		/* Sending message to a possibly large number of slurmd.
 		 * Push all message forwarding to slurmd in order to 
 		 * offload as much work from slurmctld as possible. */
@@ -509,13 +514,14 @@ static void *_wdog(void *args)
 	thd_complete_t thd_comp;
 	ret_data_info_t *ret_data_info = NULL;
 
-	if ( (agent_ptr->msg_type == SRUN_JOB_COMPLETE)
-	||   (agent_ptr->msg_type == SRUN_EXEC)
-	||   (agent_ptr->msg_type == SRUN_PING)
-	||   (agent_ptr->msg_type == SRUN_TIMEOUT)
-	||   (agent_ptr->msg_type == SRUN_USER_MSG)
-	||   (agent_ptr->msg_type == RESPONSE_RESOURCE_ALLOCATION)
-	||   (agent_ptr->msg_type == SRUN_NODE_FAIL) )
+	if ( (agent_ptr->msg_type == SRUN_JOB_COMPLETE)			||
+	     (agent_ptr->msg_type == SRUN_STEP_MISSING)			||
+	     (agent_ptr->msg_type == SRUN_EXEC)				||
+	     (agent_ptr->msg_type == SRUN_PING)				||
+	     (agent_ptr->msg_type == SRUN_TIMEOUT)			||
+	     (agent_ptr->msg_type == SRUN_USER_MSG)			||
+	     (agent_ptr->msg_type == RESPONSE_RESOURCE_ALLOCATION)	||
+	     (agent_ptr->msg_type == SRUN_NODE_FAIL) )
 		srun_agent = true;
 
 	thd_comp.max_delay = 0;
@@ -597,9 +603,10 @@ static void _notify_slurmctld_jobs(agent_info_t *agent_ptr)
 			*agent_ptr->msg_args_pptr;
 		job_id  = msg->job_id;
 		step_id = NO_VAL;
-	} else if ((agent_ptr->msg_type == SRUN_JOB_COMPLETE)
-	||         (agent_ptr->msg_type == SRUN_EXEC)
-	||         (agent_ptr->msg_type == SRUN_USER_MSG)) {
+	} else if ((agent_ptr->msg_type == SRUN_JOB_COMPLETE)		||
+		   (agent_ptr->msg_type == SRUN_STEP_MISSING)		||
+	           (agent_ptr->msg_type == SRUN_EXEC)			||
+	           (agent_ptr->msg_type == SRUN_USER_MSG)) {
 		return;		/* no need to note srun response */
 	} else if (agent_ptr->msg_type == SRUN_NODE_FAIL) {
 		return;		/* no need to note srun response */
@@ -805,13 +812,14 @@ static void *_thread_per_group_rpc(void *args)
 	xassert(args != NULL);
 	xsignal(SIGUSR1, _sig_handler);
 	xsignal_unblock(sig_array);
-	is_kill_msg = (	(msg_type == REQUEST_KILL_TIMELIMIT) ||
+	is_kill_msg = (	(msg_type == REQUEST_KILL_TIMELIMIT)	||
 			(msg_type == REQUEST_TERMINATE_JOB) );
-	srun_agent = (	(msg_type == SRUN_PING)    ||
-			(msg_type == SRUN_EXEC)    ||
-			(msg_type == SRUN_JOB_COMPLETE) ||
-			(msg_type == SRUN_TIMEOUT) ||
-			(msg_type == SRUN_USER_MSG) ||
+	srun_agent = (	(msg_type == SRUN_PING)			||
+			(msg_type == SRUN_EXEC)			||
+			(msg_type == SRUN_JOB_COMPLETE)		||
+			(msg_type == SRUN_STEP_MISSING)		||
+			(msg_type == SRUN_TIMEOUT)		||
+			(msg_type == SRUN_USER_MSG)		||
 			(msg_type == RESPONSE_RESOURCE_ALLOCATION) ||
 			(msg_type == SRUN_NODE_FAIL) );
 
@@ -1121,7 +1129,7 @@ static void _list_delete_retry(void *retry_entry)
  */
 extern int agent_retry (int min_wait, bool mail_too)
 {
-	int list_size = 0;
+	int list_size = 0, rc;
 	time_t now = time(NULL);
 	queued_request_t *queued_req_ptr = NULL;
 	agent_arg_t *agent_arg_ptr = NULL;
@@ -1163,6 +1171,17 @@ extern int agent_retry (int min_wait, bool mail_too)
 		retry_iter = list_iterator_create(retry_list);
 		while ((queued_req_ptr = (queued_request_t *)
 				list_next(retry_iter))) {
+			rc = _batch_launch_defer(queued_req_ptr);
+			if (rc == -1) {		/* abort request */
+				_purge_agent_args(queued_req_ptr->
+						  agent_arg_ptr);
+				xfree(queued_req_ptr);
+				list_remove(retry_iter);
+				list_size--;
+				continue;
+			}
+			if (rc > 0)
+				continue;
  			if (queued_req_ptr->last_attempt == 0) {
 				list_remove(retry_iter);
 				list_size--;
@@ -1181,6 +1200,17 @@ extern int agent_retry (int min_wait, bool mail_too)
 		/* next try to find an older record to retry */
 		while ((queued_req_ptr = (queued_request_t *) 
 				list_next(retry_iter))) {
+			rc = _batch_launch_defer(queued_req_ptr);
+			if (rc == -1) { 	/* abort request */
+				_purge_agent_args(queued_req_ptr->
+						  agent_arg_ptr);
+				xfree(queued_req_ptr);
+				list_remove(retry_iter);
+				list_size--;
+				continue;
+			}
+			if (rc > 0)
+				continue;
 			age = difftime(now, queued_req_ptr->last_attempt);
 			if (age > min_wait) {
 				list_remove(retry_iter);
@@ -1443,3 +1473,73 @@ extern void mail_job_info (struct job_record *job_ptr, uint16_t mail_type)
 	return;
 }
 
+/* Test whether a batch job launch request should be deferred because
+ * the destination node is not yet powered up and responding.
+ * RET -1: abort the request, pending job cancelled
+ *      0: execute the request now
+ *      1: defer the request
+ */
+static int _batch_launch_defer(queued_request_t *queued_req_ptr)
+{
+	char hostname[512];
+	agent_arg_t *agent_arg_ptr;
+	batch_job_launch_msg_t *launch_msg_ptr;
+	struct node_record *node_ptr;
+	time_t now = time(NULL);
+	struct job_record  *job_ptr;
+
+	agent_arg_ptr = queued_req_ptr->agent_arg_ptr;
+	if (agent_arg_ptr->msg_type != REQUEST_BATCH_JOB_LAUNCH)
+		return 0;
+
+	if (difftime(now, queued_req_ptr->last_attempt) < 10) {
+		/* Reduce overhead by only testing once every 10 secs */
+		return 1;
+	}
+
+	launch_msg_ptr = (batch_job_launch_msg_t *)agent_arg_ptr->msg_args;
+	job_ptr = find_job_record(launch_msg_ptr->job_id);
+	if ((job_ptr == NULL) || (job_ptr->job_state != JOB_RUNNING)) {
+		info("agent(batch_launch): removed pending request for "
+		     "cancelled job %u",
+		     launch_msg_ptr->job_id);
+		return -1;	/* job cancelled while waiting */
+	}
+
+	hostlist_deranged_string(agent_arg_ptr->hostlist, 
+				 sizeof(hostname), hostname);
+	node_ptr = find_node_record(hostname);
+	if (node_ptr == NULL) {
+		error("agent(batch_launch) removed pending request for job "
+		      "%s, missing node %s",
+		      launch_msg_ptr->job_id, agent_arg_ptr->hostlist);
+		return -1;	/* invalid request?? */
+	}
+
+	if (((node_ptr->node_state & NODE_STATE_POWER_SAVE) == 0) &&
+	    ((node_ptr->node_state & NODE_STATE_NO_RESPOND) == 0)) {
+		/* ready to launch, adjust time limit for boot time */
+		if (job_ptr->time_limit != INFINITE)
+			job_ptr->end_time = now + (job_ptr->time_limit * 60);
+		queued_req_ptr->last_attempt = (time_t) 0;
+		return 0;
+	}
+
+	if (queued_req_ptr->last_attempt == 0) {
+		queued_req_ptr->first_attempt = now;
+		queued_req_ptr->last_attempt  = now;
+	} else if (difftime(now, queued_req_ptr->first_attempt) >= 
+				 slurm_get_resume_timeout()) {
+		error("agent waited too long for node %s to respond, "
+		      "sending batch request anyway...", 
+		      node_ptr->name);
+		if (job_ptr->time_limit != INFINITE)
+			job_ptr->end_time = now + (job_ptr->time_limit * 60);
+		queued_req_ptr->last_attempt = (time_t) 0;
+		return 0;
+	}
+
+	queued_req_ptr->last_attempt  = now;
+	return 1;
+}
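
agent_retry() treats the result of _batch_launch_defer() as a three-way decision, and the function throttles how often it probes the target node. A condensed sketch of that bookkeeping with simplified types (the real code additionally returns -1 when the pending job has been cancelled, and verifies the job and node records):

#include <time.h>

struct probe {
	time_t first_attempt;	/* time of first check */
	time_t last_attempt;	/* time of most recent check */
};

/* RET 0: execute now, 1: keep deferring */
static int defer_decision(struct probe *p, int node_ready,
			  int resume_timeout)
{
	time_t now = time(NULL);

	if (p->last_attempt && (difftime(now, p->last_attempt) < 10))
		return 1;	/* test at most every 10 seconds */
	if (node_ready) {
		p->last_attempt = (time_t) 0;
		return 0;	/* safe to send the launch RPC */
	}
	if (p->last_attempt == 0)
		p->first_attempt = now;
	else if (difftime(now, p->first_attempt) >= resume_timeout) {
		p->last_attempt = (time_t) 0;
		return 0;	/* waited long enough; send anyway */
	}
	p->last_attempt = now;
	return 1;
}
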
diff --git a/src/slurmctld/agent.h b/src/slurmctld/agent.h
index 68541d0b29b617f6dbe5afa636c5bcc885d5f302..f4243d2f9b487b97898ff5d7b95b2f1deeacf815 100644
--- a/src/slurmctld/agent.h
+++ b/src/slurmctld/agent.h
@@ -1,17 +1,17 @@
 /*****************************************************************************\
  *  agent.h - data structures and function definitions for parallel 
  *	background communications
- *
- *  $Id: agent.h 15775 2008-11-26 19:13:32Z jette $
  *****************************************************************************
- *  Copyright (C) 2002-2006 The Regents of the University of California.
+ *  Copyright (C) 2002-2007 The Regents of the University of California.
+ *  Copyright (C) 2008 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette@llnl.gov>, et. al.
  *  Derived from dsh written by Jim Garlick <garlick1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/slurmctld/backup.c b/src/slurmctld/backup.c
index 82c1084b4e9bf6b2f1dd80543928cf1ed61cb027..51eaaf10d79a12fa3fdb2478cb997171004332f5 100644
--- a/src/slurmctld/backup.c
+++ b/src/slurmctld/backup.c
@@ -1,13 +1,15 @@
 /*****************************************************************************\
  *  backup.c - backup slurm controller
  *****************************************************************************
- *  Copyright (C) 2002-2006 The Regents of the University of California.
+ *  Copyright (C) 2002-2007 The Regents of the University of California.
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette@llnl.gov>, Kevin Tew <tew1@llnl.gov>, et. al.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -65,15 +67,27 @@
 #include "src/slurmctld/read_config.h"
 #include "src/slurmctld/slurmctld.h"
 
+#ifndef VOLATILE
+#if defined(__STDC__) || defined(__cplusplus)
+#define VOLATILE volatile
+#else
+#define VOLATILE
+#endif
+#endif
+
+#define SHUTDOWN_WAIT     2	/* Time to wait for primary server shutdown */
+
 static int          _background_process_msg(slurm_msg_t * msg);
 static int          _backup_reconfig(void);
 static void *       _background_rpc_mgr(void *no_data);
 static void *       _background_signal_hand(void *no_data);
 static int          _ping_controller(void);
 inline static void  _update_cred_key(void);
+static int          _shutdown_primary_controller(int wait_time);
 
 /* Local variables */
-static bool     dump_core = false;
+static bool          dump_core = false;
+static VOLATILE bool takeover = false;
 
 /*
  * Static list of signals to block in this process
@@ -95,6 +109,8 @@ void run_backup(void)
 		READ_LOCK, NO_LOCK, NO_LOCK, NO_LOCK };
 
 	info("slurmctld running in background mode");
+	takeover = false;
+
 	/* default: don't resume if shutdown */
 	slurmctld_config.resume_backup = false;
 	if (xsignal_block(backup_sigarray) < 0)
@@ -123,23 +139,29 @@ void run_backup(void)
 	while (slurmctld_config.shutdown_time == 0) {
 		sleep(1);
 		/* Lock of slurmctld_conf below not important */
-		if (slurmctld_conf.slurmctld_timeout
-		&&  (difftime(time(NULL), last_ping) <
+		if (slurmctld_conf.slurmctld_timeout &&
+		    (takeover == false) &&
+		    (difftime(time(NULL), last_ping) <
 		     (slurmctld_conf.slurmctld_timeout / 3)))
 			continue;
 
 		last_ping = time(NULL);
 		if (_ping_controller() == 0)
 			last_controller_response = time(NULL);
-		else {
+		else if ( takeover == true ) {
+			/* in takeover mode, take control as soon as */
+			/* primary no longer respond */
+			break;
+		} else {
 			uint32_t timeout;
 			lock_slurmctld(config_read_lock);
 			timeout = slurmctld_conf.slurmctld_timeout;
 			unlock_slurmctld(config_read_lock);
 
 			if (difftime(time(NULL), last_controller_response) >
-					timeout)
+			    timeout) {
 				break;
+			}
 		}
 	}
 
@@ -272,7 +294,8 @@ static void *_background_rpc_mgr(void *no_data)
 	slurm_addr cli_addr;
 	slurm_msg_t *msg = NULL;
 	int error_code;
-	
+	char* node_addr = NULL;
+
 	/* Read configuration only */
 	slurmctld_lock_t config_read_lock = { 
 		READ_LOCK, NO_LOCK, NO_LOCK, NO_LOCK };
@@ -284,10 +307,18 @@ static void *_background_rpc_mgr(void *no_data)
 
 	/* initialize port for RPCs */
 	lock_slurmctld(config_read_lock);
+
+	/* set node_addr to bind to (NULL means any) */
+	if (strcmp(slurmctld_conf.backup_controller,
+		   slurmctld_conf.backup_addr) != 0) {
+		node_addr = slurmctld_conf.backup_addr;
+	}
+
 	if ((sockfd =
-	     slurm_init_msg_engine_port(slurmctld_conf.slurmctld_port))
+	     slurm_init_msg_engine_addrname_port(node_addr,
+						 slurmctld_conf.slurmctld_port))
 	    == SLURM_SOCKET_ERROR)
-		fatal("slurm_init_msg_engine_port error %m");
+		fatal("slurm_init_msg_engine_addrname_port error %m");
 	unlock_slurmctld(config_read_lock);
 
 	/* Prepare to catch SIGUSR1 to interrupt accept().
@@ -355,6 +386,12 @@ static int _background_process_msg(slurm_msg_t * msg)
 			   (msg->msg_type == REQUEST_SHUTDOWN)) {
 			info("Performing RPC: REQUEST_SHUTDOWN");
 			pthread_kill(slurmctld_config.thread_id_sig, SIGTERM);
+		} else if (super_user && 
+			   (msg->msg_type == REQUEST_TAKEOVER)) {
+			info("Performing RPC: REQUEST_TAKEOVER");
+			_shutdown_primary_controller(SHUTDOWN_WAIT);
+			takeover = true;
+			error_code = SLURM_SUCCESS;
 		} else if (super_user && 
 			   (msg->msg_type == REQUEST_CONTROL)) {
 			debug3("Ignoring RPC: REQUEST_CONTROL");
@@ -419,3 +456,55 @@ static int _backup_reconfig(void)
 	slurmctld_conf.last_update = time(NULL);
 	return SLURM_SUCCESS;
 }
+
+/*
+ * Tell the primary controller to relinquish control; the primary
+ *	control_machine has to suspend operation
+ * Based on _shutdown_backup_controller from controller.c
+ * wait_time - How long to wait for primary controller to write state, seconds.
+ * RET 0 or an error code
+ * NOTE: READ lock_slurmctld config before entry (or be single-threaded)
+ */
+static int _shutdown_primary_controller(int wait_time)
+{
+	int rc;
+	slurm_msg_t req;
+
+	slurm_msg_t_init(&req);
+	if ((slurmctld_conf.control_addr == NULL) ||
+	    (slurmctld_conf.control_addr[0] == '\0')) {
+		error("_shutdown_primary_controller: "
+		      "no primary controller to shutdown");
+		return SLURM_ERROR;
+	}
+
+	slurm_set_addr(&req.address, slurmctld_conf.slurmctld_port,
+		       slurmctld_conf.control_addr);
+
+	/* send request message */
+	req.msg_type = REQUEST_CONTROL;
+	
+	if (slurm_send_recv_rc_msg_only_one(&req, &rc, 
+				(CONTROL_TIMEOUT * 1000)) < 0) {
+		error("_shutdown_primary_controller:send/recv: %m");
+		return SLURM_ERROR;
+	}
+	if (rc == ESLURM_DISABLED)
+		debug("primary controller responding");
+	else if (rc == 0) {
+		debug("primary controller has relinquished control");
+	} else {
+		error("_shutdown_primary_controller: %s", slurm_strerror(rc));
+		return SLURM_ERROR;
+	}
+
+	/* FIXME: Ideally the REQUEST_CONTROL RPC does not return until all   
+	 * other activity has ceased and the state has been saved. That is   
+	 * not presently the case (it returns when no other work is pending,  
+	 * so the state save should occur right away). We sleep for a while   
+	 * here and give the primary controller time to shutdown */
+	if (wait_time)
+		sleep(wait_time);
+
+	return SLURM_SUCCESS;
+}
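
Taken together, the takeover path in this file is: the backup receives REQUEST_TAKEOVER, asks the primary to relinquish control with REQUEST_CONTROL via _shutdown_primary_controller(), sets the volatile takeover flag, and the ping loop in run_backup() then assumes control as soon as the primary stops answering. A sketch of that loop's decision, with extern stubs standing in for backup.c's own declarations:

#include <unistd.h>
#include <stdbool.h>

/* stubs standing in for the declarations in backup.c */
extern volatile bool takeover;		/* set by REQUEST_TAKEOVER */
extern int _ping_controller(void);	/* 0 when the primary answers */
extern bool shutdown_requested(void);	/* stand-in for shutdown_time */

/* sketch of the run_backup() ping loop; the real loop also rate
 * limits its pings against SlurmctldTimeout before taking over */
static void backup_ping_loop(void)
{
	while (!shutdown_requested()) {
		sleep(1);
		if (_ping_controller() == 0)
			continue;	/* primary still alive */
		if (takeover)
			break;		/* take control immediately */
		/* otherwise wait out SlurmctldTimeout first */
	}
}
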
diff --git a/src/slurmctld/basil_interface.c b/src/slurmctld/basil_interface.c
new file mode 100644
index 0000000000000000000000000000000000000000..dccf46edcb91b2c4bc5a3a0d4dbeeaed11a21270
--- /dev/null
+++ b/src/slurmctld/basil_interface.c
@@ -0,0 +1,315 @@
+/*****************************************************************************\
+ *  basil_interface.c - slurmctld interface to BASIL, Cray's Batch Application
+ *	Scheduler Interface Layer (BASIL). In order to support development, 
+ *	these functions will provide basic BASIL-like functionality even 
+ *	without a BASIL command being present.
+ *****************************************************************************
+ *  Copyright (C) 2009 Lawrence Livermore National Security.
+ *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+ *  Written by Morris Jette <jette1@llnl.gov>
+ *  CODE-OCEC-09-009. All rights reserved.
+ *  
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
+ *  
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission 
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and 
+ *  distribute linked combinations including the two. You must obey the GNU 
+ *  General Public License in all respects for all of the code used other than 
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this 
+ *  exception to your version of the file(s), but you are not obligated to do 
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in 
+ *  the program, then also delete it here.
+ *  
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *  
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+/* FIXME: Document, ALPS must be started before SLURM */
+/* FIXME: Document BASIL_RESERVATION_ID env var */
+
+#if HAVE_CONFIG_H
+#  include "config.h"
+#endif	/* HAVE_CONFIG_H */
+
+#include <slurm/slurm_errno.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "src/common/log.h"
+#include "src/common/node_select.h"
+#include "src/common/xmalloc.h"
+#include "src/common/xstring.h"
+#include "src/slurmctld/basil_interface.h"
+#include "src/slurmctld/slurmctld.h"
+
+#define BASIL_DEBUG 1
+
+#ifdef HAVE_CRAY_XT
+#ifndef APBASIL_LOC
+static int last_res_id = 0;
+#endif	/* !APBASIL_LOC */
+
+#ifdef APBASIL_LOC
+/* Make sure that each SLURM node has a BASIL node ID */
+static void _validate_basil_node_id(void)
+{
+	uint16_t base_state;
+	int i;
+	struct node_record *node_ptr = node_record_table_ptr;
+
+	for (i=0; i<node_record_cnt; i++, node_ptr++) {
+		if (node_ptr->basil_node_id != NO_VAL)
+			continue;
+		base_state = node_ptr->state & NODE_STATE_BASE;
+		if (base_state == NODE_STATE_DOWN)
+			continue;
+
+		error("Node %s has no basil node_id", node_ptr->name);
+		last_node_update = time(NULL);
+		set_node_down(node_ptr->name, "No BASIL node_id");
+	}
+}
+#endif	/* APBASIL_LOC */
+#endif	/* HAVE_CRAY_XT */
+
+/*
+ * basil_query - Query BASIL for node and reservation state.
+ * Execute once at slurmctld startup and periodically thereafter.
+ * RET 0 or error code
+ */
+extern int basil_query(void)
+{
+	int error_code = SLURM_SUCCESS;
+#ifdef HAVE_CRAY_XT
+#ifdef APBASIL_LOC
+	struct config_record *config_ptr;
+	struct node_record *node_ptr;
+	struct job_record *job_ptr;
+	ListIterator job_iterator;
+	uint16_t base_state;
+	int i;
+	char *reason, *res_id;
+	static bool first_run = true;
+
+	/* Issue the BASIL QUERY request */
+	if (request_failure) {
+		fatal("basil query error: %s", "TBD");
+		return SLURM_ERROR;
+	}
+	debug("basil query initiated");
+
+	if (first_run) {
+		/* Set basil_node_id to NO_VAL since the default value 
+		 * of zero is a valid BASIL node ID */
+		node_ptr = node_record_table_ptr;
+		for (i=0; i<node_record_cnt; i++, node_ptr++)
+			node_ptr->basil_node_id = NO_VAL;
+		first_run = false;
+	}
+
+	/* Validate configuration for each node that BASIL reports */
+	for (each_basil_node) {
+#if BASIL_DEBUG
+		/* Log node state according to BASIL */
+		info("basil query: name=%s arch=%s",
+		     basil_node_name, basil_node_arch, etc.);
+#endif	/* BASIL_DEBUG */
+
+		/* NOTE: Cray should provide X-, Y- and Z-coordinates
+		 * in the future. When that happens, we'll want to use
+		 * those numbers to generate the hostname:
+		 * slurm_host_name = xmalloc(sizeof(conf->node_prefix) + 4);
+		 * sprintf(slurm_host_name, "%s%d%d%d", basil_node_name, X,Y,Z);
+		 * Until then the node name must contain a 3-digit numeric
+		 * suffix specifying the X-, Y- and Z-coordinates.
+		 */
+		node_ptr = find_node_record(basil_node_name);
+		if (node_ptr == NULL) {
+			error("basil node %s not found in slurm",
+			      basil_node_name);
+			continue;
+		}
+
+		/* Record BASIL's node_id for use in reservations */
+		node_ptr->basil_node_id = basil_node_id;
+
+		/* Update architecture in slurmctld's node record */
+		if (node_ptr->arch == NULL) {
+			node_ptr->arch = xstrdup(basil_node_arch);
+		}
+
+		/* Update slurmctld's node state if necessary */
+		reason = NULL;
+		base_state = node_ptr->state & NODE_STATE_BASE;
+		if (base_state != NODE_STATE_DOWN) {
+			if (strcmp(basil_state, "UP"))
+				reason = "basil state not UP";
+			else if (strcmp(basil_role, "BATCH"))
+				reason = "basil role not BATCH";
+		}
+
+		/* Calculate the total count of processors and 
+		 * MB of memory on the node */
+		config_ptr = node_ptr->config_ptr;
+		if ((slurmctld_conf.fast_schedule != 2) &&
+		    (basil_cpus < config_ptr->cpus)) {
+			error("Node %s has low cpu count %d",
+ 			      node_ptr->name, basil_cpus);
+			reason = "Low CPUs";
+		}
+		node_ptr->cpus = basil_cpus;
+		if ((slurmctld_conf.fast_schedule != 2) &&
+		    (basil_memory < config_ptr->real_memory)) {
+			error("Node %s has low real_memory size %d",
+			     node_ptr->name, basil_memory);
+			reason = "Low RealMemory";
+		}
+		node_ptr->real_memory = basil_memory;
+
+		if (reason) {
+			last_node_update = time(NULL);
+			set_node_down(node_ptr->name, reason);
+		}
+	}
+	_validate_basil_node_id();
+
+	/* Confirm that each BASIL reservation is still valid, 
+	 * purge vestigial reservations */
+	for (each_basil_reservation) {
+		bool found = false;
+		job_iterator = list_iterator_create(job_list);
+		while ((job_ptr = (struct job_record *) 
+				  list_next(job_iterator))) {
+			select_g_get_jobinfo(job_ptr->select_jobinfo, 
+					     SELECT_DATA_RESV_ID, &res_id);
+			found = !strcmp(res_id, basil_reservation_id);
+			xfree(res_id);
+			if (found)
+				break;
+		}
+		list_iterator_destroy(job_iterator);
+		if (!found) {
+			error("vestigial basil reservation %s being removed",
+			      basil_reservation_id);
+			basil_release_id(basil_reservation_id);
+		}
+	}
+#else
+	struct job_record *job_ptr;
+	ListIterator job_iterator;
+	char *res_id, *tmp;
+	int job_res_id;
+
+	/* Capture the highest reservation ID recorded to avoid re-use */
+	job_iterator = list_iterator_create(job_list);
+	while ((job_ptr = (struct job_record *) list_next(job_iterator))) {
+		res_id = NULL;
+		select_g_get_jobinfo(job_ptr->select_jobinfo, 
+				     SELECT_DATA_RESV_ID, &res_id);
+		if (res_id) {
+			tmp = strchr(res_id, '_');
+			if (tmp) {
+				job_res_id = atoi(tmp+1);
+				last_res_id = MAX(last_res_id, job_res_id);
+			}
+			xfree(res_id);
+		}
+	}
+	list_iterator_destroy(job_iterator);
+	debug("basil_query() executed, last_res_id=%d", last_res_id);
+#endif	/* APBASIL_LOC */
+#endif	/* HAVE_CRAY_XT */
+
+	return error_code;
+}
+
+/*
+ * basil_reserve - create a BASIL reservation.
+ * IN job_ptr - pointer to job which has just been allocated resources
+ * RET 0 or error code, job will abort or be requeued on failure
+ */
+extern int basil_reserve(struct job_record *job_ptr)
+{
+	int error_code = SLURM_SUCCESS;
+#ifdef HAVE_CRAY_XT
+#ifdef APBASIL_LOC
+	/* Issue the BASIL RESERVE request */
+	if (request_failure) {
+		error("basil reserve error: %s", "TBD");
+		return SLURM_ERROR;
+	}
+	select_g_set_jobinfo(job_ptr->select_jobinfo, 
+			     SELECT_DATA_RESV_ID, reservation_id);
+	debug("basil reservation made job_id=%u resv_id=%s", 
+	      job_ptr->job_id, reservation_id);
+#else
+	char reservation_id[32];
+	snprintf(reservation_id, sizeof(reservation_id), 
+		"resv_%d", ++last_res_id);
+	select_g_set_jobinfo(job_ptr->select_jobinfo, 
+			     SELECT_DATA_RESV_ID, reservation_id);
+	debug("basil reservation made job_id=%u resv_id=%s", 
+	      job_ptr->job_id, reservation_id);
+#endif	/* APBASIL_LOC */
+#endif	/* HAVE_CRAY_XT */
+	return error_code;
+}
+
+/*
+ * basil_release - release a BASIL reservation by job.
+ * IN job_ptr - pointer to job which has just been deallocated resources
+ * RET 0 or error code
+ */
+extern int basil_release(struct job_record *job_ptr)
+{
+	int error_code = SLURM_SUCCESS;
+#ifdef HAVE_CRAY_XT
+	char *reservation_id = NULL;
+	select_g_get_jobinfo(job_ptr->select_jobinfo, 
+			     SELECT_DATA_RESV_ID, &reservation_id);
+	if (reservation_id) {
+		error_code = basil_release_id(reservation_id);
+		xfree(reservation_id);
+	}
+#endif	/* HAVE_CRAY_XT */
+	return error_code;
+}
+
+/*
+ * basil_release_id - release a BASIL reservation by ID.
+ * IN reservation_id - ID of reservation to release
+ * RET 0 or error code
+ */
+extern int basil_release_id(char *reservation_id)
+{
+	int error_code = SLURM_SUCCESS;
+#ifdef HAVE_CRAY_XT
+#ifdef APBASIL_LOC
+	/* Issue the BASIL RELEASE request */
+	if (request_failure) {
+		error("basil release of %s error: %s", reservation_id, "TBD");
+		return SLURM_ERROR;
+	}
+	debug("basil release of reservation %s complete", reservation_id);
+#else
+	debug("basil release of reservation %s complete", reservation_id);
+#endif	/* APBASIL_LOC */
+#endif	/* HAVE_CRAY_XT */
+	return error_code;
+}
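
When apbasil is absent, reservation IDs take the form "resv_%d"; basil_query() recovers the numeric suffix from each job's stored ID so that last_res_id stays monotonic across restarts. A self-contained sketch of that round trip:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	char res_id[32];
	char *tmp;
	int last_res_id = 41, job_res_id = 0;

	/* create the next ID the way basil_reserve() does */
	snprintf(res_id, sizeof(res_id), "resv_%d", ++last_res_id);

	/* recover the numeric suffix the way basil_query() does */
	tmp = strchr(res_id, '_');
	if (tmp)
		job_res_id = atoi(tmp + 1);
	printf("%s -> %d\n", res_id, job_res_id);	/* resv_42 -> 42 */
	return 0;
}
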
diff --git a/src/slurmctld/basil_interface.h b/src/slurmctld/basil_interface.h
new file mode 100644
index 0000000000000000000000000000000000000000..9c6c3109cee0331f314cd3b9e5b33e543aca1759
--- /dev/null
+++ b/src/slurmctld/basil_interface.h
@@ -0,0 +1,73 @@
+/*****************************************************************************\
+ *  basil_interface.h - slurmctld interface to BASIL, Cray's Batch Application
+ *	Scheduler Interface Layer (BASIL)
+ *****************************************************************************
+ *  Copyright (C) 2009 Lawrence Livermore National Security.
+ *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+ *  Written by Morris Jette <jette1@llnl.gov>
+ *  CODE-OCEC-09-009. All rights reserved.
+ *  
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
+ *  
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission 
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and 
+ *  distribute linked combinations including the two. You must obey the GNU 
+ *  General Public License in all respects for all of the code used other than 
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this 
+ *  exception to your version of the file(s), but you are not obligated to do 
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in 
+ *  the program, then also delete it here.
+ *  
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *  
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#ifndef _HAVE_BASIL_INTERFACE_H
+#define _HAVE_BASIL_INTERFACE_H
+
+#include "src/slurmctld/slurmctld.h"
+
+/*
+ * basil_query - Query BASIL for node and reservation state.
+ * Execute once at slurmctld startup and periodically thereafter.
+ * RET 0 or error code
+ */
+extern int basil_query(void);
+
+/*
+ * basil_reserve - create a BASIL reservation.
+ * IN job_ptr - pointer to job which has just been allocated resources
+ * RET 0 or error code
+ */
+extern int basil_reserve(struct job_record *job_ptr);
+
+/*
+ * basil_release - release a BASIL reservation by job.
+ * IN job_ptr - pointer to job which has just been deallocated resources
+ * RET 0 or error code
+ */
+extern int basil_release(struct job_record *job_ptr);
+
+/*
+ * basil_release_id - release a BASIL reservation by ID.
+ * IN reservation_id - ID of reservation to release
+ * RET 0 or error code
+ */
+extern int basil_release_id(char *reservation_id);
+
+#endif	/* !_HAVE_BASIL_INTERFACE_H */
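
A hypothetical call pattern for this interface, matching the function comments above (the wrappers are illustrative, not part of the patch): basil_query() at startup and periodically thereafter, basil_reserve() when a job is allocated resources, basil_release() when it is deallocated:

#include "src/slurmctld/basil_interface.h"

/* illustrative wrappers only */
static int on_job_allocated(struct job_record *job_ptr)
{
	/* on failure the job is aborted or requeued by the caller */
	return basil_reserve(job_ptr);
}

static int on_job_deallocated(struct job_record *job_ptr)
{
	return basil_release(job_ptr);
}
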
diff --git a/src/slurmctld/controller.c b/src/slurmctld/controller.c
index a3bf1b8941e580a97d563dc877d76a324fc22843..8fa161a709c5746ddc3a68f3adeb83bb5a429275 100644
--- a/src/slurmctld/controller.c
+++ b/src/slurmctld/controller.c
@@ -2,13 +2,14 @@
  *  controller.c - main control machine daemon for slurm
  *****************************************************************************
  *  Copyright (C) 2002-2007 The Regents of the University of California.
- *  Copyright (C) 2008 Lawrence Livermore National Security.
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>, Kevin Tew <tew1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -59,6 +60,7 @@
 
 #include <slurm/slurm_errno.h>
 
+#include "src/common/assoc_mgr.h"
 #include "src/common/checkpoint.h"
 #include "src/common/daemonize.h"
 #include "src/common/fd.h"
@@ -72,24 +74,28 @@
 #include "src/common/slurm_accounting_storage.h"
 #include "src/common/slurm_auth.h"
 #include "src/common/slurm_jobcomp.h"
+#include "src/common/slurm_priority.h"
 #include "src/common/slurm_protocol_api.h"
 #include "src/common/switch.h"
 #include "src/common/uid.h"
 #include "src/common/xsignal.h"
 #include "src/common/xstring.h"
-#include "src/common/assoc_mgr.h"
 
 #include "src/slurmctld/agent.h"
+#include "src/slurmctld/basil_interface.h"
 #include "src/slurmctld/job_scheduler.h"
 #include "src/slurmctld/licenses.h"
 #include "src/slurmctld/locks.h"
 #include "src/slurmctld/ping_nodes.h"
+#include "src/slurmctld/port_mgr.h"
 #include "src/slurmctld/proc_req.h"
 #include "src/slurmctld/read_config.h"
+#include "src/slurmctld/reservation.h"
 #include "src/slurmctld/slurmctld.h"
 #include "src/slurmctld/sched_plugin.h"
 #include "src/slurmctld/srun_comm.h"
 #include "src/slurmctld/state_save.h"
+#include "src/slurmctld/topo_plugin.h"
 #include "src/slurmctld/trigger_mgr.h"
 
 
@@ -145,6 +151,9 @@ void *acct_db_conn = NULL;
 int accounting_enforce = 0;
 int association_based_accounting = 0;
 bool ping_nodes_now = false;
+int      cluster_procs = 0;
+struct switch_record *switch_record_table = NULL;
+int switch_record_cnt = 0;
 
 /* Local variables */
 static int	daemonize = DEFAULT_DAEMONIZE;
@@ -156,7 +165,7 @@ static int	recover   = DEFAULT_RECOVER;
 static pthread_cond_t server_thread_cond = PTHREAD_COND_INITIALIZER;
 static pid_t	slurmctld_pid;
 static char    *slurm_conf_filename;
-
+static int      primary = 1;
 /*
  * Static list of signals to block in this process
  * *Must be zero-terminated*
@@ -186,6 +195,7 @@ inline static void  _update_cred_key(void);
 inline static void  _usage(char *prog_name);
 static bool         _wait_for_server_thread(void);
 static void *       _assoc_cache_mgr(void *no_data);
+static void         _become_slurm_user(void);
 
 typedef struct connection_arg {
 	int newsockfd;
@@ -201,8 +211,7 @@ int main(int argc, char *argv[])
 	slurmctld_lock_t config_write_lock = {
 		WRITE_LOCK, WRITE_LOCK, WRITE_LOCK, WRITE_LOCK };
 	assoc_init_args_t assoc_init_arg;
-	pthread_t assoc_cache_thread = 0;
-	gid_t slurm_user_gid;
+	pthread_t assoc_cache_thread;
 
 	/*
 	 * Establish initial configuration
@@ -224,43 +233,7 @@ int main(int argc, char *argv[])
 	 * able to write a core dump.
 	 */
 	_init_pidfile();
-
-	/* Determine SlurmUser gid */
-	slurm_user_gid = gid_from_uid(slurmctld_conf.slurm_user_id);
-	if (slurm_user_gid == (gid_t) -1) {
-		fatal("Failed to determine gid of SlurmUser(%d)", 
-		      slurm_user_gid);
-	}
-
-	/* Initialize supplementary groups ID list for SlurmUser */
-	if (getuid() == 0) {
-		/* root does not need supplementary groups */
-		if ((slurmctld_conf.slurm_user_id == 0) &&
-		    (setgroups(0, NULL) != 0)) {
-			fatal("Failed to drop supplementary groups, "
-			      "setgroups: %m");
-		} else if ((slurmctld_conf.slurm_user_id != getuid()) &&
-			   initgroups(slurmctld_conf.slurm_user_name, 
-				      slurm_user_gid)) {
-			fatal("Failed to set supplementary groups, "
-			      "initgroups: %m");
-		}
-	} else {
-		info("Not running as root. Can't drop supplementary groups");
-	}
-
-	/* Set GID to GID of SlurmUser */
-	if ((slurm_user_gid != getegid()) &&
-	    (setgid(slurm_user_gid))) {
-		fatal("Failed to set GID to %d", slurm_user_gid);
-	}
-
-	/* Set UID to UID of SlurmUser */
-	if ((slurmctld_conf.slurm_user_id != getuid()) &&
-	    (setuid(slurmctld_conf.slurm_user_id))) {
-		fatal("Can not set uid to SlurmUser(%d): %m", 
-		      slurmctld_conf.slurm_user_id);
-	}
+	_become_slurm_user();
 
 	if (stat(slurmctld_conf.mail_prog, &stat_buf) != 0)
 		error("Configured MailProg is invalid");
@@ -372,20 +345,26 @@ int main(int argc, char *argv[])
 			fatal("slurmdbd and/or database must be up at "
 			      "slurmctld start time");
 		}
-	}
+	}  
+
+	/* Now load the usage from a flat file since it isn't kept in
+	   the database.  No need to check for an error: if this fails
+	   we will get an error message and go on our way, since there
+	   is nothing we can do about it anyway.
+	*/
+	load_assoc_usage(slurmctld_conf.state_save_location);
 
 	/* This thread is looking for when we get correct data from
 	   the database so we can update the assoc_ptr's in the jobs
 	*/
-	if(running_cache) {
+	if (running_cache) {
 		slurm_attr_init(&thread_attr);
-		if (pthread_create(
-			    &assoc_cache_thread, 
-			    &thread_attr, _assoc_cache_mgr, NULL))
+		if (pthread_create(&assoc_cache_thread, &thread_attr, 
+				  _assoc_cache_mgr, NULL))
 			fatal("pthread_create error %m");
 		slurm_attr_destroy(&thread_attr);
 	}
-
+	
 	info("slurmctld version %s started on cluster %s",
 	     SLURM_VERSION, slurmctld_cluster_name);
 
@@ -429,10 +408,11 @@ int main(int argc, char *argv[])
 		    (strcmp(node_name,
 			    slurmctld_conf.backup_controller) == 0)) {
 			slurm_sched_fini();	/* make sure shutdown */
+			primary = 0;
 			run_backup();
 		} else if (slurmctld_conf.control_machine &&
-			 (strcmp(node_name, slurmctld_conf.control_machine) 
-			  == 0)) {
+			   (strcmp(node_name, slurmctld_conf.control_machine)
+			    == 0)) {
 			(void) _shutdown_backup_controller(SHUTDOWN_WAIT);
 			/* Now recover the remaining state information */
 			if (switch_restore(slurmctld_conf.state_save_location,
@@ -448,6 +428,8 @@ int main(int argc, char *argv[])
 			
 			if (recover == 0) 
 				_accounting_mark_all_nodes_down("cold-start");
+
+			primary = 1;
 			
 		} else {
 			error("this host (%s) not valid controller (%s or %s)",
@@ -456,7 +438,7 @@ int main(int argc, char *argv[])
 			exit(0);
 		}
 
-		if(!acct_db_conn) {
+		if (!acct_db_conn) {
 			acct_db_conn = 
 				acct_storage_g_get_connection(true, 0, false);
 			/* We only send in a variable the first time
@@ -480,9 +462,14 @@ int main(int argc, char *argv[])
 			slurmctld_conf.slurmctld_port);
 		
 		_accounting_cluster_ready();
+
+		if (slurm_priority_init() != SLURM_SUCCESS)
+			fatal("failed to initialize priority plugin");
+
 		if (slurm_sched_init() != SLURM_SUCCESS)
 			fatal("failed to initialize scheduling plugin");
 
 		/*
 		 * create attached thread to process RPCs
 		 */
@@ -500,8 +487,8 @@ int main(int argc, char *argv[])
 		 */
 		slurm_attr_init(&thread_attr);
 		if (pthread_create(&slurmctld_config.thread_id_sig,
-				 &thread_attr, _slurmctld_signal_hand,
-				 NULL))
+				   &thread_attr, _slurmctld_signal_hand,
+				   NULL))
 			fatal("pthread_create %m");
 		slurm_attr_destroy(&thread_attr);
 
@@ -510,8 +497,8 @@ int main(int argc, char *argv[])
 		 */
 		slurm_attr_init(&thread_attr);
 		if (pthread_create(&slurmctld_config.thread_id_save,
-				&thread_attr, slurmctld_state_save,
-				NULL))
+				   &thread_attr, slurmctld_state_save,
+				   NULL))
 			fatal("pthread_create %m");
 		slurm_attr_destroy(&thread_attr);
 
@@ -520,8 +507,8 @@ int main(int argc, char *argv[])
 		 */
 		slurm_attr_init(&thread_attr);
 		if (pthread_create(&slurmctld_config.thread_id_power,
-				&thread_attr, init_power_save,
-				NULL))
+				   &thread_attr, init_power_save,
+				   NULL))
 			fatal("pthread_create %m");
 		slurm_attr_destroy(&thread_attr);
 
@@ -531,14 +518,16 @@ int main(int argc, char *argv[])
 		_slurmctld_background(NULL);
 
 		/* termination of controller */
+		slurm_priority_fini();
 		shutdown_state_save();
 		pthread_join(slurmctld_config.thread_id_sig,  NULL);
 		pthread_join(slurmctld_config.thread_id_rpc,  NULL);
 		pthread_join(slurmctld_config.thread_id_save, NULL);
-		pthread_join(slurmctld_config.thread_id_power,NULL);
-		if(assoc_cache_thread) {
-			/* end the thread here just say we aren't
-			 * running cache so it ends */
+
+		if (running_cache) {
+			/* break out and end the association cache
+			 * thread since we are shutting down, no reason
+			 * to wait for current info from the database */
 			slurm_mutex_lock(&assoc_cache_mutex);
 			running_cache = (uint16_t)NO_VAL;
 			pthread_cond_signal(&assoc_cache_cond);
@@ -553,17 +542,30 @@ int main(int argc, char *argv[])
 		/* Save any pending state save RPCs */
 		acct_storage_g_close_connection(&acct_db_conn);
 
+		/* join the power save thread after saving all state
+		 * since it may block for a while waiting for spawned
+		 * processes to exit */
+		pthread_join(slurmctld_config.thread_id_power, NULL);
+
 		if (slurmctld_config.resume_backup == false)
 			break;
+
+		/* primary controller doesn't resume backup mode */
+		if ((slurmctld_config.resume_backup == true) &&
+		    (primary == 1))
+			break;
+
 		recover = 2;
 	}
 
 	/* Since pidfile is created as user root (its owner is
 	 *   changed to SlurmUser) SlurmUser may not be able to 
 	 *   remove it, so this is not necessarily an error. */
-	if (unlink(slurmctld_conf.slurmctld_pidfile) < 0)
+	if (unlink(slurmctld_conf.slurmctld_pidfile) < 0) {
 		verbose("Unable to remove pidfile '%s': %m",
 			slurmctld_conf.slurmctld_pidfile);
+	}
+
 	
 #ifdef MEMORY_LEAK_DEBUG
 	/* This should purge all allocated memory,   *\
@@ -581,21 +583,24 @@ int main(int argc, char *argv[])
 	if (i >= 10)
 		error("Left %d agent threads active", cnt);
 
-	slurm_sched_fini();
+	slurm_sched_fini();	/* Stop all scheduling */
 
 	/* Purge our local data structures */
 	job_fini();
 	part_fini();	/* part_fini() must preceed node_fini() */
 	node_fini();
+	resv_fini();
 	trigger_fini();
 	assoc_mgr_fini(slurmctld_conf.state_save_location);
+	reserve_port_config(NULL);
 
-	/* Plugins are needed to purge job/node data structures,
+	/* Some plugins are needed to purge job/node data structures,
 	 * unplug after other data structures are purged */
 	g_slurm_jobcomp_fini();
 	slurm_acct_storage_fini();
 	slurm_jobacct_gather_fini();
 	slurm_select_fini();
+	slurm_topo_fini();
 	checkpoint_fini();
 	slurm_auth_fini();
 	switch_fini();
@@ -621,7 +626,7 @@ int main(int argc, char *argv[])
 	xfree(slurmctld_cluster_name);
 	if (cnt) {
 		info("Slurmctld shutdown completing with %d active agent "
-			"threads\n\n", cnt);
+		     "threads", cnt);
 	}
 	log_fini();
 	
@@ -677,7 +682,11 @@ static void  _init_config(void)
 }
 
 /* Read configuration file.
- * Same name as API function for use in accounting_storage plugin */
+ * Same name as API function for use in accounting_storage plugin.
+ * Anything you add to this function must also be added to the
+ * _slurm_rpc_reconfigure_controller function inside proc_req.c;
+ * try to keep these in sync.
+ */
 extern int slurm_reconfigure(void)
 {
 	/* Locks: Write configuration, job, node, and partition */
@@ -702,6 +711,8 @@ extern int slurm_reconfigure(void)
 	trigger_reconfig();
 	slurm_sched_partition_change();	/* notify sched plugin */
 	select_g_reconfigure();		/* notify select plugin too */
+	priority_g_reconfig();          /* notify priority plugin too */
+
 	return rc;
 }
 
@@ -798,6 +809,7 @@ static void *_slurmctld_rpc_mgr(void *no_data)
 	slurmctld_lock_t config_read_lock = { 
 		READ_LOCK, NO_LOCK, NO_LOCK, NO_LOCK };
 	int sigarray[] = {SIGUSR1, 0};
+	char *node_addr = NULL;
 
 	(void) pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
 	(void) pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
@@ -809,12 +821,26 @@ static void *_slurmctld_rpc_mgr(void *no_data)
 	    (&thread_attr_rpc_req, PTHREAD_CREATE_DETACHED))
 		fatal("pthread_attr_setdetachstate %m");
 
+	/* set node_addr to bind to (NULL means any) */
+	if (slurmctld_conf.backup_controller && slurmctld_conf.backup_addr &&
+	    (strcmp(node_name, slurmctld_conf.backup_controller) == 0) &&
+	    (strcmp(slurmctld_conf.backup_controller,
+		    slurmctld_conf.backup_addr) != 0)) {
+		node_addr = slurmctld_conf.backup_addr;
+	} else if ((strcmp(node_name,
+			   slurmctld_conf.control_machine) == 0) &&
+		 (strcmp(slurmctld_conf.control_machine,
+			 slurmctld_conf.control_addr) != 0)) {
+		node_addr = slurmctld_conf.control_addr;
+	}
+
 	/* initialize port for RPCs */
 	lock_slurmctld(config_read_lock);
-	if ((sockfd = slurm_init_msg_engine_port(slurmctld_conf.
-						 slurmctld_port))
+	if ((sockfd = slurm_init_msg_engine_addrname_port(node_addr,
+							  slurmctld_conf.
+							  slurmctld_port))
 	    == SLURM_SOCKET_ERROR)
-		fatal("slurm_init_msg_engine_port error %m");
+		fatal("slurm_init_msg_engine_addrname_port error %m");
 	unlock_slurmctld(config_read_lock);
 	slurm_get_stream_addr(sockfd, &srv_addr);
 	slurm_get_ip_str(&srv_addr, &port, ip, sizeof(ip));
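
With this change the listener binds to BackupAddr/ControlAddr only when
it differs from the host name, and a NULL node_addr keeps the old
bind-to-any behavior. A minimal standalone sketch of that convention
with plain BSD sockets (bind_listener and its parameters are
hypothetical helpers, not SLURM API):

#include <arpa/inet.h>
#include <netdb.h>
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

static int bind_listener(const char *node_addr, unsigned short port)
{
	struct sockaddr_in sin;
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd < 0)
		return -1;
	memset(&sin, 0, sizeof(sin));
	sin.sin_family = AF_INET;
	sin.sin_port   = htons(port);
	if (node_addr != NULL) {
		struct hostent *h = gethostbyname(node_addr);
		if (h == NULL) {
			close(fd);
			return -1;
		}
		memcpy(&sin.sin_addr, h->h_addr, h->h_length);
	} else {
		/* NULL means listen on every interface */
		sin.sin_addr.s_addr = htonl(INADDR_ANY);
	}
	if (bind(fd, (struct sockaddr *) &sin, sizeof(sin)) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}
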
@@ -973,11 +999,13 @@ static void _free_server_thread(void)
 
 static int _accounting_cluster_ready()
 {
-	uint32_t procs = 0;
 	struct node_record *node_ptr;
 	int i;
 	int rc = SLURM_ERROR;
 	time_t event_time = time(NULL);
+	int procs = 0;
+	bitstr_t *total_node_bitmap = NULL;
+	char *cluster_nodes = NULL;
 
 	node_ptr = node_record_table_ptr;
 	for (i = 0; i < node_record_count; i++, node_ptr++) {
@@ -993,9 +1021,25 @@ static int _accounting_cluster_ready()
 #endif
 	}
 
+	/* Since cluster_procs is used elsewhere, total the count in
+	   the local "procs" variable first so readers of
+	   cluster_procs never see a partially computed value.
+	*/
+	cluster_procs = procs;
+
+	/* Now get the names of all the nodes on the cluster at this
+	   time and send them as well.
+	*/
+	total_node_bitmap = bit_alloc(node_record_count);
+	bit_nset(total_node_bitmap, 0, node_record_count-1);
+	cluster_nodes = bitmap2node_name(total_node_bitmap);
+	FREE_NULL_BITMAP(total_node_bitmap);
+
 	rc = clusteracct_storage_g_cluster_procs(acct_db_conn,
 						 slurmctld_cluster_name,
-						 procs, event_time);
+						 cluster_nodes,
+						 cluster_procs, event_time);
+	xfree(cluster_nodes);
 	if(rc == ACCOUNTING_FIRST_REG) {
 		/* see if we are running directly to a database
 		 * instead of a slurmdbd.
@@ -1004,6 +1048,12 @@ static int _accounting_cluster_ready()
 		rc = SLURM_SUCCESS;
 	}
 
+	/* just in case the numbers change we need to
+	   update the proc count on the cluster inside
+	   the priority plugin */
+	priority_g_set_max_cluster_usage(cluster_procs,
+					 slurmctld_conf.priority_decay_hl);
+
 	return rc;
 }
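
For reference, the bitmap helpers used above compose like this; a usage
sketch with error handling omitted, and the resulting hostlist string
is whatever bitmap2node_name() produces for the cluster:

/* Usage sketch of the SLURM bitmap helpers seen above; the node
 * names in the comment are invented for illustration. */
bitstr_t *all = bit_alloc(node_record_count);	/* starts all clear */
bit_nset(all, 0, node_record_count - 1);	/* mark every node  */
char *names = bitmap2node_name(all);		/* e.g. "tux[0-99]" */
/* ...hand "names" to the accounting layer... */
xfree(names);
FREE_NULL_BITMAP(all);
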
 
@@ -1076,6 +1126,7 @@ static void *_slurmctld_background(void *no_data)
 	static time_t last_ping_node_time;
 	static time_t last_ping_srun_time;
 	static time_t last_purge_job_time;
+	static time_t last_resv_time;
 	static time_t last_timelimit_time;
 	static time_t last_assert_primary_time;
 	static time_t last_trigger;
@@ -1112,7 +1163,7 @@ static void *_slurmctld_background(void *no_data)
 	last_sched_time = last_checkpoint_time = last_group_time = now;
 	last_purge_job_time = last_trigger = last_health_check_time = now;
 	last_timelimit_time = last_assert_primary_time = now;
-	last_no_resp_msg_time = now;
+	last_no_resp_msg_time = last_resv_time = now;
 	if (slurmctld_conf.slurmd_timeout) {
 		/* We ping nodes that haven't responded in SlurmdTimeout/3,
 		 * but need to do the test at a higher frequency or we might
@@ -1163,6 +1214,13 @@ static void *_slurmctld_background(void *no_data)
 			break;
 		}
 
+		if (difftime(now, last_resv_time) >= 2) {
+			last_resv_time = now;
+			lock_slurmctld(node_write_lock);
+			set_node_maint_mode();
+			unlock_slurmctld(node_write_lock);
+		}
+
 		if (difftime(now, last_no_resp_msg_time) >= 
 		    no_resp_msg_interval) {
 			last_no_resp_msg_time = now;
@@ -1187,6 +1245,9 @@ static void *_slurmctld_background(void *no_data)
 				last_health_check_time = now;
 				lock_slurmctld(node_write_lock);
 				run_health_check();
+#ifdef HAVE_CRAY_XT
+				basil_query();
+#endif
 				unlock_slurmctld(node_write_lock);
 			}
 		}
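
Each block in this loop follows the same pattern the new reservation
check uses: a static last-run timestamp, a difftime() comparison, and a
reset before doing the work. A generic sketch of that pattern (the
2-second interval and the task body are placeholders):

static time_t last_resv_time;		/* zero until the first tick */

static void background_tick(void)
{
	time_t now = time(NULL);

	if (difftime(now, last_resv_time) >= 2) {
		last_resv_time = now;	/* reset first, then work */
		/* take the needed slurmctld locks, run the periodic
		 * task, release the locks */
	}
}
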
@@ -1297,8 +1358,9 @@ void save_all_state(void)
 {
 	/* Each of these functions lock their own databases */
 	schedule_job_save();
-	schedule_part_save();
 	schedule_node_save();
+	schedule_part_save();
+	schedule_resv_save();
 	schedule_trigger_save();
 	select_g_state_save(slurmctld_conf.state_save_location);
 	dump_assoc_mgr_state(slurmctld_conf.state_save_location);
@@ -1314,6 +1376,7 @@ extern void send_all_to_accounting(time_t event_time)
 	debug2("send_all_to_accounting: called");
 	send_jobs_to_accounting();
 	send_nodes_to_accounting(event_time);
+	send_resvs_to_accounting();
 }
 
 /* 
@@ -1642,6 +1705,7 @@ static void *_assoc_cache_mgr(void *no_data)
 {
 	ListIterator itr = NULL;
 	struct job_record *job_ptr = NULL;
+	acct_qos_rec_t qos_rec;
 	acct_association_rec_t assoc_rec;
 	/* Write lock on jobs, read lock on nodes and partitions */
 	slurmctld_lock_t job_write_lock =
@@ -1665,12 +1729,28 @@ static void *_assoc_cache_mgr(void *no_data)
 	}
 	
 	debug2("got real data from the database "
-	       "refreshing the association ptr's %d", list_count(job_list));
+	       "refreshing the association ptrs for %d jobs",
+	       list_count(job_list));
 	itr = list_iterator_create(job_list);
 	while ((job_ptr = list_next(itr))) {
+		if (job_ptr->qos) {
+			memset(&qos_rec, 0, sizeof(acct_qos_rec_t));
+			qos_rec.id = job_ptr->qos;
+			if ((assoc_mgr_fill_in_qos(
+				     acct_db_conn, &qos_rec,
+				     accounting_enforce,
+				     (acct_qos_rec_t **)&job_ptr->qos_ptr))
+			    != SLURM_SUCCESS) {
+				verbose("Invalid qos (%u) for job_id %u",
+					job_ptr->qos, job_ptr->job_id);
+				/* not a fatal error, qos could have
+				 * been removed */
+			}
+		}
 		if(job_ptr->assoc_id) {
 			memset(&assoc_rec, 0, sizeof(acct_association_rec_t));
 			assoc_rec.id = job_ptr->assoc_id;
+
 			debug("assoc is %x (%d) for job %u", 
 			      job_ptr->assoc_ptr, job_ptr->assoc_id, 
 			      job_ptr->job_id);
@@ -1686,6 +1766,7 @@ static void *_assoc_cache_mgr(void *no_data)
 				/* not a fatal error, association could have
 				 * been removed */
 			}
+
 			debug("now assoc is %x (%d) for job %u", 
 			      job_ptr->assoc_ptr, job_ptr->assoc_id, 
 			      job_ptr->job_id);
@@ -1698,3 +1779,47 @@ static void *_assoc_cache_mgr(void *no_data)
 	_accounting_cluster_ready();
 	return NULL;
 }
+
+static void _become_slurm_user(void)
+{
+	gid_t slurm_user_gid;
+
+	/* Determine SlurmUser gid */
+	slurm_user_gid = gid_from_uid(slurmctld_conf.slurm_user_id);
+	if (slurm_user_gid == (gid_t) -1) {
+		fatal("Failed to determine gid of SlurmUser(%d)",
+		      slurmctld_conf.slurm_user_id);
+	}
+
+	/* Initialize supplementary groups ID list for SlurmUser */
+	if (getuid() == 0) {
+		/* root does not need supplementary groups */
+		if ((slurmctld_conf.slurm_user_id == 0) &&
+		    (setgroups(0, NULL) != 0)) {
+			fatal("Failed to drop supplementary groups, "
+			      "setgroups: %m");
+		} else if ((slurmctld_conf.slurm_user_id != getuid()) &&
+			   initgroups(slurmctld_conf.slurm_user_name, 
+				      slurm_user_gid)) {
+			fatal("Failed to set supplementary groups, "
+			      "initgroups: %m");
+		}
+	} else {
+		info("Not running as root. Can't drop supplementary groups");
+	}
+
+	/* Set GID to GID of SlurmUser */
+	if ((slurm_user_gid != getegid()) &&
+	    (setgid(slurm_user_gid))) {
+		fatal("Failed to set GID to %d", slurm_user_gid);
+	}
+
+	/* Set UID to UID of SlurmUser */
+	if ((slurmctld_conf.slurm_user_id != getuid()) &&
+	    (setuid(slurmctld_conf.slurm_user_id))) {
+		fatal("Can not set uid to SlurmUser(%d): %m",
+		      slurmctld_conf.slurm_user_id);
+	}
+}
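
The order of operations in _become_slurm_user() matters: supplementary
groups and the GID must be changed while the process is still root,
because setgroups()/initgroups() and setgid() fail once setuid() has
dropped to an unprivileged user. A minimal sketch of that ordering
under the same assumption (drop_to is a hypothetical helper, not SLURM
code):

#include <grp.h>
#include <sys/types.h>
#include <unistd.h>

static int drop_to(uid_t uid, gid_t gid, const char *user)
{
	/* 1. supplementary groups, while still root */
	if ((getuid() == 0) && (initgroups(user, gid) != 0))
		return -1;
	/* 2. primary group, still requires privilege */
	if ((getegid() != gid) && (setgid(gid) != 0))
		return -1;
	/* 3. user id last; after this the calls above would fail */
	if ((getuid() != uid) && (setuid(uid) != 0))
		return -1;
	return 0;
}
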
diff --git a/src/slurmctld/job_mgr.c b/src/slurmctld/job_mgr.c
index 3c3f38eacb6e81c9c811c03e3c3b212da2c85ca6..c11f8a52dc364774b9351b37ddb46f5bc38338ea 100644
--- a/src/slurmctld/job_mgr.c
+++ b/src/slurmctld/job_mgr.c
@@ -4,13 +4,14 @@
  *	(last_job_update), and hash table (job_hash)
  *****************************************************************************
  *  Copyright (C) 2002-2007 The Regents of the University of California.
- *  Copyright (C) 2008 Lawrence Livermore National Security.
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -46,12 +47,15 @@
 #include <dirent.h>
 #include <errno.h>
 #include <fcntl.h>
+#include <libgen.h>
 #include <signal.h>
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
 #include <strings.h>
 #include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
 
 #include <slurm/slurm_errno.h>
 
@@ -76,6 +80,7 @@
 #include "src/slurmctld/locks.h"
 #include "src/slurmctld/node_scheduler.h"
 #include "src/slurmctld/proc_req.h"
+#include "src/slurmctld/reservation.h"
 #include "src/slurmctld/slurmctld.h"
 #include "src/slurmctld/sched_plugin.h"
 #include "src/slurmctld/srun_comm.h"
@@ -90,7 +95,9 @@
 #define JOB_HASH_INX(_job_id)	(_job_id % hash_table_size)
 
 /* Change JOB_STATE_VERSION value when changing the state save format */
-#define JOB_STATE_VERSION      "VER006"
+#define JOB_STATE_VERSION      "VER007"
+
+#define JOB_CKPT_VERSION      "JOB_CKPT_001"
 
 /* Global variables */
 List   job_list = NULL;		/* job_record list */
@@ -107,7 +114,7 @@ static bool     wiki_sched_test = false;
 
 /* Local functions */
 static void _add_job_hash(struct job_record *job_ptr);
-
+static int  _checkpoint_job_record (struct job_record *job_ptr, char *image_dir);
 static int  _copy_job_desc_to_file(job_desc_msg_t * job_desc,
 				   uint32_t job_id);
 static int  _copy_job_desc_to_job_record(job_desc_msg_t * job_desc,
@@ -115,14 +122,13 @@ static int  _copy_job_desc_to_job_record(job_desc_msg_t * job_desc,
 					 struct part_record *part_ptr,
 					 bitstr_t ** exc_bitmap,
 					 bitstr_t ** req_bitmap);
+static job_desc_msg_t * _copy_job_record_to_job_desc(struct job_record *job_ptr);
 static char *_copy_nodelist_no_dup(char *node_list);
 static void _del_batch_list_rec(void *x);
 static void _delete_job_desc_files(uint32_t job_id);
 static void _dump_job_details(struct job_details *detail_ptr,
 			      Buf buffer);
 static void _dump_job_state(struct job_record *dump_job_ptr, Buf buffer);
-static void _excise_node_from_job(struct job_record *job_ptr, 
-				  struct node_record *node_ptr);
 static int  _find_batch_dir(void *x, void *key);
 static void _get_batch_job_dir_ids(List batch_dirs);
 static void _job_timed_out(struct job_record *job_ptr);
@@ -133,6 +139,9 @@ static int  _list_find_job_id(void *job_entry, void *key);
 static int  _list_find_job_old(void *job_entry, void *key);
 static int  _load_job_details(struct job_record *job_ptr, Buf buffer);
 static int  _load_job_state(Buf buffer);
+static void _notify_srun_missing_step(struct job_record *job_ptr, int node_inx,
+				      time_t now);
+static void _pack_job_for_ckpt (struct job_record *job_ptr, Buf buffer);
 static void _pack_default_job_details(struct job_details *detail_ptr,
 				      Buf buffer);
 static void _pack_pending_job_details(struct job_details *detail_ptr,
@@ -142,6 +151,7 @@ static void _purge_lost_batch_jobs(int node_inx, time_t now);
 static void _read_data_array_from_file(char *file_name, char ***data,
 				       uint32_t * size);
 static void _read_data_from_file(char *file_name, char **data);
+static char *_read_job_ckpt_file(char *ckpt_file, int *size_ptr);
 static void _remove_defunct_batch_dirs(List batch_dirs);
 static int  _reset_detail_bitmaps(struct job_record *job_ptr);
 static void _reset_step_bitmaps(struct job_record *job_ptr);
@@ -165,6 +175,7 @@ static int  _write_data_array_to_file(char *file_name, char **data,
 				      uint32_t size);
 static void _xmit_new_end_time(struct job_record *job_ptr);
 
+
 /* 
  * create_job_record - create an empty job_record including job_details.
  *	load its values with defaults (zeros, nulls, and magic cookie)
@@ -222,28 +233,33 @@ void delete_job_details(struct job_record *job_entry)
 	if (job_entry->details == NULL)
 		return;
 
-	_delete_job_desc_files(job_entry->job_id);
 	xassert (job_entry->details->magic == DETAILS_MAGIC);
+	_delete_job_desc_files(job_entry->job_id);
+
 	for (i=0; i<job_entry->details->argc; i++)
 		xfree(job_entry->details->argv[i]);
 	xfree(job_entry->details->argv);
-	xfree(job_entry->details->req_nodes);
-	xfree(job_entry->details->exc_nodes);
-	FREE_NULL_BITMAP(job_entry->details->req_node_bitmap);
-	xfree(job_entry->details->req_node_layout);
+	xfree(job_entry->details->ckpt_dir);
+	xfree(job_entry->details->cpu_bind);
+	if (job_entry->details->depend_list)
+		list_destroy(job_entry->details->depend_list);
+	xfree(job_entry->details->dependency);
+	xfree(job_entry->details->err);
 	FREE_NULL_BITMAP(job_entry->details->exc_node_bitmap);
+	xfree(job_entry->details->exc_nodes);
+	if (job_entry->details->feature_list)
+		list_destroy(job_entry->details->feature_list);
 	xfree(job_entry->details->features);
-	xfree(job_entry->details->err);
 	xfree(job_entry->details->in);
+	xfree(job_entry->details->mc_ptr);
+	xfree(job_entry->details->mem_bind);
 	xfree(job_entry->details->out);
+	FREE_NULL_BITMAP(job_entry->details->req_node_bitmap);
+	xfree(job_entry->details->req_node_layout);
+	xfree(job_entry->details->req_nodes);
+	xfree(job_entry->details->restart_dir);
 	xfree(job_entry->details->work_dir);
-	xfree(job_entry->details->mc_ptr);
-	if (job_entry->details->feature_list)
-		list_destroy(job_entry->details->feature_list);
-	xfree(job_entry->details->dependency);
-	if (job_entry->details->depend_list)
-		list_destroy(job_entry->details->depend_list);
-	xfree(job_entry->details);
+	xfree(job_entry->details);	/* Must be last */
 }
 
 /* _delete_job_desc_files - delete job descriptor related files */
@@ -272,13 +288,18 @@ static void _delete_job_desc_files(uint32_t job_id)
 	xfree(dir_name);
 }
 
-/* dump_all_job_state - save the state of all jobs to file for checkpoint
+/*
+ * dump_all_job_state - save the state of all jobs to file for checkpoint
+ *	Changes here should be reflected in load_last_job_id() and 
+ *	load_all_job_state().
  * RET 0 or error code */
 int dump_all_job_state(void)
 {
+	/* Remember the high-water mark so later dumps allocate a
+	 * large enough buffer up front, avoiding grow-and-copy */
 	static int high_buffer_size = (1024 * 1024);
 	int error_code = 0, log_fd;
 	char *old_file, *new_file, *reg_file;
+	struct stat stat_buf;
 	/* Locks: Read config and job */
 	slurmctld_lock_t job_read_lock =
 		{ READ_LOCK, READ_LOCK, NO_LOCK, NO_LOCK };
@@ -309,8 +330,6 @@ int dump_all_job_state(void)
 		xassert (job_ptr->magic == JOB_MAGIC);
 		_dump_job_state(job_ptr, buffer);
 	}
-	/* Maintain config lock until we get the state_save_location *\
-	   \* unlock_slurmctld(job_read_lock);         - see below      */
 	list_iterator_destroy(job_iterator);
 
 	/* write the buffer to file */
@@ -322,6 +341,21 @@ int dump_all_job_state(void)
 	xstrcat(new_file, "/job_state.new");
 	unlock_slurmctld(job_read_lock);
 
+	if (stat(reg_file, &stat_buf) == 0) {
+		static time_t last_mtime = (time_t) 0;
+		int delta_t = difftime(stat_buf.st_mtime, last_mtime);
+		if (delta_t < -10) {
+			error("The modification time of %s moved backwards "
+			      "by %d seconds",
+			      reg_file, (0-delta_t));
+			error("There could be a problem with your clock or "
+			      "file system mounting");
+			/* It might be safest to exit here; we have
+			 * likely mounted a different file system
+			 * containing the state save files */
+		}
+		last_mtime = time(NULL);
+	}
+
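
The check above only works because last_mtime records the wall clock at
the previous save, so a large negative delta implies a stepped clock or
a different file system mounted over the state directory. A standalone
sketch of the same test (the path, 10-second slack, and message are
illustrative):

#include <stdio.h>
#include <sys/stat.h>
#include <time.h>

static void check_mtime(const char *path)
{
	static time_t last_mtime;	/* wall clock at previous save */
	struct stat sb;

	if (stat(path, &sb) == 0) {
		if (last_mtime &&
		    (difftime(sb.st_mtime, last_mtime) < -10.0))
			fprintf(stderr, "mtime of %s moved backwards\n",
				path);
		last_mtime = time(NULL);
	}
}
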
 	lock_state_files();
 	log_fd = creat(new_file, 0600);
 	if (log_fd == 0) {
@@ -367,6 +401,7 @@ int dump_all_job_state(void)
 /*
  * load_all_job_state - load the job state from file, recover from last 
  *	checkpoint. Execute this after loading the configuration file data.
+ *	Changes here should be reflected in load_last_job_id().
  * RET 0 or error code
  */
 extern int load_all_job_state(void)
@@ -421,7 +456,7 @@ extern int load_all_job_state(void)
 	buffer = create_buf(data, data_size);
 	safe_unpackstr_xmalloc(&ver_str, &ver_str_len, buffer);
 	debug3("Version string in job_state header is %s", ver_str);
-	if ((!ver_str) || strcmp(ver_str, JOB_STATE_VERSION) != 0) {
+	if ((!ver_str) || (strcmp(ver_str, JOB_STATE_VERSION) != 0)) {
 		error("***********************************************");
 		error("Can not recover job state, incompatable version");
 		error("***********************************************");
@@ -456,6 +491,87 @@ unpack_error:
 	return SLURM_FAILURE;
 }
 
+/*
+ * load_last_job_id - load only the last job ID from state save file.
+ *	Changes here should be reflected in load_all_job_state().
+ * RET 0 or error code
+ */
+extern int load_last_job_id(void)
+{
+	int data_allocated, data_read = 0, error_code = SLURM_SUCCESS;
+	uint32_t data_size = 0;
+	int state_fd;
+	char *data = NULL, *state_file;
+	Buf buffer;
+	time_t buf_time;
+	char *ver_str = NULL;
+	uint32_t ver_str_len;
+
+	/* read the file */
+	state_file = xstrdup(slurmctld_conf.state_save_location);
+	xstrcat(state_file, "/job_state");
+	lock_state_files();
+	state_fd = open(state_file, O_RDONLY);
+	if (state_fd < 0) {
+		debug("No job state file (%s) to recover", state_file);
+		error_code = ENOENT;
+	} else {
+		data_allocated = BUF_SIZE;
+		data = xmalloc(data_allocated);
+		while (1) {
+			data_read = read(state_fd, &data[data_size],
+					 BUF_SIZE);
+			if (data_read < 0) {
+				if (errno == EINTR)
+					continue;
+				else {
+					error("Read error on %s: %m", 
+					      state_file);
+					break;
+				}
+			} else if (data_read == 0)	/* eof */
+				break;
+			data_size      += data_read;
+			data_allocated += data_read;
+			xrealloc(data, data_allocated);
+		}
+		close(state_fd);
+	}
+	xfree(state_file);
+	unlock_state_files();
+
+	if (error_code)
+		return error_code;
+
+	buffer = create_buf(data, data_size);
+	safe_unpackstr_xmalloc(&ver_str, &ver_str_len, buffer);
+	debug3("Version string in job_state header is %s", ver_str);
+	if ((!ver_str) || (strcmp(ver_str, JOB_STATE_VERSION) != 0)) {
+		debug("*************************************************");
+		debug("Can not recover last job ID, incompatible version");
+		debug("*************************************************");
+		xfree(ver_str);
+		free_buf(buffer);
+		return EFAULT;
+	}
+	xfree(ver_str);
+
+	safe_unpack_time(&buf_time, buffer);
+	safe_unpack32( &job_id_sequence, buffer);
+	debug3("Job ID in job_state header is %u", job_id_sequence);
+
+	/* Ignore the state for individual jobs stored here */
+
+	free_buf(buffer);
+	return error_code;
+
+unpack_error:
+	debug("Invalid job data checkpoint file");
+	free_buf(buffer);
+	return SLURM_FAILURE;
+}
+
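
load_last_job_id() reads the whole state file with the same loop as
load_all_job_state(): retry on EINTR, grow the buffer so a chunk of
headroom always remains, stop at EOF. A self-contained sketch of that
loop using libc allocation (CHUNK is a stand-in for SLURM's BUF_SIZE):

#include <errno.h>
#include <stdlib.h>
#include <unistd.h>

#define CHUNK 4096	/* stand-in for SLURM's BUF_SIZE */

static char *slurp_fd(int fd, size_t *size)
{
	size_t alloc = CHUNK;
	char *data = malloc(alloc), *tmp;
	ssize_t n;

	*size = 0;
	while (data) {
		n = read(fd, data + *size, CHUNK);
		if (n < 0) {
			if (errno == EINTR)
				continue;	/* interrupted: retry */
			break;			/* real error: give up */
		}
		if (n == 0)
			return data;		/* EOF: done */
		*size += n;
		alloc += n;			/* keep CHUNK headroom */
		if ((tmp = realloc(data, alloc)) == NULL)
			break;
		data = tmp;
	}
	free(data);
	return NULL;
}
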
 /*
  * _dump_job_state - dump the state of a specific job, its details, and 
  *	steps to a buffer
@@ -481,6 +597,8 @@ static void _dump_job_state(struct job_record *dump_job_ptr, Buf buffer)
 	pack32(dump_job_ptr->exit_code, buffer);
 	pack32(dump_job_ptr->db_index, buffer);
 	pack32(dump_job_ptr->assoc_id, buffer);
+	pack32(dump_job_ptr->resv_id, buffer);
+	pack32(dump_job_ptr->next_step_id, buffer);
 
 	pack_time(dump_job_ptr->start_time, buffer);
 	pack_time(dump_job_ptr->end_time, buffer);
@@ -488,16 +606,20 @@ static void _dump_job_state(struct job_record *dump_job_ptr, Buf buffer)
 	pack_time(dump_job_ptr->pre_sus_time, buffer);
 	pack_time(dump_job_ptr->tot_sus_time, buffer);
 
+	pack16(dump_job_ptr->direct_set_prio, buffer);
 	pack16(dump_job_ptr->job_state, buffer);
-	pack16(dump_job_ptr->next_step_id, buffer);
 	pack16(dump_job_ptr->kill_on_node_fail, buffer);
 	pack16(dump_job_ptr->kill_on_step_done, buffer);
 	pack16(dump_job_ptr->batch_flag, buffer);
 	pack16(dump_job_ptr->mail_type, buffer);
 	pack16(dump_job_ptr->qos, buffer);
 	pack16(dump_job_ptr->state_reason, buffer);
+	pack16(dump_job_ptr->restart_cnt, buffer);
+	pack16(dump_job_ptr->resv_flags, buffer);
 
+	packstr(dump_job_ptr->state_desc, buffer);
 	packstr(dump_job_ptr->resp_host, buffer);
+
 	pack16(dump_job_ptr->alloc_resp_port, buffer);
 	pack16(dump_job_ptr->other_port, buffer);
 
@@ -512,15 +634,20 @@ static void _dump_job_state(struct job_record *dump_job_ptr, Buf buffer)
 	packstr(dump_job_ptr->nodes, buffer);
 	packstr(dump_job_ptr->partition, buffer);
 	packstr(dump_job_ptr->name, buffer);
+	packstr(dump_job_ptr->wckey, buffer);
 	packstr(dump_job_ptr->alloc_node, buffer);
 	packstr(dump_job_ptr->account, buffer);
 	packstr(dump_job_ptr->comment, buffer);
 	packstr(dump_job_ptr->network, buffer);
 	packstr(dump_job_ptr->licenses, buffer);
 	packstr(dump_job_ptr->mail_user, buffer);
+	packstr(dump_job_ptr->resv_name, buffer);
+
+	select_g_pack_jobinfo(dump_job_ptr->select_jobinfo, buffer);
+	pack_select_job_res(dump_job_ptr->select_job, buffer);
 
-	select_g_pack_jobinfo(dump_job_ptr->select_jobinfo,
-			      buffer);
+	pack16(dump_job_ptr->ckpt_interval, buffer);
+	checkpoint_pack_jobinfo(dump_job_ptr->check_job, buffer);
 
 	/* Dump job details, if available */
 	detail_ptr = dump_job_ptr->details;
@@ -546,22 +673,27 @@ static void _dump_job_state(struct job_record *dump_job_ptr, Buf buffer)
 static int _load_job_state(Buf buffer)
 {
 	uint32_t job_id, user_id, group_id, time_limit, priority, alloc_sid;
-	uint32_t exit_code, num_procs, assoc_id, db_index, name_len,
-		total_procs;
+	uint32_t exit_code, num_procs, assoc_id, db_index, name_len;
+	uint32_t next_step_id, total_procs, resv_id;
 	time_t start_time, end_time, suspend_time, pre_sus_time, tot_sus_time;
 	time_t now = time(NULL);
-	uint16_t job_state, next_step_id, details, batch_flag, step_flag;
-	uint16_t kill_on_node_fail, kill_on_step_done, qos;
+	uint16_t job_state, details, batch_flag, step_flag;
+	uint16_t kill_on_node_fail, kill_on_step_done, direct_set_prio, qos;
 	uint16_t alloc_resp_port, other_port, mail_type, state_reason;
+	uint16_t restart_cnt, resv_flags, ckpt_interval;
 	char *nodes = NULL, *partition = NULL, *name = NULL, *resp_host = NULL;
 	char *account = NULL, *network = NULL, *mail_user = NULL;
 	char *comment = NULL, *nodes_completing = NULL, *alloc_node = NULL;
-	char *licenses = NULL;
+	char *licenses = NULL, *state_desc = NULL, *wckey = NULL;
+	char *resv_name = NULL;
 	struct job_record *job_ptr;
 	struct part_record *part_ptr;
 	int error_code;
 	select_jobinfo_t select_jobinfo = NULL;
+	select_job_res_t select_job = NULL;
+	check_jobinfo_t check_job = NULL;
 	acct_association_rec_t assoc_rec;
+	acct_qos_rec_t qos_rec;
 
 	safe_unpack32(&assoc_id, buffer);
 	safe_unpack32(&job_id, buffer);
@@ -575,6 +707,8 @@ static int _load_job_state(Buf buffer)
 	safe_unpack32(&exit_code, buffer);
 	safe_unpack32(&db_index, buffer);
 	safe_unpack32(&assoc_id, buffer);
+	safe_unpack32(&resv_id, buffer);
+	safe_unpack32(&next_step_id, buffer);
 
 	safe_unpack_time(&start_time, buffer);
 	safe_unpack_time(&end_time, buffer);
@@ -582,16 +716,20 @@ static int _load_job_state(Buf buffer)
 	safe_unpack_time(&pre_sus_time, buffer);
 	safe_unpack_time(&tot_sus_time, buffer);
 
+	safe_unpack16(&direct_set_prio, buffer);
 	safe_unpack16(&job_state, buffer);
-	safe_unpack16(&next_step_id, buffer);
 	safe_unpack16(&kill_on_node_fail, buffer);
 	safe_unpack16(&kill_on_step_done, buffer);
 	safe_unpack16(&batch_flag, buffer);
 	safe_unpack16(&mail_type, buffer);
 	safe_unpack16(&qos, buffer);
 	safe_unpack16(&state_reason, buffer);
+	safe_unpack16(&restart_cnt, buffer);
+	safe_unpack16(&resv_flags, buffer);
 
+	safe_unpackstr_xmalloc(&state_desc, &name_len, buffer);
 	safe_unpackstr_xmalloc(&resp_host, &name_len, buffer);
+
 	safe_unpack16(&alloc_resp_port, buffer);
 	safe_unpack16(&other_port, buffer);
 
@@ -602,16 +740,26 @@ static int _load_job_state(Buf buffer)
 	safe_unpackstr_xmalloc(&nodes, &name_len, buffer);
 	safe_unpackstr_xmalloc(&partition, &name_len, buffer);
 	safe_unpackstr_xmalloc(&name, &name_len, buffer);
+	safe_unpackstr_xmalloc(&wckey, &name_len, buffer);
 	safe_unpackstr_xmalloc(&alloc_node, &name_len, buffer);
 	safe_unpackstr_xmalloc(&account, &name_len, buffer);
 	safe_unpackstr_xmalloc(&comment, &name_len, buffer);
 	safe_unpackstr_xmalloc(&network, &name_len, buffer);
 	safe_unpackstr_xmalloc(&licenses, &name_len, buffer);
 	safe_unpackstr_xmalloc(&mail_user, &name_len, buffer);
+	safe_unpackstr_xmalloc(&resv_name, &name_len, buffer);
 
 	if (select_g_alloc_jobinfo(&select_jobinfo)
 	    ||  select_g_unpack_jobinfo(select_jobinfo, buffer))
 		goto unpack_error;
+	if (unpack_select_job_res(&select_job, buffer))
+		goto unpack_error;
+
+	safe_unpack16(&ckpt_interval, buffer);
+	if (checkpoint_alloc_jobinfo(&check_job) ||
+	    checkpoint_unpack_jobinfo(check_job, buffer))
+		goto unpack_error;
 
 	/* validity test as possible */
 	if (job_id == 0) {
@@ -659,6 +807,19 @@ static int _load_job_state(Buf buffer)
 		_add_job_hash(job_ptr);
 	}
 
+	if (qos) {
+		memset(&qos_rec, 0, sizeof(acct_qos_rec_t));
+		qos_rec.id = qos;
+		if ((assoc_mgr_fill_in_qos(acct_db_conn, &qos_rec,
+					   accounting_enforce,
+					   (acct_qos_rec_t **)
+					   &job_ptr->qos_ptr))
+		    != SLURM_SUCCESS) {
+			verbose("Invalid qos (%u) for job_id %u", qos, job_id);
+			/* not a fatal error, qos could have been removed */
+		}
+	}
+
 	if ((maximum_prio >= priority) && (priority > 1))
 		maximum_prio = priority;
 	if (job_id_sequence <= job_id)
@@ -670,6 +831,7 @@ static int _load_job_state(Buf buffer)
 		job_ptr->job_state = JOB_FAILED;
 		job_ptr->exit_code = 1;
 		job_ptr->state_reason = FAIL_SYSTEM;
+		xfree(job_ptr->state_desc);
 		job_ptr->end_time = now;
 		goto unpack_error;
 	}
@@ -687,6 +849,7 @@ static int _load_job_state(Buf buffer)
 	xfree(job_ptr->comment);
 	job_ptr->comment      = comment;
 	comment               = NULL;  /* reused, nothing left to free */
+	job_ptr->direct_set_prio = direct_set_prio;
 	job_ptr->db_index     = db_index;
 	job_ptr->end_time     = end_time;
 	job_ptr->exit_code    = exit_code;
@@ -704,6 +867,9 @@ static int _load_job_state(Buf buffer)
 	xfree(job_ptr->name);		/* in case duplicate record */
 	job_ptr->name         = name;
 	name                  = NULL;	/* reused, nothing left to free */
+	xfree(job_ptr->wckey);		/* in case duplicate record */
+	job_ptr->wckey        = wckey;
+	wckey                 = NULL;	/* reused, nothing left to free */
 	xfree(job_ptr->network);
 	job_ptr->network      = network;
 	network               = NULL;  /* reused, nothing left to free */
@@ -728,9 +894,19 @@ static int _load_job_state(Buf buffer)
 	xfree(job_ptr->resp_host);
 	job_ptr->resp_host    = resp_host;
 	resp_host             = NULL;	/* reused, nothing left to free */
+	job_ptr->restart_cnt  = restart_cnt;
+	job_ptr->resv_id      = resv_id;
+	job_ptr->resv_name    = resv_name;
+	resv_name             = NULL;	/* reused, nothing left to free */
+	job_ptr->resv_flags   = resv_flags;
 	job_ptr->select_jobinfo = select_jobinfo;
+	job_ptr->select_job   = select_job;
+	job_ptr->ckpt_interval = ckpt_interval;
+	job_ptr->check_job    = check_job;
 	job_ptr->start_time   = start_time;
 	job_ptr->state_reason = state_reason;
+	job_ptr->state_desc   = state_desc;
+	state_desc            = NULL;	/* reused, nothing left to free */
 	job_ptr->suspend_time = suspend_time;
 	job_ptr->time_last_active = now;
 	job_ptr->time_limit   = time_limit;
@@ -763,16 +939,19 @@ static int _load_job_state(Buf buffer)
 		     job_id);
 		job_ptr->job_state = JOB_CANCELLED;
 		job_ptr->state_reason = FAIL_BANK_ACCOUNT;
+		xfree(job_ptr->state_desc);
 		if (IS_JOB_PENDING(job_ptr))
 			job_ptr->start_time = now;
 		job_ptr->end_time = now;
 		job_completion_logger(job_ptr);
 	} else {
-		info("Recovered job %u", job_id);
 		job_ptr->assoc_id = assoc_rec.id;
+		info("Recovered job %u (assoc_id %u)", job_id,
+		     job_ptr->assoc_id);
+
 		/* make sure we have started this job in accounting */
 		if(job_ptr->assoc_id && !job_ptr->db_index && job_ptr->nodes) {
-			debug("starting job %u in accounting", job_ptr->job_id);
+			debug("starting job %u in accounting", 
+			      job_ptr->job_id);
 			jobacct_storage_g_job_start(
 				acct_db_conn, slurmctld_cluster_name, job_ptr);
 			if(job_ptr->job_state == JOB_SUSPENDED) 
@@ -796,24 +975,26 @@ static int _load_job_state(Buf buffer)
 		safe_unpack16(&step_flag, buffer);
 	}
 
-	build_node_details(job_ptr);	/* set: num_cpu_groups, cpus_per_node,
-					 *  cpu_count_reps, node_cnt,
-					 *  node_addr, alloc_lps, used_lps */
+	build_node_details(job_ptr);	/* set node_addr */
 	return SLURM_SUCCESS;
 
 unpack_error:
 	error("Incomplete job record");
-	xfree(nodes);
-	xfree(nodes_completing);
-	xfree(partition);
-	xfree(name);
 	xfree(alloc_node);
 	xfree(account);
 	xfree(comment);
 	xfree(resp_host);
 	xfree(licenses);
 	xfree(mail_user);
+	xfree(name);
+	xfree(nodes);
+	xfree(nodes_completing);
+	xfree(partition);
+	xfree(resv_name);
+	xfree(state_desc);
+	xfree(wckey);
 	select_g_free_jobinfo(&select_jobinfo);
+	checkpoint_free_jobinfo(check_job);
 	return SLURM_FAILURE;
 }
 
@@ -829,15 +1010,24 @@ void _dump_job_details(struct job_details *detail_ptr, Buf buffer)
 	pack32(detail_ptr->max_nodes, buffer);
 	pack32(detail_ptr->num_tasks, buffer);
 
-	pack16(detail_ptr->shared, buffer);
+	pack16(detail_ptr->acctg_freq, buffer);
 	pack16(detail_ptr->contiguous, buffer);
 	pack16(detail_ptr->cpus_per_task, buffer);
+	pack16(detail_ptr->nice, buffer);
 	pack16(detail_ptr->ntasks_per_node, buffer);
 	pack16(detail_ptr->requeue, buffer);
-	pack16(detail_ptr->acctg_freq, buffer);
+	pack16(detail_ptr->shared, buffer);
+	pack16(detail_ptr->task_dist, buffer);
+
+	packstr(detail_ptr->cpu_bind,     buffer);
+	pack16(detail_ptr->cpu_bind_type, buffer);
+	packstr(detail_ptr->mem_bind,     buffer);
+	pack16(detail_ptr->mem_bind_type, buffer);
+	pack16(detail_ptr->plane_size, buffer);
 
 	pack8(detail_ptr->open_mode, buffer);
 	pack8(detail_ptr->overcommit, buffer);
+	pack8(detail_ptr->prolog_running, buffer);
 
 	pack32(detail_ptr->job_min_procs, buffer);
 	pack32(detail_ptr->job_min_memory, buffer);
@@ -854,6 +1044,8 @@ void _dump_job_details(struct job_details *detail_ptr, Buf buffer)
 	packstr(detail_ptr->in,        buffer);
 	packstr(detail_ptr->out,       buffer);
 	packstr(detail_ptr->work_dir,  buffer);
+	packstr(detail_ptr->ckpt_dir,  buffer);
+	packstr(detail_ptr->restart_dir, buffer);
 
 	pack_multi_core_data(detail_ptr->mc_ptr, buffer);
 	packstr_array(detail_ptr->argv, detail_ptr->argc, buffer);
@@ -863,16 +1055,18 @@ void _dump_job_details(struct job_details *detail_ptr, Buf buffer)
 static int _load_job_details(struct job_record *job_ptr, Buf buffer)
 {
 	char *req_nodes = NULL, *exc_nodes = NULL, *features = NULL;
-	char *dependency = NULL;
+	char *cpu_bind = NULL, *dependency = NULL, *mem_bind = NULL;
 	char *err = NULL, *in = NULL, *out = NULL, *work_dir = NULL;
+	char *ckpt_dir = NULL, *restart_dir = NULL;
 	char **argv = (char **) NULL;
 	uint32_t min_nodes, max_nodes;
 	uint32_t job_min_procs;
 	uint32_t job_min_memory, job_min_tmp_disk;
 	uint32_t num_tasks, name_len, argc = 0;
-	uint16_t shared, contiguous, ntasks_per_node;
-	uint16_t acctg_freq, cpus_per_task, requeue;
-	uint8_t open_mode, overcommit;
+	uint16_t shared, contiguous, nice, ntasks_per_node;
+	uint16_t acctg_freq, cpus_per_task, requeue, task_dist;
+	uint16_t cpu_bind_type, mem_bind_type, plane_size;
+	uint8_t open_mode, overcommit, prolog_running;
 	time_t begin_time, submit_time;
 	int i;
 	multi_core_data_t *mc_ptr;
@@ -882,15 +1076,24 @@ static int _load_job_details(struct job_record *job_ptr, Buf buffer)
 	safe_unpack32(&max_nodes, buffer);
 	safe_unpack32(&num_tasks, buffer);
 
-	safe_unpack16(&shared, buffer);
+	safe_unpack16(&acctg_freq, buffer);
 	safe_unpack16(&contiguous, buffer);
 	safe_unpack16(&cpus_per_task, buffer);
+	safe_unpack16(&nice, buffer);
 	safe_unpack16(&ntasks_per_node, buffer);
 	safe_unpack16(&requeue, buffer);
-	safe_unpack16(&acctg_freq, buffer);
+	safe_unpack16(&shared, buffer);
+	safe_unpack16(&task_dist, buffer);
+
+	safe_unpackstr_xmalloc(&cpu_bind, &name_len, buffer);
+	safe_unpack16(&cpu_bind_type, buffer);
+	safe_unpackstr_xmalloc(&mem_bind, &name_len, buffer);
+	safe_unpack16(&mem_bind_type, buffer);
+	safe_unpack16(&plane_size, buffer);
 
 	safe_unpack8(&open_mode, buffer);
 	safe_unpack8(&overcommit, buffer);
+	safe_unpack8(&prolog_running, buffer);
 
 	safe_unpack32(&job_min_procs, buffer);
 	safe_unpack32(&job_min_memory, buffer);
@@ -907,6 +1110,8 @@ static int _load_job_details(struct job_record *job_ptr, Buf buffer)
 	safe_unpackstr_xmalloc(&in,  &name_len, buffer);
 	safe_unpackstr_xmalloc(&out, &name_len, buffer);
 	safe_unpackstr_xmalloc(&work_dir, &name_len, buffer);
+	safe_unpackstr_xmalloc(&ckpt_dir, &name_len, buffer);
+	safe_unpackstr_xmalloc(&restart_dir, &name_len, buffer);
 
 	if (unpack_multi_core_data(&mc_ptr, buffer))
 		goto unpack_error;
@@ -923,66 +1128,88 @@ static int _load_job_details(struct job_record *job_ptr, Buf buffer)
 		      requeue, overcommit);
 		goto unpack_error;
 	}
-
-
+	if (prolog_running > 1) {
+		error("Invalid data for job %u: prolog_running=%u",
+		      job_ptr->job_id, prolog_running);
+		goto unpack_error;
+	}
 
 	/* free any left-over detail data */
-	xfree(job_ptr->details->req_nodes);
+	for (i=0; i<job_ptr->details->argc; i++)
+		xfree(job_ptr->details->argv[i]);
+	xfree(job_ptr->details->argv);
+	xfree(job_ptr->details->cpu_bind);
+	xfree(job_ptr->details->dependency);
+	xfree(job_ptr->details->err);
 	xfree(job_ptr->details->exc_nodes);
 	xfree(job_ptr->details->features);
-	xfree(job_ptr->details->err);
 	xfree(job_ptr->details->in);
+	xfree(job_ptr->details->mem_bind);
 	xfree(job_ptr->details->out);
+	xfree(job_ptr->details->req_nodes);
 	xfree(job_ptr->details->work_dir);
-	for (i=0; i<job_ptr->details->argc; i++)
-		xfree(job_ptr->details->argv[i]);
-	xfree(job_ptr->details->argv);
+	xfree(job_ptr->details->ckpt_dir);
+	xfree(job_ptr->details->restart_dir);
 
 	/* now put the details into the job record */
-	job_ptr->details->min_nodes = min_nodes;
-	job_ptr->details->max_nodes = max_nodes;
-	job_ptr->details->num_tasks = num_tasks;
-	job_ptr->details->shared = shared;
 	job_ptr->details->acctg_freq = acctg_freq;
+	job_ptr->details->argc = argc;
+	job_ptr->details->argv = argv;
+	job_ptr->details->begin_time = begin_time;
 	job_ptr->details->contiguous = contiguous;
+	job_ptr->details->cpu_bind = cpu_bind;
+	job_ptr->details->cpu_bind_type = cpu_bind_type;
 	job_ptr->details->cpus_per_task = cpus_per_task;
-	/* FIXME: Need to save/restore actual task_dist value */
-	job_ptr->details->task_dist = SLURM_DIST_CYCLIC;
-	job_ptr->details->ntasks_per_node = ntasks_per_node;
+	job_ptr->details->dependency = dependency;
+	job_ptr->details->err = err;
+	job_ptr->details->exc_nodes = exc_nodes;
+	job_ptr->details->features = features;
+	job_ptr->details->in = in;
 	job_ptr->details->job_min_procs = job_min_procs;
 	job_ptr->details->job_min_memory = job_min_memory;
 	job_ptr->details->job_min_tmp_disk = job_min_tmp_disk;
-	job_ptr->details->requeue = requeue;
+	job_ptr->details->max_nodes = max_nodes;
+	job_ptr->details->mc_ptr = mc_ptr;
+	job_ptr->details->mem_bind = mem_bind;
+	job_ptr->details->mem_bind_type = mem_bind_type;
+	job_ptr->details->min_nodes = min_nodes;
+	job_ptr->details->nice = nice;
+	job_ptr->details->ntasks_per_node = ntasks_per_node;
+	job_ptr->details->num_tasks = num_tasks;
 	job_ptr->details->open_mode = open_mode;
+	job_ptr->details->out = out;
 	job_ptr->details->overcommit = overcommit;
-	job_ptr->details->begin_time = begin_time;
-	job_ptr->details->submit_time = submit_time;
+	job_ptr->details->plane_size = plane_size;
+	job_ptr->details->prolog_running = prolog_running;
 	job_ptr->details->req_nodes = req_nodes;
-	job_ptr->details->exc_nodes = exc_nodes;
-	job_ptr->details->features = features;
-	job_ptr->details->err = err;
-	job_ptr->details->in = in;
-	job_ptr->details->out = out;
+	job_ptr->details->requeue = requeue;
+	job_ptr->details->shared = shared;
+	job_ptr->details->submit_time = submit_time;
+	job_ptr->details->task_dist = task_dist;
 	job_ptr->details->work_dir = work_dir;
-	job_ptr->details->argc = argc;
-	job_ptr->details->argv = argv;
-	job_ptr->details->mc_ptr = mc_ptr;
-	job_ptr->details->dependency = dependency;
+	job_ptr->details->ckpt_dir = ckpt_dir;
+	job_ptr->details->restart_dir = restart_dir;
 	
 	return SLURM_SUCCESS;
 
 unpack_error:
-	xfree(req_nodes);
-	xfree(exc_nodes);
-	xfree(features);
+/*	for (i=0; i<argc; i++)
+	xfree(argv[i]);	Don't trust this on unpack error */
+	xfree(argv);
+	xfree(cpu_bind);
 	xfree(dependency);
 	xfree(err);
+	xfree(exc_nodes);
+	xfree(features);
 	xfree(in);
+	xfree(mem_bind);
 	xfree(out);
+	xfree(req_nodes);
 	xfree(work_dir);
-/*	for (i=0; i<argc; i++) 
-	xfree(argv[i]);  Don't trust this on unpack error */
-	xfree(argv);
+	xfree(ckpt_dir);
+	xfree(restart_dir);
 	return SLURM_FAILURE;
 }
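
The invariant behind all of these pack/unpack hunks (and the VER007
bump) is strict symmetry: _dump_job_details and _load_job_details must
write and read the same fields, at the same widths, in the same order.
A toy illustration using the SLURM Buf helpers shown above (the
two-field record is invented; safe_unpack* jumps to unpack_error on
failure):

static void dump_rec(uint16_t flags, uint32_t id, Buf buffer)
{
	pack16(flags, buffer);		/* field 1: 16 bits */
	pack32(id,    buffer);		/* field 2: 32 bits */
}

static int load_rec(Buf buffer, uint16_t *flags, uint32_t *id)
{
	safe_unpack16(flags, buffer);	/* must mirror dump_rec: */
	safe_unpack32(id,    buffer);	/* same order, same widths */
	return SLURM_SUCCESS;

unpack_error:
	return SLURM_FAILURE;
}
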
 
@@ -1066,6 +1293,7 @@ extern int kill_job_by_part_name(char *part_name)
 			job_ptr->job_state = JOB_NODE_FAIL | JOB_COMPLETING;
 			job_ptr->exit_code = MAX(job_ptr->exit_code, 1);
 			job_ptr->state_reason = FAIL_DOWN_PARTITION;
+			xfree(job_ptr->state_desc);
 			if (suspended) {
 				job_ptr->end_time = job_ptr->suspend_time;
 				job_ptr->tot_sus_time += 
@@ -1097,10 +1325,9 @@ extern int kill_job_by_part_name(char *part_name)
  * kill_running_job_by_node_name - Given a node name, deallocate RUNNING 
  *	or COMPLETING jobs from the node or kill them 
  * IN node_name - name of a node
- * IN step_test - if true, only kill the job if a step is running on the node
  * RET number of killed jobs
  */
-extern int kill_running_job_by_node_name(char *node_name, bool step_test)
+extern int kill_running_job_by_node_name(char *node_name)
 {
 	ListIterator job_iterator;
 	struct job_record *job_ptr;
@@ -1151,10 +1378,6 @@ extern int kill_running_job_by_node_name(char *node_name, bool step_test)
 				      "JobId=%u", 
 				      node_ptr->name, job_ptr->job_id);
 		} else if ((job_ptr->job_state == JOB_RUNNING) || suspended) {
-			if (step_test && 
-			    (step_on_node(job_ptr, node_ptr) == 0))
-				continue;
-
 			job_count++;
 			if ((job_ptr->details) &&
 			    (job_ptr->kill_on_node_fail == 0) &&
@@ -1163,7 +1386,8 @@ extern int kill_running_job_by_node_name(char *node_name, bool step_test)
 				srun_node_fail(job_ptr->job_id, node_name);
 				error("Removing failed node %s from job_id %u",
 				      node_name, job_ptr->job_id);
-				_excise_node_from_job(job_ptr, node_ptr);
+				kill_step_on_node(job_ptr, node_ptr);
+				excise_node_from_job(job_ptr, node_ptr);
 			} else if (job_ptr->batch_flag && job_ptr->details &&
 			           (job_ptr->details->requeue > 0)) {
 				char requeue_msg[128];
@@ -1189,7 +1413,7 @@ extern int kill_running_job_by_node_name(char *node_name, bool step_test)
 					job_ptr->end_time = now;
 				
 				/* We want this job to look like it
-				 * was terminateded in the accounting logs.
+				 * was terminated in the accounting logs.
 				 * Set a new submit time so the restarted
 				 * job looks like a new job. */
 				job_ptr->job_state  = JOB_NODE_FAIL;
@@ -1200,6 +1424,18 @@ extern int kill_running_job_by_node_name(char *node_name, bool step_test)
 				if (job_ptr->node_cnt)
 					job_ptr->job_state |= JOB_COMPLETING;
 				job_ptr->details->submit_time = now;
+
+				/* restart from periodic checkpoint */
+				if (job_ptr->ckpt_interval &&
+				    job_ptr->ckpt_time &&
+				    job_ptr->details->ckpt_dir) {
+					xfree(job_ptr->details->restart_dir);
+					job_ptr->details->restart_dir =
+						xstrdup (job_ptr->details->ckpt_dir);
+					xstrfmtcat(job_ptr->details->restart_dir,
+						   "/%u", job_ptr->job_id);
+				}
+				job_ptr->restart_cnt++;
 				/* Since the job completion logger
 				   removes the submit we need to add it
 				   again.
@@ -1214,11 +1450,13 @@ extern int kill_running_job_by_node_name(char *node_name, bool step_test)
 				job_ptr->exit_code = 
 					MAX(job_ptr->exit_code, 1);
 				job_ptr->state_reason = FAIL_DOWN_NODE;
+				xfree(job_ptr->state_desc);
 				if (suspended) {
 					job_ptr->end_time =
 						job_ptr->suspend_time;
 					job_ptr->tot_sus_time += 
-						difftime(now, job_ptr->suspend_time);
+						difftime(now, 
+							 job_ptr->suspend_time);
 				} else
 					job_ptr->end_time = time(NULL);
 				deallocate_nodes(job_ptr, false, suspended);
@@ -1235,21 +1473,38 @@ extern int kill_running_job_by_node_name(char *node_name, bool step_test)
 }
 
 /* Remove one node from a job's allocation */
-static void _excise_node_from_job(struct job_record *job_ptr, 
-				  struct node_record *node_ptr)
+extern void excise_node_from_job(struct job_record *job_ptr, 
+				 struct node_record *node_ptr)
 {
+	int i, orig_pos = -1, new_pos = -1;
+	bitstr_t *orig_bitmap = bit_copy(job_ptr->node_bitmap);
+	select_job_res_t select_ptr = job_ptr->select_job;
+
+	xassert(select_ptr);
+	xassert(select_ptr->cpus);
+	xassert(select_ptr->cpus_used);
+
 	make_node_idle(node_ptr, job_ptr); /* updates bitmap */
 	xfree(job_ptr->nodes);
 	job_ptr->nodes = bitmap2node_name(job_ptr->node_bitmap);
-	xfree(job_ptr->cpus_per_node);
-	xfree(job_ptr->cpu_count_reps);
-	xfree(job_ptr->node_addr);
-
-	/* build_node_details rebuilds everything from node_bitmap */
-	build_node_details(job_ptr);
+	for (i=bit_ffs(orig_bitmap); i<node_record_count; i++) {
+		if (!bit_test(orig_bitmap,i))
+			continue;
+		orig_pos++;
+		if (!bit_test(job_ptr->node_bitmap, i))
+			continue;
+		new_pos++;
+		if (orig_pos == new_pos)
+			continue;
+		memcpy(&job_ptr->node_addr[new_pos],
+		       &job_ptr->node_addr[orig_pos], sizeof(slurm_addr));
+		/* NOTE: The job's allocation in the job_ptr->select_job
+		 * data structure is unchanged even after a node allocated
+		 * to the job goes DOWN. */
+	}
+	job_ptr->node_cnt = new_pos + 1;
 }
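
excise_node_from_job() compacts job_ptr->node_addr with two cursors,
one walking the original bitmap and one walking the shrunken bitmap, so
each surviving node's address slides into its new slot. A simplified,
self-contained version of that walk (plain arrays stand in for SLURM
bitmaps and slurm_addr records):

/* Walk the old membership; for every member that also survives in
 * the new membership, slide its record into the next free slot.
 * Returns the surviving count. */
static int compact(int *addr, const char *was, const char *still, int n)
{
	int i, orig_pos = -1, new_pos = -1;

	for (i = 0; i < n; i++) {
		if (!was[i])
			continue;
		orig_pos++;		/* position in the old list */
		if (!still[i])
			continue;	/* excised: leave behind */
		new_pos++;		/* position in the new list */
		if (orig_pos != new_pos)
			addr[new_pos] = addr[orig_pos];
	}
	return new_pos + 1;
}
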
 
-
 /*
  * dump_job_desc - dump the incoming job submit request message
  * IN job_specs - job specification from RPC
@@ -1313,8 +1568,8 @@ void dump_job_desc(job_desc_msg_t * job_specs)
 	debug3("   min_memory_%s=%ld job_min_tmp_disk=%ld",
 	       mem_type, job_min_memory, job_min_tmp_disk);
 	immediate = (job_specs->immediate == 0) ? 0L : 1L;
-	debug3("   immediate=%ld features=%s",
-	       immediate, job_specs->features);
+	debug3("   immediate=%ld features=%s reservation=%s",
+	       immediate, job_specs->features, job_specs->reservation);
 
 	debug3("   req_nodes=%s exc_nodes=%s", 
 	       job_specs->req_nodes, job_specs->exc_nodes);
@@ -1413,6 +1668,11 @@ void dump_job_desc(job_desc_msg_t * job_specs)
 	       "ntasks_per_core=%ld", 
 	       ntasks_per_node, ntasks_per_socket, ntasks_per_core);
 
+	debug3("   cpu_bind=%u:%s mem_bind=%u:%s plane_size=%u",
+	       job_specs->cpu_bind_type, job_specs->cpu_bind,
+	       job_specs->mem_bind_type, job_specs->mem_bind,
+	       job_specs->plane_size);
+
 	select_g_sprint_jobinfo(job_specs->select_jobinfo, 
 				buf, sizeof(buf), SELECT_PRINT_MIXED);
 	if (buf[0] != '\0')
@@ -1477,9 +1737,6 @@ extern void rehash_jobs(void)
  * RET 0 or an error code. If the job would only be able to execute with 
  *	some change in partition configuration then 
  *	ESLURM_REQUESTED_PART_CONFIG_UNAVAILABLE is returned
- * NOTE: If allocating nodes lx[0-7] to a job and those nodes have cpu counts  
- *	of 4, 4, 4, 4, 8, 8, 4, 4 then num_cpu_groups=3, cpus_per_node={4,8,4}
- *	and cpu_count_reps={4,2,2}
  * globals: job_list - pointer to global job list 
  *	list_part - global list of partition info
  *	default_part_loc - pointer to default partition
@@ -1503,21 +1760,27 @@ extern int job_allocate(job_desc_msg_t * job_specs, int immediate,
 			job_ptr->job_state = JOB_FAILED;
 			job_ptr->exit_code = 1;
 			job_ptr->state_reason = FAIL_BAD_CONSTRAINTS;
+			xfree(job_ptr->state_desc);
 			job_ptr->start_time = job_ptr->end_time = now;
 			job_completion_logger(job_ptr);
 		}
 		return error_code;
 	}
 	xassert(job_ptr);
-
 	independent = job_independent(job_ptr);
+	/* The priority needs to be calculated after this point
+	   because job_independent() sets a begin time, and that
+	   tells us whether the job is eligible.
+	*/
+	if (job_ptr->priority == NO_VAL)
+		_set_job_prio(job_ptr);
+
 	if (license_job_test(job_ptr) != SLURM_SUCCESS)
 		independent = false;
 
 	/* Avoid resource fragmentation if important */
-	if (independent && switch_no_frag() && 
-	    (submit_uid || (job_specs->req_nodes == NULL)) && 
-	    job_is_completing())
+	if ((submit_uid || (job_specs->req_nodes == NULL)) && 
+	    independent && job_is_completing())
 		too_fragmented = true;	/* Don't pick nodes for job now */
 	/* FIXME: Ideally we only want to refuse the request if the 
 	 * required node list is insufficient to satisfy the job's
@@ -1537,6 +1800,7 @@ extern int job_allocate(job_desc_msg_t * job_specs, int immediate,
 		job_ptr->job_state  = JOB_FAILED;
 		job_ptr->exit_code  = 1;
 		job_ptr->state_reason = FAIL_BAD_CONSTRAINTS;
+		xfree(job_ptr->state_desc);
 		job_ptr->start_time = job_ptr->end_time = now;
 		job_completion_logger(job_ptr);
 		if (!independent)
@@ -1576,12 +1840,14 @@ extern int job_allocate(job_desc_msg_t * job_specs, int immediate,
 	if ((error_code == ESLURM_NODES_BUSY) ||
 	    (error_code == ESLURM_JOB_HELD) ||
 	    (error_code == ESLURM_ACCOUNTING_POLICY) ||
+	    (error_code == ESLURM_RESERVATION_NOT_USABLE) ||
 	    (error_code == ESLURM_REQUESTED_PART_CONFIG_UNAVAILABLE)) {
 		/* Not fatal error, but job can't be scheduled right now */
 		if (immediate) {
 			job_ptr->job_state  = JOB_FAILED;
 			job_ptr->exit_code  = 1;
 			job_ptr->state_reason = FAIL_BAD_CONSTRAINTS;
+			xfree(job_ptr->state_desc);
 			job_ptr->start_time = job_ptr->end_time = now;
 			job_completion_logger(job_ptr);
 		} else {	/* job remains queued */
@@ -1597,6 +1863,7 @@ extern int job_allocate(job_desc_msg_t * job_specs, int immediate,
 		job_ptr->job_state  = JOB_FAILED;
 		job_ptr->exit_code  = 1;
 		job_ptr->state_reason = FAIL_BAD_CONSTRAINTS;
+		xfree(job_ptr->state_desc);
 		job_ptr->start_time = job_ptr->end_time = now;
 		job_completion_logger(job_ptr);
 		return error_code;
@@ -1653,6 +1920,7 @@ extern int job_fail(uint32_t job_id)
 		job_ptr->job_state = JOB_FAILED | JOB_COMPLETING;
 		job_ptr->exit_code = 1;
 		job_ptr->state_reason = FAIL_LAUNCH;
+		xfree(job_ptr->state_desc);
 		deallocate_nodes(job_ptr, false, suspended);
 		job_completion_logger(job_ptr);
 		return SLURM_SUCCESS;
@@ -1725,15 +1993,13 @@ extern int job_signal(uint32_t job_id, uint16_t signal, uint16_t batch_flag,
 		return SLURM_SUCCESS;
 	}
 
-	if ((job_ptr->job_state == JOB_PENDING) &&
-	    (signal == SIGKILL)) {
+	if ((job_ptr->job_state == JOB_PENDING) && (signal == SIGKILL)) {
 		last_job_update		= now;
 		job_ptr->job_state	= JOB_CANCELLED;
 		job_ptr->start_time	= now;
 		job_ptr->end_time	= now;
 		srun_allocate_abort(job_ptr);
 		job_completion_logger(job_ptr);
-		delete_job_details(job_ptr);
 		verbose("job_signal of pending job %u successful", job_id);
 		return SLURM_SUCCESS;
 	}
@@ -1838,9 +2104,10 @@ extern int job_complete(uint32_t job_id, uid_t uid, bool requeue,
 	if (IS_JOB_FINISHED(job_ptr))
 		return ESLURM_ALREADY_DONE;
 
-	if ((job_ptr->user_id != uid) && (uid != 0) && (uid != getuid())) {
-		error("Security violation, JOB_COMPLETE RPC from uid %d",
-		      uid);
+	if ((job_ptr->user_id != uid) && !validate_super_user(uid)) {
+		error("Security violation, JOB_COMPLETE RPC for job %u "
+		      "from uid %u",
+		      job_ptr->job_id, (unsigned int) uid);
 		return ESLURM_USER_ID_MISSING;
 	}
 	if (job_ptr->job_state & JOB_COMPLETING)
@@ -1869,14 +2136,37 @@ extern int job_complete(uint32_t job_id, uid_t uid, bool requeue,
 	}
 
 	if (requeue && job_ptr->details && job_ptr->batch_flag) {
+		/* We want this job to look like it
+		 * was terminated in the accounting logs.
+		 * Set a new submit time so the restarted
+		 * job looks like a new job. */
+		job_ptr->end_time = now;
+		job_ptr->job_state  = JOB_NODE_FAIL;
+		job_completion_logger(job_ptr);
+		job_ptr->db_index = 0;
+		/* Since this could happen on a launch we need to
+		   make sure the new submit time differs from the
+		   last one; use now + 1 so we get distinct records
+		   in the database */
+		job_ptr->details->submit_time = now + 1;
+
 		job_ptr->batch_flag++;	/* only one retry */
+		job_ptr->restart_cnt++;
 		job_ptr->job_state = JOB_PENDING | job_comp_flag;
+		/* Since the job completion logger removes the
+		   submit count, we need to add it again.
+		*/
+		acct_policy_add_job_submit(job_ptr);
+
 		info("Non-responding node, requeue JobId=%u", job_ptr->job_id);
 	} else if ((job_ptr->job_state == JOB_PENDING) && job_ptr->details && 
 		   job_ptr->batch_flag) {
 		/* Possible failure mode with DOWN node and job requeue.
 		 * The DOWN node might actually respond to the cancel and
-		 * take us here. */
+		 * take us here.  Don't run job_completion_logger:
+		 * this branch exists to catch duplicate cancels
+		 * from slow-responding slurmds */
 		return SLURM_SUCCESS;
 	} else {
 		if (job_return_code == NO_VAL) {
@@ -1887,14 +2177,18 @@ extern int job_complete(uint32_t job_id, uid_t uid, bool requeue,
 			job_ptr->job_state = JOB_FAILED   | job_comp_flag;
 			job_ptr->exit_code = job_return_code;
 			job_ptr->state_reason = FAIL_EXIT_CODE;
+			xfree(job_ptr->state_desc);
 		} else if (job_comp_flag &&		/* job was running */
-			 (job_ptr->end_time < now)) {	/* over time limit */
+			   (job_ptr->end_time < now)) {	/* over time limit */
 			job_ptr->job_state = JOB_TIMEOUT  | job_comp_flag;
 			job_ptr->exit_code = MAX(job_ptr->exit_code, 1);
 			job_ptr->state_reason = FAIL_TIMEOUT;
-		} else 
+			xfree(job_ptr->state_desc);
+		} else {
 			job_ptr->job_state = JOB_COMPLETE | job_comp_flag;
-		
+			job_ptr->exit_code = job_return_code;
+		}
+
 		if (suspended) {
 			job_ptr->end_time = job_ptr->suspend_time;
 			job_ptr->tot_sus_time += 
@@ -2000,6 +2294,10 @@ static int _job_create(job_desc_msg_t * job_desc, int allocate, int will_run,
 		return error_code;
 	}
 
+	if ((job_desc->time_limit == NO_VAL) &&
+	    (part_ptr->default_time != NO_VAL))
+		job_desc->time_limit = part_ptr->default_time;
+
 	if ((job_desc->time_limit != NO_VAL) &&
 	    (job_desc->time_limit > part_ptr->max_time) &&
 	    slurmctld_conf.enforce_part_limits) {
@@ -2033,6 +2331,15 @@ static int _job_create(job_desc_msg_t * job_desc, int allocate, int will_run,
 		return error_code;
 	}
 
+	if (validate_alloc_node(part_ptr, job_desc->alloc_node) == 0) {
+		info("_job_create: uid %u access to partition %s denied, "
+		     "bad allocating node: %s",
+		     (unsigned int) job_desc->user_id, part_ptr->name,
+		     job_desc->alloc_node);
+		error_code = ESLURM_ACCESS_DENIED;
+		return error_code;
+	}
+
 	memset(&assoc_rec, 0, sizeof(acct_association_rec_t));
 	assoc_rec.uid       = job_desc->user_id;
 	assoc_rec.partition = part_ptr->name;
@@ -2235,11 +2542,24 @@ static int _job_create(job_desc_msg_t * job_desc, int allocate, int will_run,
 			error_code = ESLURM_ERROR_ON_DESC_TO_RECORD_COPY;
 		goto cleanup_fail;
 	}
+	if ((error_code=checkpoint_alloc_jobinfo(&((*job_pptr)->check_job)))) {
+		error("Failed to allocate checkpoint info for job");
+		goto cleanup_fail;
+	}
 
 	job_ptr = *job_pptr;
+	
 	job_ptr->assoc_id = assoc_rec.id;
 	job_ptr->assoc_ptr = (void *) assoc_ptr;
 
+	/* This must be done after we have the assoc_ptr set */
+
+	/* An explicit priority was already confirmed to come from
+	   submit_uid==0.  If the priority isn't given we will figure
+	   it out later, after we see whether the job is eligible, so
+	   we want NO_VAL if not set. */
+	job_ptr->priority = job_desc->priority;
+
 	if (update_job_dependency(job_ptr, job_desc->dependency)) {
 		error_code = ESLURM_DEPENDENCY;
 		goto cleanup_fail;
@@ -2249,6 +2569,9 @@ static int _job_create(job_desc_msg_t * job_desc, int allocate, int will_run,
 		goto cleanup_fail;
 	}
 
+	if ((error_code = validate_job_resv(job_ptr)))
+		goto cleanup_fail;
+
 	if (job_desc->script
 	    &&  (!will_run)) {	/* don't bother with copy if just a test */
 		if ((error_code = _copy_job_desc_to_file(job_desc,
@@ -2292,6 +2615,7 @@ static int _job_create(job_desc_msg_t * job_desc, int allocate, int will_run,
 		error_code = ESLURM_REQUESTED_PART_CONFIG_UNAVAILABLE;
 		job_ptr->priority = 1;      /* Move to end of queue */
 		job_ptr->state_reason = fail_reason;
+		xfree(job_ptr->state_desc);
 	}
 
 cleanup:
@@ -2306,6 +2630,7 @@ cleanup_fail:
 		job_ptr->job_state = JOB_FAILED;
 		job_ptr->exit_code = 1;
 		job_ptr->state_reason = FAIL_SYSTEM;
+		xfree(job_ptr->state_desc);
 		job_ptr->start_time = job_ptr->end_time = time(NULL);
 	}
 	if (license_list)
@@ -2325,12 +2650,14 @@ static int _validate_job_create_req(job_desc_msg_t * job_desc)
 		     strlen(job_desc->account));
 		return ESLURM_PATHNAME_TOO_LONG;
 	}
-	if (job_desc->alloc_node && (strlen(job_desc->alloc_node) > MAX_STR_LEN)) {
+	if (job_desc->alloc_node && 
+	    (strlen(job_desc->alloc_node) > MAX_STR_LEN)) {
 		info("_validate_job_create_req: strlen(alloc_node) too big (%d)",
 		     strlen(job_desc->alloc_node));
 		return ESLURM_PATHNAME_TOO_LONG;
 	}
-	if (job_desc->blrtsimage && (strlen(job_desc->blrtsimage) > MAX_STR_LEN)) {
+	if (job_desc->blrtsimage && 
+	    (strlen(job_desc->blrtsimage) > MAX_STR_LEN)) {
 		info("_validate_job_create_req: strlen(blrtsimage) too big (%d)",
 		     strlen(job_desc->blrtsimage));
 		return ESLURM_PATHNAME_TOO_LONG;
@@ -2340,7 +2667,8 @@ static int _validate_job_create_req(job_desc_msg_t * job_desc)
 		     strlen(job_desc->comment));
 		return ESLURM_PATHNAME_TOO_LONG;
 	}
-	if (job_desc->dependency && (strlen(job_desc->dependency) > MAX_STR_LEN)) {
+	if (job_desc->dependency && 
+	    (strlen(job_desc->dependency) > MAX_STR_LEN)) {
 		info("_validate_job_create_req: strlen(dependency) too big (%d)",
 		     strlen(job_desc->dependency));
 		return ESLURM_PATHNAME_TOO_LONG;
@@ -2360,7 +2688,8 @@ static int _validate_job_create_req(job_desc_msg_t * job_desc)
 		     strlen(job_desc->in));
 		return ESLURM_PATHNAME_TOO_LONG;
 	}
-	if (job_desc->linuximage && (strlen(job_desc->linuximage) > MAX_STR_LEN)) {
+	if (job_desc->linuximage && 
+	    (strlen(job_desc->linuximage) > MAX_STR_LEN)) {
 		info("_validate_job_create_req: strlen(linuximage) too big (%d)",
 		     strlen(job_desc->linuximage));
 		return ESLURM_PATHNAME_TOO_LONG;
@@ -2375,7 +2704,8 @@ static int _validate_job_create_req(job_desc_msg_t * job_desc)
 		     strlen(job_desc->mail_user));
 		return ESLURM_PATHNAME_TOO_LONG;
 	}
-	if (job_desc->mloaderimage && (strlen(job_desc->mloaderimage) > MAX_STR_LEN)) {
+	if (job_desc->mloaderimage && 
+	    (strlen(job_desc->mloaderimage) > MAX_STR_LEN)) {
 		info("_validate_job_create_req: strlen(mloaderimage) too big (%d)",
 		     strlen(job_desc->features));
 		return ESLURM_PATHNAME_TOO_LONG;
@@ -2400,7 +2730,8 @@ static int _validate_job_create_req(job_desc_msg_t * job_desc)
 		     strlen(job_desc->partition));
 		return ESLURM_PATHNAME_TOO_LONG;
 	}
-	if (job_desc->ramdiskimage && (strlen(job_desc->ramdiskimage) > MAX_STR_LEN)) {
+	if (job_desc->ramdiskimage && 
+	    (strlen(job_desc->ramdiskimage) > MAX_STR_LEN)) {
 		info("_validate_job_create_req: strlen(ramdiskimage) too big (%d)",
 		     strlen(job_desc->ramdiskimage));
 		return ESLURM_PATHNAME_TOO_LONG;
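_validate_job_create_req() repeats one pattern per string field. Should
the repetition ever need taming, a helper macro along these lines would
express the check once; everything below (the MAX_STR_LEN value, error
constant, and toy struct) is a stand-in for illustration, not part of
the patch:

#include <stdio.h>
#include <string.h>

#define MAX_STR_LEN	1024
#define ERR_TOO_LONG	(-1)

#define VALIDATE_STR_LEN(desc, field)					\
	do {								\
		if ((desc)->field &&					\
		    (strlen((desc)->field) > MAX_STR_LEN)) {		\
			fprintf(stderr, "strlen(" #field ") too big\n"); \
			return ERR_TOO_LONG;				\
		}							\
	} while (0)

struct toy_desc { char *alloc_node; char *comment; };

static int toy_validate_req(struct toy_desc *d)
{
	VALIDATE_STR_LEN(d, alloc_node);
	VALIDATE_STR_LEN(d, comment);
	return 0;
}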
@@ -2799,47 +3130,47 @@ _copy_job_desc_to_job_record(job_desc_msg_t * job_desc,
 	struct job_record *job_ptr;
 
         if(slurm_get_track_wckey()) {
-		char *wckey = NULL;
-		if(!job_desc->name || !strchr(job_desc->name, '\"')) {
+		if(!job_desc->wckey) {
 			/* get the default wckey for this user since none was
 			 * given */
 			acct_user_rec_t user_rec;
 			memset(&user_rec, 0, sizeof(acct_user_rec_t));
 			user_rec.uid = job_desc->user_id;
 			assoc_mgr_fill_in_user(acct_db_conn, &user_rec,
-					       accounting_enforce);
+					       accounting_enforce, NULL);
 			if(user_rec.default_wckey)
-				xstrfmtcat(job_desc->name, "\"*%s",
-					   user_rec.default_wckey);
+				job_desc->wckey = xstrdup_printf(
+					"*%s", user_rec.default_wckey);
 			else if(!(accounting_enforce 
 				  & ACCOUNTING_ENFORCE_WCKEYS))
-				xstrcat(job_desc->name, "\"*");	
+				job_desc->wckey = xstrdup("*");	
 			else {
 				error("Job didn't specify wckey and user "
 				      "%d has no default.", job_desc->user_id);
 				return ESLURM_INVALID_WCKEY;
-			}
-		} else if(job_desc->name 
-			  && (wckey = strchr(job_desc->name, '\"'))
-			  && (accounting_enforce & ACCOUNTING_ENFORCE_WCKEYS)) {
+			}		
+		} else if(job_desc->wckey) {
 			acct_wckey_rec_t wckey_rec, *wckey_ptr = NULL;
-			wckey++;
 				
 			memset(&wckey_rec, 0, sizeof(acct_wckey_rec_t));
 			wckey_rec.uid       = job_desc->user_id;
-			wckey_rec.name      = wckey;
+			wckey_rec.name      = job_desc->wckey;
 
 			if (assoc_mgr_fill_in_wckey(acct_db_conn, &wckey_rec,
 						    accounting_enforce,
 						    &wckey_ptr)) {
-				info("_job_create: invalid wckey '%s' "
-				     "for user %u.",
-				     wckey_rec.name, job_desc->user_id);
-				return ESLURM_INVALID_WCKEY;
+				if(accounting_enforce 
+				   & ACCOUNTING_ENFORCE_WCKEYS) {
+					info("_job_create: invalid wckey '%s' "
+					     "for user %u.",
+					     wckey_rec.name, job_desc->user_id);
+					return ESLURM_INVALID_WCKEY;
+				}
 			}
+			job_desc->wckey = xstrdup(job_desc->wckey);
 		} else if (accounting_enforce & ACCOUNTING_ENFORCE_WCKEYS) {
 			/* This should never happen */
-			info("_job_create: no wckey was given.");
+			info("_job_create: no wckey was given for job submit.");
 				return ESLURM_INVALID_WCKEY;
 		}
 	}
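Summarizing the wckey fallback above as a stand-alone sketch: a missing
wckey falls back to the user's default, prefixed with '*' so accounting
can tell it was assumed (matching the xstrdup_printf("*%s", ...) in the
hunk), and the submit fails only when wckey enforcement is enabled. The
function name and types here are invented:

#include <stdio.h>

static const char *toy_resolve_wckey(const char *requested,
				     const char *user_default,
				     int enforce_wckeys)
{
	static char buf[256];

	if (requested)
		return requested;	/* validated separately */
	if (user_default) {
		snprintf(buf, sizeof(buf), "*%s", user_default);
		return buf;		/* assumed default, '*'-prefixed */
	}
	return enforce_wckeys ? NULL : "*";	/* NULL: reject the job */
}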
@@ -2850,13 +3181,16 @@ _copy_job_desc_to_job_record(job_desc_msg_t * job_desc,
 
 	job_ptr->partition = xstrdup(part_ptr->name);
 	job_ptr->part_ptr = part_ptr;
+	
 	if (job_desc->job_id != NO_VAL)		/* already confirmed unique */
 		job_ptr->job_id = job_desc->job_id;
 	else
 		_set_job_id(job_ptr);
 
 	if (job_desc->name)
-		job_ptr->name = xstrdup(job_desc->name);	
+		job_ptr->name = xstrdup(job_desc->name);
+	if (job_desc->wckey)
+		job_ptr->wckey = xstrdup(job_desc->wckey);
 
 	_add_job_hash(job_ptr);
 
@@ -2868,6 +3202,7 @@ _copy_job_desc_to_job_record(job_desc_msg_t * job_desc,
 	job_ptr->alloc_node = xstrdup(job_desc->alloc_node);
 	job_ptr->account    = xstrdup(job_desc->account);
 	job_ptr->network    = xstrdup(job_desc->network);
+	job_ptr->resv_name  = xstrdup(job_desc->reservation);
 	job_ptr->comment    = xstrdup(job_desc->comment);
 	if (!wiki_sched_test) {
 		char *sched_type = slurm_get_sched_type();
@@ -2879,18 +3214,26 @@ _copy_job_desc_to_job_record(job_desc_msg_t * job_desc,
 	}
 	if (wiki_sched && job_ptr->comment &&
 	    strstr(job_ptr->comment, "QOS:")) {
+		acct_qos_rec_t qos_rec;
+
+		memset(&qos_rec, 0, sizeof(acct_qos_rec_t));
+
 		if (strstr(job_ptr->comment, "FLAGS:PREEMPTOR"))
-			job_ptr->qos = QOS_EXPEDITE;
+			qos_rec.name = "expedite";
 		else if (strstr(job_ptr->comment, "FLAGS:PREEMPTEE"))
-			job_ptr->qos = QOS_STANDBY;
+			qos_rec.name = "standby";
 		else
-			job_ptr->qos = QOS_NORMAL;
-	}
-	if (job_desc->priority != NO_VAL) /* already confirmed submit_uid==0 */
-		job_ptr->priority = job_desc->priority;
-	else {
-		_set_job_prio(job_ptr);
-		job_ptr->priority -= ((int)job_desc->nice - NICE_OFFSET);
+			qos_rec.name = "normal";
+		
+		if((assoc_mgr_fill_in_qos(acct_db_conn, &qos_rec,
+					  accounting_enforce,
+					  (acct_qos_rec_t **)&job_ptr->qos_ptr))
+		   != SLURM_SUCCESS) {
+			verbose("Invalid qos (%s) for job_id %u", 
+				qos_rec.name, job_ptr->job_id);
+			/* not a fatal error, qos could have been removed */
+		} else 
+			job_ptr->qos = qos_rec.id;
 	}
 
 	if (job_desc->kill_on_node_fail != (uint16_t) NO_VAL)
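The Wiki/Moab scheduler passes QOS hints inside the job comment; the
hunk above replaces the old hard-coded QOS_* constants with named
records resolved through assoc_mgr_fill_in_qos(). The string mapping
itself, extracted into a hypothetical helper:

#include <string.h>

static const char *toy_qos_from_comment(const char *comment)
{
	if (!comment || !strstr(comment, "QOS:"))
		return NULL;			/* no QOS hint present */
	if (strstr(comment, "FLAGS:PREEMPTOR"))
		return "expedite";
	if (strstr(comment, "FLAGS:PREEMPTEE"))
		return "standby";
	return "normal";
}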
@@ -2907,12 +3250,15 @@ _copy_job_desc_to_job_record(job_desc_msg_t * job_desc,
 	job_ptr->mail_type = job_desc->mail_type;
 	job_ptr->mail_user = xstrdup(job_desc->mail_user);
 
+	job_ptr->ckpt_interval = job_desc->ckpt_interval;
+
 	detail_ptr = job_ptr->details;
 	detail_ptr->argc = job_desc->argc;
 	detail_ptr->argv = job_desc->argv;
 	job_desc->argv   = (char **) NULL; /* nothing left */
 	job_desc->argc   = 0;		   /* nothing left */
 	detail_ptr->acctg_freq = job_desc->acctg_freq;
+	detail_ptr->nice       = job_desc->nice;
 	detail_ptr->open_mode  = job_desc->open_mode;
 	detail_ptr->min_nodes  = job_desc->min_nodes;
 	detail_ptr->max_nodes  = job_desc->max_nodes;
@@ -2940,16 +3286,21 @@ _copy_job_desc_to_job_record(job_desc_msg_t * job_desc,
 		detail_ptr->cpus_per_task = MAX(job_desc->cpus_per_task, 1);
 	else
 		detail_ptr->cpus_per_task = 1;
-	if (job_desc->ntasks_per_node != (uint16_t) NO_VAL)
+	if (job_desc->job_min_procs != (uint16_t) NO_VAL)
+		detail_ptr->job_min_procs = job_desc->job_min_procs;
+	if (job_desc->ntasks_per_node != (uint16_t) NO_VAL) {
 		detail_ptr->ntasks_per_node = job_desc->ntasks_per_node;
+		detail_ptr->job_min_procs = MAX(detail_ptr->job_min_procs,
+						(detail_ptr->cpus_per_task *
+						 detail_ptr->ntasks_per_node));
+	} else {
+		detail_ptr->job_min_procs = MAX(detail_ptr->job_min_procs,
+						detail_ptr->cpus_per_task);
+	}
 	if (job_desc->requeue != (uint16_t) NO_VAL)
 		detail_ptr->requeue = MIN(job_desc->requeue, 1);
 	else
 		detail_ptr->requeue = slurmctld_conf.job_requeue;
-	if (job_desc->job_min_procs != (uint16_t) NO_VAL)
-		detail_ptr->job_min_procs = job_desc->job_min_procs;
-	detail_ptr->job_min_procs = MAX(detail_ptr->job_min_procs,
-					detail_ptr->cpus_per_task);
 	if (job_desc->job_min_memory != NO_VAL)
 		detail_ptr->job_min_memory = job_desc->job_min_memory;
 	if (job_desc->job_min_tmp_disk != NO_VAL)
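The job_min_procs arithmetic above in numbers: with cpus_per_task = 2
and ntasks_per_node = 4, each allocated node must supply at least
2 * 4 = 8 processors; without ntasks_per_node the floor is just
cpus_per_task. A self-contained restatement (toy function, not SLURM
code):

static unsigned toy_min_procs(unsigned user_min, unsigned cpus_per_task,
			      unsigned ntasks_per_node /* 0 when unset */)
{
	unsigned need = ntasks_per_node ?
		(cpus_per_task * ntasks_per_node) : cpus_per_task;

	return (user_min > need) ? user_min : need;
}
/* toy_min_procs(0, 2, 4) == 8;  toy_min_procs(16, 2, 4) == 16 */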
@@ -2970,6 +3321,16 @@ _copy_job_desc_to_job_record(job_desc_msg_t * job_desc,
 		detail_ptr->begin_time = job_desc->begin_time;
 	job_ptr->select_jobinfo = 
 		select_g_copy_jobinfo(job_desc->select_jobinfo);
+
+	if (job_desc->ckpt_dir)
+		detail_ptr->ckpt_dir = xstrdup(job_desc->ckpt_dir);
+	else
+		detail_ptr->ckpt_dir = xstrdup(detail_ptr->work_dir);
+	
+	/* The priority needs to be set later, since we don't have
+	   an association record yet. */
+
 	detail_ptr->mc_ptr = _set_multi_core_data(job_desc);	
 	*job_rec_ptr = job_ptr;
 	return SLURM_SUCCESS;
@@ -3055,37 +3416,175 @@ void job_time_limit(void)
 	struct job_record *job_ptr;
 	time_t now = time(NULL);
 	time_t old = now - slurmctld_conf.inactive_limit;
+	time_t over_run;
+	int resv_status = 0;
+	uint64_t job_cpu_usage_mins = 0;
+	if (slurmctld_conf.over_time_limit == (uint16_t) INFINITE)
+		over_run = now - (365 * 24 * 60 * 60);	/* one year */
+	else
+		over_run = now - (slurmctld_conf.over_time_limit  * 60);
 
+	begin_job_resv_check();
 	job_iterator = list_iterator_create(job_list);
-	while ((job_ptr =
-		(struct job_record *) list_next(job_iterator))) {
+	while ((job_ptr = (struct job_record *) list_next(job_iterator))) {
+/* 		acct_qos_rec_t *qos = NULL; */
+		acct_association_rec_t *assoc =	NULL;
+
 		xassert (job_ptr->magic == JOB_MAGIC);
+
+		resv_status = job_resv_check(job_ptr);
 		if (job_ptr->job_state != JOB_RUNNING)
 			continue;
 
+/* 		qos = (acct_qos_rec_t *)job_ptr->qos_ptr; */
+		assoc =	(acct_association_rec_t *)job_ptr->assoc_ptr;
+
+		/* find out how many cpu minutes this job has been
+		   running for. */
+		job_cpu_usage_mins = (uint64_t)
+			((((now - job_ptr->start_time)
+			   - job_ptr->tot_sus_time) / 60) 
+			 * job_ptr->total_procs);
+
 		/* Consider a job active if it has any active steps */
-		if (job_ptr->step_list
-		    &&  (list_count(job_ptr->step_list) > 0))
+		if (job_ptr->step_list &&
+		    (list_count(job_ptr->step_list) > 0))
 			job_ptr->time_last_active = now;
 
-		if (slurmctld_conf.inactive_limit
-		    &&  (job_ptr->time_last_active <= old)
-		    &&  (job_ptr->part_ptr)
-		    &&  (job_ptr->part_ptr->root_only == 0)) {
+		if (slurmctld_conf.inactive_limit &&
+		    (job_ptr->time_last_active <= old) &&
+		    (job_ptr->part_ptr) &&
+		    (job_ptr->part_ptr->root_only == 0)) {
 			/* job inactive, kill it */
 			info("Inactivity time limit reached for JobId=%u",
 			     job_ptr->job_id);
 			_job_timed_out(job_ptr);
 			job_ptr->state_reason = FAIL_INACTIVE_LIMIT;
+			xfree(job_ptr->state_desc);
 			continue;
 		}
-		if ((job_ptr->time_limit != INFINITE)
-		    &&  (job_ptr->end_time <= now)) {
+		if ((job_ptr->time_limit != INFINITE) &&
+		    (job_ptr->end_time <= over_run)) {
 			last_job_update = now;
 			info("Time limit exhausted for JobId=%u",
 			     job_ptr->job_id);
 			_job_timed_out(job_ptr);
 			job_ptr->state_reason = FAIL_TIMEOUT;
+			xfree(job_ptr->state_desc);
+			continue;
+		}
+
+		if (resv_status != SLURM_SUCCESS) {
+			last_job_update = now;
+			info("Reservation ended for JobId=%u",
+			     job_ptr->job_id);
+			_job_timed_out(job_ptr);
+			job_ptr->state_reason = FAIL_TIMEOUT;
+			xfree(job_ptr->state_desc);
+			continue;
+		}
+
+		/* To be added later, once QOS actually works.  The
+		 * idea here is for QOS to trump what an association
+		 * has set for a limit: if an association sets a wall
+		 * limit of 10 mins, the QOS has 20 mins set, and the
+		 * job has been running for 11 minutes, it continues
+		 * until 20.
+		 */
+/* 		if(qos) { */
+/* 			slurm_mutex_lock(&assoc_mgr_qos_lock); */
+/* 			if ((qos->grp_cpu_mins != (uint64_t)NO_VAL) */
+/* 			    && (qos->grp_cpu_mins != (uint64_t)INFINITE) */
+/* 			    && ((uint64_t)qos->usage_raw  */
+/* 				>= qos->grp_cpu_mins)) { */
+/* 				last_job_update = now; */
+/* 				info("QOS %s group max cpu minutes is " */
+/* 				     "at or exceeds %llu with %Lf for JobId=%u", */
+/* 				     qos->name, qos->grp_cpu_mins, */
+/* 				     qos->usage_raw, job_ptr->job_id); */
+/* 				_job_timed_out(job_ptr); */
+/* 				job_ptr->state_reason = FAIL_TIMEOUT; */
+/* 			} */
+
+/* 			if ((qos->max_wall_pj != NO_VAL) */
+/* 			    && (qos->max_wall_pj != INFINITE) */
+/* 			    && (job_ptr-> >= qos->max_wall_pj)) { */
+/* 				last_job_update = now; */
+/* 				info("QOS %s group max cpu minutes is " */
+/* 				     "at or exceeds %llu with %Lf for JobId=%u", */
+/* 				     qos->name, qos->grp_cpu_mins, */
+/* 				     qos->usage_raw, job_ptr->job_id); */
+/* 				_job_timed_out(job_ptr); */
+/* 				job_ptr->state_reason = FAIL_TIMEOUT; */
+/* 			} */
+/* 			slurm_mutex_unlock(&assoc_mgr_qos_lock); */
+
+/* 			if(job_ptr->state_reason == FAIL_TIMEOUT) { */
+/* 				xfree(job_ptr->state_desc); */
+/* 				continue; */
+/* 			} */
+/* 		} */
+
+		/* handle any association stuff here */
+		slurm_mutex_lock(&assoc_mgr_association_lock);
+		while(assoc) {
+			uint64_t usage_mins =
+				(uint64_t)(assoc->usage_raw / 60.0);
+			uint32_t wall_mins = assoc->grp_used_wall / 60;
+			
+			if ((assoc->grp_cpu_mins != (uint64_t)NO_VAL)
+			    && (assoc->grp_cpu_mins != (uint64_t)INFINITE)
+			    && (usage_mins >= assoc->grp_cpu_mins)) {
+				info("Job %u timed out, "
+				     "assoc %u is at or exceeds "
+				     "group max cpu minutes limit %llu "
+				     "with %llu for account %s",
+				     job_ptr->job_id, assoc->id,
+				     assoc->grp_cpu_mins, 
+				     usage_mins, assoc->acct);
+				job_ptr->state_reason = FAIL_TIMEOUT;
+				break;
+			}
+
+			if ((assoc->grp_wall != NO_VAL)
+			    && (assoc->grp_wall != INFINITE)
+			    && (wall_mins >= assoc->grp_wall)) {
+				info("Job %u timed out, "
+				     "assoc %u is at or exceeds "
+				     "group wall limit %u "
+				     "with %u for account %s",
+				     job_ptr->job_id, assoc->id,
+				     assoc->grp_wall, 
+				     wall_mins, assoc->acct);
+				job_ptr->state_reason = FAIL_TIMEOUT;
+				break;
+			}
+
+			if ((assoc->max_cpu_mins_pj != (uint64_t)NO_VAL)
+			    && (assoc->max_cpu_mins_pj != (uint64_t)INFINITE)
+			    && (job_cpu_usage_mins >= assoc->max_cpu_mins_pj)) {
+				info("Job %u timed out, "
+				     "assoc %u is at or exceeds "
+				     "max cpu minutes limit %llu "
+				     "with %llu for account %s",
+				     job_ptr->job_id, assoc->id,
+				     assoc->max_cpu_mins_pj, 
+				     job_cpu_usage_mins, assoc->acct);
+				job_ptr->state_reason = FAIL_TIMEOUT;
+				break;
+			}
+
+			assoc = assoc->parent_assoc_ptr;
+			/* these limits don't apply to the root assoc */
+			if(assoc == assoc_mgr_root_assoc)
+				break;
+		}
+		slurm_mutex_unlock(&assoc_mgr_association_lock);
+		
+		if(job_ptr->state_reason == FAIL_TIMEOUT) {
+			last_job_update = now;
+			_job_timed_out(job_ptr);
+			xfree(job_ptr->state_desc);
 			continue;
 		}
 
@@ -3095,6 +3594,7 @@ void job_time_limit(void)
 	}
 
 	list_iterator_destroy(job_iterator);
+	fini_job_resv_check();
 }
 
 /* Terminate a job that has exhausted its time limit */
@@ -3170,7 +3670,7 @@ static int _validate_job_desc(job_desc_msg_t * job_desc_msg, int allocate,
 		}
 		dup_job_ptr = find_job_record((uint32_t) job_desc_msg->job_id);
 		if (dup_job_ptr && 
-		    (!(IS_JOB_FINISHED(dup_job_ptr)))) {
+		    (!(IS_JOB_COMPLETED(dup_job_ptr)))) {
 			info("attempt re-use active job_id %u", 
 			     job_desc_msg->job_id);
 			return ESLURM_DUPLICATE_JOB_ID;
@@ -3251,30 +3751,30 @@ static void _list_delete_job(void *job_entry)
 	*job_pptr = job_ptr->job_next;
 
 	delete_job_details(job_ptr);
-	xfree(job_ptr->alloc_node);
-	xfree(job_ptr->name);
-	xfree(job_ptr->nodes);
-	xfree(job_ptr->nodes_completing);
-	FREE_NULL_BITMAP(job_ptr->node_bitmap);
-	xfree(job_ptr->partition);
-	xfree(job_ptr->cpus_per_node);
-	xfree(job_ptr->cpu_count_reps);
-	xfree(job_ptr->node_addr);
 	xfree(job_ptr->account);
-	xfree(job_ptr->resp_host);
+	xfree(job_ptr->alloc_node);
+	xfree(job_ptr->comment);
 	xfree(job_ptr->licenses);
 	if (job_ptr->license_list)
 		list_destroy(job_ptr->license_list);
 	xfree(job_ptr->mail_user);
+	xfree(job_ptr->name);
 	xfree(job_ptr->network);
-	xfree(job_ptr->alloc_lps);
-	xfree(job_ptr->used_lps);
-	xfree(job_ptr->comment);
+	xfree(job_ptr->node_addr);
+	FREE_NULL_BITMAP(job_ptr->node_bitmap);
+	xfree(job_ptr->nodes);
+	xfree(job_ptr->nodes_completing);
+	xfree(job_ptr->partition);
+	xfree(job_ptr->resp_host);
+	xfree(job_ptr->resv_name);
+	free_select_job_res(&job_ptr->select_job);
 	select_g_free_jobinfo(&job_ptr->select_jobinfo);
+	xfree(job_ptr->state_desc);
 	if (job_ptr->step_list) {
 		delete_step_records(job_ptr, 0);
 		list_destroy(job_ptr->step_list);
 	}
+	xfree(job_ptr->wckey);
 	job_count--;
 	xfree(job_ptr);
 }
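The destructor above can free fields in plain alphabetical order and
never NULL-checks first because SLURM's xfree() takes the address of
the pointer, tolerates NULL, and resets the pointer afterwards. A
minimal illustration of that idiom (the real xfree also does allocation
bookkeeping, which is omitted here):

#include <stdlib.h>

static void toy_xfree(void **p)
{
	if (p && *p) {
		free(*p);
		*p = NULL;	/* repeated frees become no-ops */
	}
}
#define TOY_XFREE(x) toy_xfree((void **)&(x))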
@@ -3452,15 +3952,16 @@ extern int pack_one_job(char **buffer_ptr, int *buffer_size,
 void pack_job(struct job_record *dump_job_ptr, Buf buffer)
 {
 	struct job_details *detail_ptr;
-	uint32_t size_tmp;
 
+	pack32(dump_job_ptr->assoc_id, buffer);
 	pack32(dump_job_ptr->job_id, buffer);
 	pack32(dump_job_ptr->user_id, buffer);
 	pack32(dump_job_ptr->group_id, buffer);
 
-	pack16(dump_job_ptr->job_state, buffer);
-	pack16(dump_job_ptr->batch_flag, buffer);
+	pack16(dump_job_ptr->job_state,    buffer);
+	pack16(dump_job_ptr->batch_flag,   buffer);
 	pack16(dump_job_ptr->state_reason, buffer);
+	pack16(dump_job_ptr->restart_cnt,  buffer);
 
 	pack32(dump_job_ptr->alloc_sid, buffer);
 	if ((dump_job_ptr->time_limit == NO_VAL) && dump_job_ptr->part_ptr)
@@ -3487,18 +3988,23 @@ void pack_job(struct job_record *dump_job_ptr, Buf buffer)
 	packstr(dump_job_ptr->network, buffer);
 	packstr(dump_job_ptr->comment, buffer);
 	packstr(dump_job_ptr->licenses, buffer);
+	packstr(dump_job_ptr->state_desc, buffer);
+	packstr(dump_job_ptr->resv_name, buffer);
 
 	pack32(dump_job_ptr->exit_code, buffer);
 
-	pack16(dump_job_ptr->num_cpu_groups, buffer);
-	size_tmp = dump_job_ptr->num_cpu_groups;
-	if (size_tmp < 0) {
-	    	size_tmp = 0;
-	}
-	pack32_array(dump_job_ptr->cpus_per_node, size_tmp, buffer);
-	pack32_array(dump_job_ptr->cpu_count_reps, size_tmp, buffer);
+	if (dump_job_ptr->select_job && 
+	    dump_job_ptr->select_job->cpu_array_cnt) {
+		pack32(dump_job_ptr->select_job->cpu_array_cnt, buffer);
+		pack16_array(dump_job_ptr->select_job->cpu_array_value,
+			     dump_job_ptr->select_job->cpu_array_cnt, buffer);
+		pack32_array(dump_job_ptr->select_job->cpu_array_reps,
+			     dump_job_ptr->select_job->cpu_array_cnt, buffer);
+	} else
+		pack32((uint32_t) 0, buffer);
 
 	packstr(dump_job_ptr->name, buffer);
+	packstr(dump_job_ptr->wckey, buffer);
 	packstr(dump_job_ptr->alloc_node, buffer);
 	pack_bit_fmt(dump_job_ptr->node_bitmap, buffer);
 	pack32(dump_job_ptr->num_procs, buffer);
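The new CPU-count packing above is a run-length encoding:
cpu_array_value[i] holds a per-node CPU count and cpu_array_reps[i]
says how many consecutive allocated nodes share it. Decoding back into
a total is then a one-liner; a sketch with the buffer plumbing omitted:

#include <stdint.h>

static uint32_t toy_total_cpus(const uint16_t *value,
			       const uint32_t *reps, uint32_t cnt)
{
	uint32_t i, total = 0;

	for (i = 0; i < cnt; i++)
		total += (uint32_t) value[i] * reps[i];
	return total;	/* e.g. value={8,4}, reps={2,1} -> 20 CPUs */
}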
@@ -3639,9 +4145,27 @@ void reset_job_bitmaps(void)
 	struct part_record *part_ptr;
 	bool job_fail = false;
 	time_t now = time(NULL);
+	static uint32_t cr_flag = NO_VAL, gang_flag = NO_VAL;
 
 	xassert(job_list);
 
+	if (cr_flag == NO_VAL) {
+		cr_flag = 0;  /* call is no-op for select/linear and bluegene */
+		if (select_g_get_info_from_plugin(SELECT_CR_PLUGIN,
+						  NULL, &cr_flag)) {
+			cr_flag = NO_VAL;	/* error */
+		}
+	}
+	if (gang_flag == NO_VAL) {
+		char *sched_type = slurm_get_sched_type();
+		if (strcmp(sched_type, "sched/gang"))
+			gang_flag = 0;
+		else
+			gang_flag = 1;
+		xfree(sched_type);
+	}
+
 	job_iterator = list_iterator_create(job_list);
 	while ((job_ptr = (struct job_record *) list_next(job_iterator))) {
 		xassert (job_ptr->magic == JOB_MAGIC);
@@ -3677,10 +4201,20 @@ void reset_job_bitmaps(void)
 		    	      job_ptr->nodes, job_ptr->job_id);
 			job_fail = true;
 		}
+		reset_node_bitmap(job_ptr->select_job,
+				  job_ptr->node_bitmap);
+		if (!job_fail && !IS_JOB_FINISHED(job_ptr) && 
+		    job_ptr->select_job && (cr_flag || gang_flag) && 
+		    valid_select_job_res(job_ptr->select_job, 
+					 node_record_table_ptr, 
+					 slurmctld_conf.fast_schedule)) {
+			error("Aborting JobID %u due to change in socket/core "
+			      "configuration of allocated nodes",
+			      job_ptr->job_id);
+			job_fail = true;
+		}
 		_reset_step_bitmaps(job_ptr);
-		build_node_details(job_ptr);	/* set: num_cpu_groups, 
-						 * cpu_count_reps, node_cnt, 
-						 * cpus_per_node, node_addr */
+		build_node_details(job_ptr);	/* set node_addr */
 
 		if (_reset_detail_bitmaps(job_ptr))
 			job_fail = true;
@@ -3711,6 +4245,7 @@ void reset_job_bitmaps(void)
 			}
 			job_ptr->exit_code = MAX(job_ptr->exit_code, 1);
 			job_ptr->state_reason = FAIL_DOWN_NODE;
+			xfree(job_ptr->state_desc);
 			job_completion_logger(job_ptr);
 		}
 	}
@@ -3848,10 +4383,16 @@ static void _set_job_prio(struct job_record *job_ptr)
 {
 	xassert(job_ptr);
 	xassert (job_ptr->magic == JOB_MAGIC);
+	if (IS_JOB_FINISHED(job_ptr))
+		return;
 	job_ptr->priority = slurm_sched_initial_priority(maximum_prio,
 							 job_ptr);
-	if (job_ptr->priority > 0)
-		maximum_prio = MIN(job_ptr->priority, maximum_prio);
+	if ((job_ptr->priority <= 1) || 
+	    (job_ptr->direct_set_prio) ||
+	    (job_ptr->details && (job_ptr->details->nice != NICE_OFFSET)))
+		return;
+
+	maximum_prio = MIN(job_ptr->priority, maximum_prio);
 }
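maximum_prio, despite its name, tracks the lowest priority handed out
so far, so later submissions queue behind earlier ones. The new guards
keep held jobs (priority <= 1), explicitly set priorities, and
nice-adjusted jobs from dragging that baseline down. Restated as a
hypothetical pure function:

static unsigned toy_update_baseline(unsigned baseline, unsigned new_prio,
				    int direct_set, int niced)
{
	if ((new_prio <= 1) || direct_set || niced)
		return baseline;	/* ignore outliers */
	return (new_prio < baseline) ? new_prio : baseline;
}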
 
 
@@ -3865,7 +4406,7 @@ void reset_job_priority(void)
 
 	job_iterator = list_iterator_create(job_list);
 	while ((job_ptr = (struct job_record *) list_next(job_iterator))) {
-		if (job_ptr->priority == 1) {
+		if ((job_ptr->priority == 1) && (!IS_JOB_FINISHED(job_ptr))) {
 			_set_job_prio(job_ptr);
 			count++;
 		}
@@ -3883,15 +4424,24 @@ void reset_job_priority(void)
  */
 static bool _top_priority(struct job_record *job_ptr)
 {
-#ifdef HAVE_BG
-	/* On BlueGene, all jobs run ASAP. 
-	 * Priority only matters within a specific job size. */
-	return true;
-
-#else
 	struct job_details *detail_ptr = job_ptr->details;
 	bool top;
 
+#ifdef HAVE_BG
+	uint16_t static_part = 0;
+	int rc;
+
+	/* On BlueGene with static partitioning, we don't want to delay
+	 * jobs based upon priority since jobs of different sizes can 
+	 * execute on different sets of nodes. While sched/backfill would
+	 * eventually start the job if delayed here based upon priority,
+	 * that could delay the initiation of a job by a few seconds. */
+	rc = select_g_get_info_from_plugin(SELECT_STATIC_PART, job_ptr, 
+					   &static_part);
+	if ((rc == SLURM_SUCCESS) && (static_part == 1))
+		return true;
+#endif
+
 	if (job_ptr->priority == 0)	/* user held */
 		top = false;
 	else {
@@ -3908,6 +4458,18 @@ static bool _top_priority(struct job_record *job_ptr)
 				continue;
 			if (!job_independent(job_ptr2))
 				continue;
+			if ((job_ptr2->resv_name && (!job_ptr->resv_name)) ||
+			    ((!job_ptr2->resv_name) && job_ptr->resv_name))
+				continue;	/* different reservation */
+			if (job_ptr2->resv_name && job_ptr->resv_name &&
+			    (!strcmp(job_ptr2->resv_name, 
+				     job_ptr->resv_name))) {
+				/* same reservation */
+				if (job_ptr2->priority <= job_ptr->priority)
+					continue;
+				top = false;
+				break;
+			}
 			if (job_ptr2->part_ptr == job_ptr->part_ptr) {
 				/* same partition */
 				if (job_ptr2->priority <= job_ptr->priority)
@@ -3931,13 +4493,15 @@ static bool _top_priority(struct job_record *job_ptr)
 	}
 
 	if ((!top) && detail_ptr) {	/* not top prio */
-		if (job_ptr->priority == 0)		/* user/admin hold */
+		if (job_ptr->priority == 0) {		/* user/admin hold */
 			job_ptr->state_reason = WAIT_HELD;
-		else if (job_ptr->priority != 1)	/* not system hold */
+			xfree(job_ptr->state_desc);
+		} else if (job_ptr->priority != 1) {	/* not system hold */
 			job_ptr->state_reason = WAIT_PRIORITY;
+			xfree(job_ptr->state_desc);
+		}
 	}
 	return top;
-#endif
 }
 
 
@@ -4023,27 +4587,50 @@ int update_job(job_desc_msg_t * job_specs, uid_t uid)
 			info("update_job: setting time_limit to %u for "
 			     "job_id %u", job_specs->time_limit, 
 			     job_specs->job_id);
+			update_accounting = true;
+		} else if (IS_JOB_PENDING(job_ptr) && job_ptr->part_ptr &&
+			   (job_ptr->part_ptr->max_time >= 
+			    job_specs->time_limit)) {
+			job_ptr->time_limit = job_specs->time_limit;
+			info("update_job: setting time_limit to %u for "
+			     "job_id %u", job_specs->time_limit, 
+			     job_specs->job_id);
+			update_accounting = true;
 		} else {
-			error("Attempt to increase time limit for job %u",
-			      job_specs->job_id);
+			info("Attempt to increase time limit for job %u",
+			     job_specs->job_id);
 			error_code = ESLURM_ACCESS_DENIED;
 		}
 	}
 
+	if (job_specs->reservation) {
+		if (!IS_JOB_PENDING(job_ptr)) {
+			error_code = ESLURM_DISABLED;
+		} else {
+			int rc;
+			char *save_resv_name = job_ptr->resv_name;
+			job_ptr->resv_name = job_specs->reservation;
+			rc = validate_job_resv(job_ptr);
+			if (rc == SLURM_SUCCESS) {
+				info("update_job: setting reservation to %s "
+				     "for job_id %u", job_ptr->resv_name, 
+				     job_ptr->job_id);
+				xfree(save_resv_name);
+				job_specs->reservation = NULL;	/* Nothing to free */
+				update_accounting = true;
+			} else {
+				/* Restore reservation info */
+				job_ptr->resv_name = save_resv_name;
+				error_code = rc;
+			}
+		}
+	}
+
 	if (job_specs->comment && wiki_sched && (!super_user)) {
 		/* User must use Moab command to change job comment */
 		error("Attempt to change comment for job %u",
 		      job_specs->job_id);
 		error_code = ESLURM_ACCESS_DENIED;
-#if 0
-		if (wiki_sched && strstr(job_ptr->comment, "QOS:")) {
-			if (strstr(job_ptr->comment, "FLAGS:PREEMPTOR"))
-				job_ptr->qos = QOS_EXPEDITE;
-			else if (strstr(job_ptr->comment, "FLAGS:PREEMPTEE"))
-				job_ptr->qos = QOS_STANDBY;
-			else
-				job_ptr->qos = QOS_NORMAL;
-#endif
 	} else if (job_specs->comment) {
 		xfree(job_ptr->comment);
 		job_ptr->comment = job_specs->comment;
@@ -4052,12 +4639,28 @@ int update_job(job_desc_msg_t * job_specs, uid_t uid)
 		     job_ptr->comment, job_specs->job_id);
 
 		if (wiki_sched && strstr(job_ptr->comment, "QOS:")) {
+			acct_qos_rec_t qos_rec;
+
+			memset(&qos_rec, 0, sizeof(acct_qos_rec_t));
+
 			if (strstr(job_ptr->comment, "FLAGS:PREEMPTOR"))
-				job_ptr->qos = QOS_EXPEDITE;
+				qos_rec.name = "expedite";
 			else if (strstr(job_ptr->comment, "FLAGS:PREEMPTEE"))
-				job_ptr->qos = QOS_STANDBY;
+				qos_rec.name = "standby";
 			else
-				job_ptr->qos = QOS_NORMAL;
+				qos_rec.name = "normal";
+			
+			if((assoc_mgr_fill_in_qos(acct_db_conn, &qos_rec,
+						  accounting_enforce,
+						  (acct_qos_rec_t **)
+						  &job_ptr->qos_ptr))
+			   != SLURM_SUCCESS) {
+				verbose("Invalid qos (%s) for job_id %u",
+					qos_rec.name, job_ptr->job_id);
+				/* not a fatal error, qos could have
+				 * been removed */
+			} else 
+				job_ptr->qos = qos_rec.id;
 		}
 	}
 
@@ -4068,11 +4671,21 @@ int update_job(job_desc_msg_t * job_specs, uid_t uid)
 	}
 
 	if (job_specs->priority != NO_VAL) {
-		if (!IS_JOB_PENDING(job_ptr) || (detail_ptr == NULL)) 
+		/* If we are doing time slicing we could update the
+		   priority of the job while running to give better
+		   position (larger time slices) than competing jobs
+		*/
+		if (IS_JOB_FINISHED(job_ptr) || (detail_ptr == NULL)) 
 			error_code = ESLURM_DISABLED;
 		else if (super_user
 			 ||  (job_ptr->priority > job_specs->priority)) {
-			job_ptr->priority = job_specs->priority;
+			if(job_specs->priority == INFINITE) {
+				job_ptr->direct_set_prio = 0;
+				_set_job_prio(job_ptr);
+			} else {
+				job_ptr->direct_set_prio = 1;
+				job_ptr->priority = job_specs->priority;
+			}
 			info("update_job: setting priority to %u for "
 			     "job_id %u", job_ptr->priority, 
 			     job_specs->job_id);
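In the priority-update branch above, INFINITE acts as a "release"
sentinel: it clears the direct-set flag and lets the scheduler
recompute a priority, while any concrete value pins the priority until
released. A toy restatement (the constant and the recompute stub are
invented):

#define TOY_INFINITE 0xffffffffu

struct toy_prio { unsigned priority; int direct_set; };

static void toy_update_priority(struct toy_prio *job, unsigned requested)
{
	if (requested == TOY_INFINITE) {
		job->direct_set = 0;
		job->priority = 42;	/* stand-in for _set_job_prio() */
	} else {
		job->direct_set = 1;
		job->priority = requested;	/* pinned until released */
	}
}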
@@ -4085,11 +4698,12 @@ int update_job(job_desc_msg_t * job_specs, uid_t uid)
 	}
 
 	if (job_specs->nice != NICE_OFFSET) {
-		if (!IS_JOB_PENDING(job_ptr)) 
+		if (IS_JOB_FINISHED(job_ptr)) 
 			error_code = ESLURM_DISABLED;
 		else if (super_user || (job_specs->nice < NICE_OFFSET)) {
-			job_ptr->priority -= ((int)job_specs->nice - 
-					      NICE_OFFSET);
+			job_ptr->details->nice = job_specs->nice;
+			_set_job_prio(job_ptr);
+			
 			info("update_job: setting priority to %u for "
 			     "job_id %u", job_ptr->priority,
 			     job_specs->job_id);
@@ -4356,58 +4970,8 @@ int update_job(job_desc_msg_t * job_specs, uid_t uid)
 		if (!IS_JOB_PENDING(job_ptr))
 			error_code = ESLURM_DISABLED;
 		else {
-			char *jname = NULL, *wckey = NULL;
-			char *jname_new = NULL;
-			char *temp = NULL;
-			
-			/* first set the jname to the job_ptr->name */
-			jname = xstrdup(job_ptr->name);
-			/* then grep for " since that is the delimiter for
-			   the wckey */
-			temp = strchr(jname, '\"');
-			if(temp) {
-				/* if we have a wckey set the " to NULL to
-				 * end the jname */
-				temp[0] = '\0';
-				/* increment and copy the remainder */
-				temp++;
-				wckey = xstrdup(temp);
-			}
-			
-			/* first set the jname to the job_specs->name */
-			jname_new = xstrdup(job_specs->name);
-			/* then grep for " since that is the delimiter for
-			   the wckey */
-			temp = strchr(jname_new, '\"');
-			if(temp) {
-				/* if we have a wckey set the " to NULL to
-				 * end the jname */
-				temp[0] = '\0';
-				/* increment and copy the remainder */
-				temp++;
-				xfree(wckey);
-				wckey = xstrdup(temp);
-			}
-			
-			if(jname_new && jname_new[0]) {
-				xfree(jname);
-				jname = jname_new;
-			}
-			
-			xfree(job_ptr->name);		
-			if(jname) {
-				xstrfmtcat(job_ptr->name, "%s", jname);
-				xfree(jname);
-			} 
-
-			if(wckey) {
-				int rc = update_job_wckey("update_job",
-							  job_ptr, 
-							  wckey);
-				if (rc != SLURM_SUCCESS)
-					error_code = rc;
-				xfree(wckey);			
-			}
+			xfree(job_ptr->name);
+			job_ptr->name = job_specs->name;
+			job_specs->name = NULL;
 
 			info("update_job: setting name to %s for job_id %u",
 			     job_ptr->name, job_specs->job_id);
@@ -4415,6 +4979,21 @@ int update_job(job_desc_msg_t * job_specs, uid_t uid)
 		}
 	}
 
+	if (job_specs->wckey) {
+		if (!IS_JOB_PENDING(job_ptr))
+			error_code = ESLURM_DISABLED;
+		else {
+			int rc = update_job_wckey("update_job",
+						  job_ptr, 
+						  job_specs->wckey);
+			if (rc != SLURM_SUCCESS)
+				error_code = rc;
+			else 
+				update_accounting = true;
+		}
+	}
+
 	if (job_specs->account) {
 		if (!IS_JOB_PENDING(job_ptr))
 			error_code = ESLURM_DISABLED;
@@ -4425,7 +5004,6 @@ int update_job(job_desc_msg_t * job_specs, uid_t uid)
 				error_code = rc;
 			else
 				update_accounting = true;
-
 		}
 	}
 
@@ -4451,9 +5029,9 @@ int update_job(job_desc_msg_t * job_specs, uid_t uid)
 				error_code = ESLURM_INVALID_ACCOUNT;
 				/* Let update proceed. Note there is an invalid
 				 * association ID for accounting purposes */
-			} else {
+			} else 
 				job_ptr->assoc_id = assoc_rec.id;
-			}
+
 			xfree(job_ptr->partition);
 			job_ptr->partition = xstrdup(job_specs->partition);
 			job_ptr->part_ptr = tmp_part_ptr;
@@ -4760,6 +5338,7 @@ extern void validate_jobs_on_node(slurm_node_registration_status_msg_t *reg_msg)
 	int i, node_inx, jobs_on_node;
 	struct node_record *node_ptr;
 	struct job_record *job_ptr;
+	struct step_record *step_ptr;
 	time_t now = time(NULL);
 
 	node_ptr = find_node_record(reg_msg->node_name);
@@ -4785,7 +5364,8 @@ extern void validate_jobs_on_node(slurm_node_registration_status_msg_t *reg_msg)
 			error("Orphan job %u.%u reported on node %s",
 				reg_msg->job_id[i], reg_msg->step_id[i], 
 				reg_msg->node_name);
-			abort_job_on_node(reg_msg->job_id[i], job_ptr, node_ptr);
+			abort_job_on_node(reg_msg->job_id[i], 
+					  job_ptr, node_ptr);
 		}
 
 		else if ((job_ptr->job_state == JOB_RUNNING) ||
@@ -4801,6 +5381,11 @@ extern void validate_jobs_on_node(slurm_node_registration_status_msg_t *reg_msg)
 					 * batch jobs */
 					job_ptr->time_last_active = now;
 				}
+				step_ptr = find_step_record(job_ptr, 
+							    reg_msg->step_id[i]);
+				if (step_ptr)
+					step_ptr->time_last_active = now;
 			} else {
 				/* Typically indicates a job requeue and
 				 * restart on another nodes. A node from the
@@ -4826,8 +5411,8 @@ extern void validate_jobs_on_node(slurm_node_registration_status_msg_t *reg_msg)
 			error("Registered PENDING job %u.%u on node %s ",
 				reg_msg->job_id[i], reg_msg->step_id[i], 
 				reg_msg->node_name);
-			abort_job_on_node(reg_msg->job_id[i], job_ptr, 
-					  node_ptr);
+			abort_job_on_node(reg_msg->job_id[i], 
+					  job_ptr, node_ptr);
 		}
 
 		else {		/* else job is supposed to be done */
@@ -4857,24 +5442,33 @@ extern void validate_jobs_on_node(slurm_node_registration_status_msg_t *reg_msg)
 }
 
 /* Purge any batch job that should have its script running on node 
- * node_inx, but is not (i.e. its time_last_active != now) */
+ * node_inx, but is not. Allow "batch_start_timeout" secs for startup. 
+ *
+ * Also notify srun if any job steps should be active on this node
+ * but are not found. */
 static void _purge_lost_batch_jobs(int node_inx, time_t now)
 {
 	ListIterator job_iterator;
 	struct job_record *job_ptr;
-	time_t recent = now - slurm_get_batch_start_timeout();
+	uint16_t batch_start_timeout = slurm_get_batch_start_timeout() +
+				       slurm_get_resume_timeout();
+	time_t recent = now - batch_start_timeout;
 
 	job_iterator = list_iterator_create(job_list);
 	while ((job_ptr = (struct job_record *) list_next(job_iterator))) {
 		bool job_active = ((job_ptr->job_state == JOB_RUNNING) ||
 				   (job_ptr->job_state == JOB_SUSPENDED));
-		if ((!job_active)                       ||
-		    (job_ptr->batch_flag == 0)          ||
-		    (job_ptr->time_last_active == now)  ||
+		if ((!job_active) ||
+		    (!bit_test(job_ptr->node_bitmap, node_inx)))
+			continue;
+		if (job_ptr->batch_flag == 0) {
+			_notify_srun_missing_step(job_ptr, node_inx, now);
+			continue;
+		}
+		if (((job_ptr->time_last_active+batch_start_timeout) > now) ||
 		    (job_ptr->start_time >= recent)     ||
 		    (node_inx != bit_ffs(job_ptr->node_bitmap)))
 			continue;
-
 		info("Batch JobId=%u missing from master node, killing it", 
 			job_ptr->job_id);
 		job_complete(job_ptr->job_id, 0, false, NO_VAL);
@@ -4882,6 +5476,30 @@ static void _purge_lost_batch_jobs(int node_inx, time_t now)
 	list_iterator_destroy(job_iterator);
 }
 
+static void _notify_srun_missing_step(struct job_record *job_ptr, int node_inx, 
+				      time_t now)
+{
+	ListIterator step_iterator;
+	struct step_record *step_ptr;
+	char *node_name = node_record_table_ptr[node_inx].name;
+
+	xassert(job_ptr);
+	step_iterator = list_iterator_create (job_ptr->step_list);
+	while ((step_ptr = (struct step_record *) list_next (step_iterator))) {
+		if (step_ptr->time_last_active >= now) {
+			/* Back up timer in case more than one node 
+			 * registration happens at this same time.
+			 * We don't want this node's registration
+			 * to count toward a different node's 
+			 * registration message. */
+			step_ptr->time_last_active = now - 1;
+		} else if (step_ptr->host && step_ptr->port) {
+			srun_step_missing(step_ptr, node_name);
+		}
+	}		
+	list_iterator_destroy (step_iterator);
+}
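The "now - 1" adjustment above deserves a second look. A step's
time_last_active is refreshed when a node reports the step, and several
node registrations can land within the same second. Backing the stamp
up by one second after the first match means a second registration in
that same second no longer sees a fresh stamp, so a genuinely missing
step still gets reported to srun (when a host/port is known). A toy
version of just the test, assuming those semantics:

#include <time.h>

static int toy_step_seen(time_t *time_last_active, time_t now)
{
	if (*time_last_active >= now) {
		*time_last_active = now - 1;	/* consume this sighting */
		return 1;
	}
	return 0;	/* stale: notify srun that the step is missing */
}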
+
 /*
  * abort_job_on_node - Kill the specific job_id on a specific node,
  *	the request is not processed immediately, but queued. 
@@ -5066,6 +5684,7 @@ static void _validate_job_files(List batch_dirs)
 			job_ptr->job_state = JOB_FAILED;
 			job_ptr->exit_code = 1;
 			job_ptr->state_reason = FAIL_SYSTEM;
+			xfree(job_ptr->state_desc);
 			job_ptr->start_time = job_ptr->end_time = time(NULL);
 			job_completion_logger(job_ptr);
 		}
@@ -5224,6 +5843,10 @@ extern bool job_epilog_complete(uint32_t job_id, char *node_name,
 				 * named socket purged, so delay for at 
 				 * least ten seconds. */
 				job_ptr->details->begin_time = time(NULL) + 10;
+				job_ptr->start_time = job_ptr->end_time = 0;
+				jobacct_storage_g_job_start(
+					acct_db_conn, slurmctld_cluster_name,
+					job_ptr);
 			}
 		}
 		return true;
@@ -5317,19 +5940,29 @@ extern bool job_independent(struct job_record *job_ptr)
 
 	if (detail_ptr && (detail_ptr->begin_time > now)) {
 		job_ptr->state_reason = WAIT_TIME;
+		xfree(job_ptr->state_desc);
+		return false;	/* not yet time */
+	}
+
+	if (job_test_resv_now(job_ptr) != SLURM_SUCCESS) {
+		job_ptr->state_reason = WAIT_RESERVATION;
+		xfree(job_ptr->state_desc);
 		return false;	/* not yet time */
 	}
 
 	rc = test_job_dependency(job_ptr);
 	if (rc == 0) {
 		bool send_acct_rec = false;
-		if (job_ptr->state_reason == WAIT_DEPENDENCY)
+		if (job_ptr->state_reason == WAIT_DEPENDENCY) {
 			job_ptr->state_reason = WAIT_NO_REASON;
+			xfree(job_ptr->state_desc);
+		}
 		if (detail_ptr && (detail_ptr->begin_time == 0)) {
 			detail_ptr->begin_time = now;
 			send_acct_rec = true;
 		} else if (job_ptr->state_reason == WAIT_TIME) {
 			job_ptr->state_reason = WAIT_NO_REASON;
+			xfree(job_ptr->state_desc);
 			send_acct_rec = true;
 		}
 		if (send_acct_rec) {
@@ -5343,12 +5976,14 @@ extern bool job_independent(struct job_record *job_ptr)
 		return true;
 	} else if (rc == 1) {
 		job_ptr->state_reason = WAIT_DEPENDENCY;
+		xfree(job_ptr->state_desc);
 		return false;
 	} else {	/* rc == 2 */
 		time_t now = time(NULL);
 		info("Job dependency can't be satisfied, cancelling job %u",
 			job_ptr->job_id);
 		job_ptr->job_state	= JOB_CANCELLED;
+		xfree(job_ptr->state_desc);
 		job_ptr->start_time	= now;
 		job_ptr->end_time	= now;
 		job_completion_logger(job_ptr);
@@ -5779,7 +6414,9 @@ extern int job_requeue (uid_t uid, uint32_t job_id, slurm_fd conn_fd)
 	job_ptr->job_state = JOB_PENDING;
 	if (job_ptr->node_cnt)
 		job_ptr->job_state |= JOB_COMPLETING;
+	
 	job_ptr->details->submit_time = now;
+	job_ptr->restart_cnt++;
 	/* Since the job completion logger removes the submit we need
 	   to add it again.
 	*/
@@ -5901,7 +6538,7 @@ static bool _validate_acct_policy(job_desc_msg_t *job_desc,
 		     >= assoc_ptr->grp_submit_jobs)) {
 			info("job submit for user %s(%u): "
 			     "group max submit job limit exceded %u "
-			     "for account %s",
+			     "for account '%s'",
 			     user_name,
 			     job_desc->user_id, 
 			     assoc_ptr->grp_submit_jobs,
@@ -5909,31 +6546,10 @@ static bool _validate_acct_policy(job_desc_msg_t *job_desc,
 			return false;
 		}
 
-		if ((assoc_ptr->grp_wall != NO_VAL) &&
-		    (assoc_ptr->grp_wall != INFINITE)) {
-			time_limit = assoc_ptr->grp_wall;
-			if (job_desc->time_limit == NO_VAL) {
-				if (part_ptr->max_time == INFINITE)
-					job_desc->time_limit = time_limit;
-				else 
-					job_desc->time_limit =
-						MIN(time_limit, 
-						    part_ptr->max_time);
-				timelimit_set = 1;
-			} else if (timelimit_set && 
-				   job_desc->time_limit > time_limit) {
-				job_desc->time_limit = time_limit;
-			} else if (job_desc->time_limit > time_limit) {
-				info("job submit for user %s(%u): "
-				     "time limit %u exceeds group "
-				     "time limit %u for account %s",
-				     user_name,
-				     job_desc->user_id, 
-				     job_desc->time_limit, time_limit,
-				     assoc_ptr->acct);
-				return false;
-			}
-		}
+
+		/* For validation we don't need to look at
+		 * assoc_ptr->grp_wall; it is checked while the job
+		 * is running. */
 		
 		/* We don't need to look at the regular limits for
 		 * parents since we have pre-propogated them, so just
@@ -6067,6 +6683,7 @@ extern int job_cancel_by_assoc_id(uint32_t assoc_id)
 		   locked before this. */
 		job_signal(job_ptr->job_id, SIGKILL, 0, 0);
 		job_ptr->state_reason = FAIL_BANK_ACCOUNT;
+		xfree(job_ptr->state_desc);
 		cnt++;
 	}
 	list_iterator_destroy(job_iterator);
@@ -6083,7 +6700,7 @@ extern int job_cancel_by_assoc_id(uint32_t assoc_id)
 extern int update_job_account(char *module, struct job_record *job_ptr, 
 			      char *new_account)
 {
-	acct_association_rec_t assoc_rec, *assoc_ptr;
+	acct_association_rec_t assoc_rec;
 
 	if ((!IS_JOB_PENDING(job_ptr)) || (job_ptr->details == NULL)) {
 		info("%s: attempt to modify account for non-pending "
@@ -6097,12 +6714,14 @@ extern int update_job_account(char *module, struct job_record *job_ptr,
 	assoc_rec.partition = job_ptr->partition;
 	assoc_rec.acct      = new_account;
 	if (assoc_mgr_fill_in_assoc(acct_db_conn, &assoc_rec,
-				    accounting_enforce, &assoc_ptr)) {
+				    accounting_enforce,
+				    (acct_association_rec_t **)
+				    &job_ptr->assoc_ptr)) {
 		info("%s: invalid account %s for job_id %u",
 		     module, new_account, job_ptr->job_id);
 		return ESLURM_INVALID_ACCOUNT;
 	} else if(association_based_accounting 
-		  && !assoc_ptr 
+		  && !job_ptr->assoc_ptr 
 		  && !(accounting_enforce & ACCOUNTING_ENFORCE_ASSOCS)) {
 		/* if not enforcing associations we want to look for
 		   the default account and use it to avoid getting
@@ -6110,8 +6729,10 @@ extern int update_job_account(char *module, struct job_record *job_ptr,
 		*/
 		assoc_rec.acct = NULL;
 		assoc_mgr_fill_in_assoc(acct_db_conn, &assoc_rec,
-					accounting_enforce, &assoc_ptr);
-		if(!assoc_ptr) {
+					accounting_enforce, 
+					(acct_association_rec_t **)
+					&job_ptr->assoc_ptr);
+		if(!job_ptr->assoc_ptr) {
 			debug("%s: we didn't have an association for account "
 			      "'%s' and user '%u', and we can't seem to find "
 			      "a default one either.  Keeping new account "
@@ -6135,7 +6756,6 @@ extern int update_job_account(char *module, struct job_record *job_ptr,
 		     module, job_ptr->job_id);
 	}
 	job_ptr->assoc_id = assoc_rec.id;
-	job_ptr->assoc_ptr = (void *) assoc_ptr;
 
 	last_job_update = time(NULL);
 
@@ -6262,3 +6882,544 @@ extern int send_jobs_to_accounting()
 
 	return SLURM_SUCCESS;
 }
+
+/* Perform checkpoint operation on a job */
+extern int job_checkpoint(checkpoint_msg_t *ckpt_ptr, uid_t uid, 
+			  slurm_fd conn_fd)
+{
+	int rc = SLURM_SUCCESS;
+	struct job_record *job_ptr;
+	struct step_record *step_ptr;
+	checkpoint_resp_msg_t resp_data;
+	slurm_msg_t resp_msg;
+
+	slurm_msg_t_init(&resp_msg);
+	
+	/* find the job */
+	job_ptr = find_job_record (ckpt_ptr->job_id);
+	if (job_ptr == NULL) {
+		rc = ESLURM_INVALID_JOB_ID;
+		goto reply;
+	}
+	if ((uid != job_ptr->user_id) && ! validate_super_user(uid)) {
+		rc = ESLURM_ACCESS_DENIED ;
+		goto reply;
+	}
+	if (job_ptr->job_state == JOB_PENDING) {
+		rc = ESLURM_JOB_PENDING;
+		goto reply;
+	} else if (job_ptr->job_state == JOB_SUSPENDED) {
+		/* job can't get cycles for checkpoint 
+		 * if it is already suspended */
+		rc = ESLURM_DISABLED;
+		goto reply;
+	} else if (job_ptr->job_state != JOB_RUNNING) {
+		rc = ESLURM_ALREADY_DONE;
+		goto reply;
+	}
+
+	memset((void *)&resp_data, 0, sizeof(checkpoint_resp_msg_t));
+
+	if (job_ptr->batch_flag) { /* operate on batch job */
+		if ((ckpt_ptr->op == CHECK_CREATE) ||
+		    (ckpt_ptr->op == CHECK_VACATE)) {
+			if (job_ptr->details == NULL) {
+				rc = ESLURM_DISABLED;
+				goto reply;
+			}
+			if (ckpt_ptr->image_dir == NULL) {
+				if (job_ptr->details->ckpt_dir == NULL) {
+					rc = ESLURM_DISABLED;
+					goto reply;
+				}
+				ckpt_ptr->image_dir =
+					xstrdup(job_ptr->details->ckpt_dir);
+			}
+
+			rc = _checkpoint_job_record(job_ptr, 
+						    ckpt_ptr->image_dir);
+			if (rc != SLURM_SUCCESS)
+				goto reply;
+		}
+		/* append job id to ckpt image dir */
+		xstrfmtcat(ckpt_ptr->image_dir, "/%u", job_ptr->job_id);
+		rc = checkpoint_op(ckpt_ptr->job_id, ckpt_ptr->step_id, NULL,
+				   ckpt_ptr->op, ckpt_ptr->data,
+				   ckpt_ptr->image_dir, &resp_data.event_time, 
+				   &resp_data.error_code, &resp_data.error_msg);
+		info("checkpoint_op %u of %u.%u complete, rc=%d",
+		     ckpt_ptr->op, ckpt_ptr->job_id, ckpt_ptr->step_id, rc);
+		last_job_update = time(NULL);
+	} else {		/* operate on all of a job's steps */
+		int update_rc = -2;
+		ListIterator step_iterator;
+		
+		step_iterator = list_iterator_create (job_ptr->step_list);
+		while ((step_ptr = (struct step_record *) 
+					list_next (step_iterator))) {
+			char *image_dir = NULL;
+			if (ckpt_ptr->image_dir) {
+				image_dir = xstrdup(ckpt_ptr->image_dir);
+			} else {
+				image_dir = xstrdup(step_ptr->ckpt_dir);
+			}
+			xstrfmtcat(image_dir, "/%u.%hu", job_ptr->job_id, 
+				   step_ptr->step_id);
+			update_rc = checkpoint_op(ckpt_ptr->job_id,
+						  step_ptr->step_id,
+						  step_ptr,
+						  ckpt_ptr->op, 
+						  ckpt_ptr->data,
+						  image_dir,
+						  &resp_data.event_time,
+						  &resp_data.error_code,
+						  &resp_data.error_msg);
+			info("checkpoint_op %u of %u.%u complete, rc=%d",
+			     ckpt_ptr->op, ckpt_ptr->job_id, 
+			     step_ptr->step_id, update_rc);
+			rc = MAX(rc, update_rc);
+			xfree(image_dir);
+		}
+		if (update_rc != -2)	/* some work done */
+			last_job_update = time(NULL);
+		list_iterator_destroy (step_iterator);
+	}
+
+    reply:
+	if (conn_fd < 0)	/* periodic checkpoint */
+		return rc;
+	
+	if ((rc == SLURM_SUCCESS) &&
+	    ((ckpt_ptr->op == CHECK_ABLE) || (ckpt_ptr->op == CHECK_ERROR))) {
+		resp_msg.msg_type = RESPONSE_CHECKPOINT;
+		resp_msg.data = &resp_data;
+		(void) slurm_send_node_msg(conn_fd, &resp_msg);
+	} else {
+		return_code_msg_t rc_msg;
+		rc_msg.return_code = rc;
+		resp_msg.msg_type  = RESPONSE_SLURM_RC;
+		resp_msg.data      = &rc_msg;
+		(void) slurm_send_node_msg(conn_fd, &resp_msg);
+	}
+	return rc;
+}
+
+/*
+ * _checkpoint_job_record - save job to file for checkpoint
+ */
+static int _checkpoint_job_record (struct job_record *job_ptr, char *image_dir)
+{
+	static int high_buffer_size = (1024*1024);
+	char *ckpt_file = NULL, *old_file = NULL, *new_file = NULL;
+	int ckpt_fd, error_code = SLURM_SUCCESS;
+	Buf buffer = init_buf(high_buffer_size);
+
+	ckpt_file = xstrdup(slurmctld_conf.job_ckpt_dir);
+	xstrfmtcat(ckpt_file, "/%u.ckpt", job_ptr->job_id);
+
+	debug("_checkpoint_job_record: checkpoint job record of %u to file %s",
+	      job_ptr->job_id, ckpt_file);
+	
+	old_file = xstrdup(ckpt_file);
+	xstrcat(old_file, ".old");
+
+	new_file = xstrdup(ckpt_file);
+	xstrcat(new_file, ".new");
+
+	/* save version string */
+	packstr(JOB_CKPT_VERSION, buffer);
+
+	/* save checkpoint image directory */
+	packstr(image_dir, buffer);
+
+	_pack_job_for_ckpt(job_ptr, buffer);
+
+	ckpt_fd = creat(new_file, 0600);
+	if (ckpt_fd < 0) {
+		error("Can't ckpt job, create file %s error: %m",
+		      new_file);
+		error_code = errno;
+	} else {
+		int pos = 0, nwrite = get_buf_offset(buffer), amount;
+		char *data = (char *)get_buf_data(buffer);
+		while (nwrite > 0) {
+			amount = write(ckpt_fd, &data[pos], nwrite);
+			if ((amount < 0) && (errno != EINTR)) {
+				error("Error writing file %s, %m", new_file);
+				error_code = errno;
+				break;
+			} else if (amount >= 0) {
+				nwrite -= amount;
+				pos    += amount;
+			}
+		}
+		fsync(ckpt_fd);
+		close(ckpt_fd);
+	}
+	if (error_code)
+		(void) unlink(new_file);
+	else {			/* file shuffle */
+		(void) unlink(old_file);
+		(void) link(ckpt_file, old_file);
+		(void) unlink(ckpt_file);
+		(void) link(new_file, ckpt_file);
+		(void) unlink(new_file);
+	}
+
+	xfree(ckpt_file);
+	xfree(old_file);
+	xfree(new_file);
+	free_buf(buffer);
+
+	return error_code;
+}
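The .new/.old shuffle above is the usual crash-safe update pattern:
write and fsync a temporary file, then rotate with link()/unlink() so
that a complete checkpoint file exists at every instant. Condensed into
a hypothetical helper:

#include <unistd.h>

static void toy_rotate_ckpt(const char *file, const char *old_f,
			    const char *new_f)
{
	(void) unlink(old_f);		/* drop the previous backup     */
	(void) link(file, old_f);	/* current file becomes backup  */
	(void) unlink(file);
	(void) link(new_f, file);	/* fsync'ed new data goes live  */
	(void) unlink(new_f);
}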
+
+/*
+ * _pack_job_for_ckpt - save RUNNING job to buffer for checkpoint
+ *
+ *   Just save enough information to restart it
+ *
+ * IN job_ptr - pointer to the job to be checkpointed
+ * IN buffer - buffer to save the job state
+ */
+static void _pack_job_for_ckpt (struct job_record *job_ptr, Buf buffer)
+{
+	slurm_msg_t msg;
+	job_desc_msg_t *job_desc;
+
+	/* save allocated nodes */
+	packstr(job_ptr->nodes, buffer);
+
+	/* save job req */
+	job_desc = _copy_job_record_to_job_desc(job_ptr);
+	msg.msg_type = REQUEST_SUBMIT_BATCH_JOB;
+	msg.data = job_desc;
+	pack_msg(&msg, buffer);
+
+	/* free the environment since all strings are stored in one 
+	 * xmalloced buffer */
+	if (job_desc->environment) {
+		xfree(job_desc->environment[0]);
+		xfree(job_desc->environment);
+		job_desc->env_size = 0;
+	}
+	slurm_free_job_desc_msg(job_desc);
+}
+
+/*
+ * _copy_job_record_to_job_desc - construct a job_desc_msg_t for a job.
+ * IN job_ptr - the job record
+ * RET the job_desc_msg_t, NULL on error
+ */
+static job_desc_msg_t *
+_copy_job_record_to_job_desc(struct job_record *job_ptr)
+{
+	job_desc_msg_t *job_desc;
+	struct job_details *details = job_ptr->details;
+	multi_core_data_t *mc_ptr = details->mc_ptr;
+	int i;
+
+	/* construct a job_desc_msg_t from job */
+	job_desc = xmalloc(sizeof(job_desc_msg_t));
+	if (!job_desc) {
+		error("_pack_job_for_ckpt: memory exhausted");
+		return NULL;
+	}
+
+	job_desc->account           = xstrdup(job_ptr->account);
+	job_desc->acctg_freq        = details->acctg_freq;
+	job_desc->alloc_node        = xstrdup(job_ptr->alloc_node);
+	/* Since the allocating salloc or srun is not expected to exist
+	 * when this checkpointed job is restarted, do not save these:
+	 *
+	 * job_desc->alloc_resp_port   = job_ptr->alloc_resp_port;
+	 * job_desc->alloc_sid         = job_ptr->alloc_sid;
+	 */
+	job_desc->argc              = details->argc;
+	job_desc->argv              = xmalloc(sizeof(char *) * job_desc->argc);
+	for (i = 0; i < job_desc->argc; i ++)
+		job_desc->argv[i]   = xstrdup(details->argv[i]);
+	job_desc->begin_time        = details->begin_time;
+	job_desc->ckpt_interval     = job_ptr->ckpt_interval;
+	job_desc->ckpt_dir          = xstrdup(details->ckpt_dir);
+	job_desc->comment           = xstrdup(job_ptr->comment);
+	job_desc->contiguous        = details->contiguous;
+	job_desc->cpu_bind          = xstrdup(details->cpu_bind);
+	job_desc->cpu_bind_type     = details->cpu_bind_type;
+	job_desc->dependency        = xstrdup(details->dependency);
+	job_desc->environment       = get_job_env(job_ptr, &job_desc->env_size);
+	job_desc->err               = xstrdup(details->err);
+	job_desc->exc_nodes         = xstrdup(details->exc_nodes);
+	job_desc->features          = xstrdup(details->features);
+	job_desc->group_id          = job_ptr->group_id;
+	job_desc->immediate         = 0; /* nowhere to get this value */
+	job_desc->in                = xstrdup(details->in);
+	job_desc->job_id            = job_ptr->job_id; /* XXX */
+	job_desc->kill_on_node_fail = job_ptr->kill_on_node_fail;
+	job_desc->licenses          = xstrdup(job_ptr->licenses);
+	job_desc->mail_type         = job_ptr->mail_type;
+	job_desc->mail_user         = xstrdup(job_ptr->mail_user);
+	job_desc->mem_bind          = xstrdup(details->mem_bind);
+	job_desc->mem_bind_type     = details->mem_bind_type;
+	job_desc->name              = xstrdup(job_ptr->name);
+	job_desc->network           = xstrdup(job_ptr->network);
+	job_desc->nice              = details->nice;
+	job_desc->num_tasks         = details->num_tasks;
+	job_desc->open_mode         = details->open_mode;
+	job_desc->other_port        = job_ptr->other_port; 
+	job_desc->out               = xstrdup(details->out);
+	job_desc->overcommit        = details->overcommit;
+	job_desc->partition         = xstrdup(job_ptr->partition);
+	job_desc->plane_size        = details->plane_size;
+	job_desc->priority          = job_ptr->priority;
+	job_desc->resp_host         = xstrdup(job_ptr->resp_host);
+	job_desc->req_nodes         = xstrdup(details->req_nodes);
+	job_desc->requeue           = details->requeue;
+	job_desc->reservation       = xstrdup(job_ptr->resv_name);
+	job_desc->script            = get_job_script(job_ptr);
+	job_desc->shared            = details->shared;
+	job_desc->task_dist         = details->task_dist;
+	job_desc->time_limit        = job_ptr->time_limit;
+	job_desc->user_id           = job_ptr->user_id;
+	job_desc->work_dir          = xstrdup(details->work_dir);
+	job_desc->job_min_procs     = details->job_min_procs;
+	job_desc->job_min_sockets   = mc_ptr->job_min_sockets;
+	job_desc->job_min_cores     = mc_ptr->job_min_cores;
+	job_desc->job_min_threads   = mc_ptr->job_min_threads;
+	job_desc->job_min_memory    = details->job_min_memory;
+	job_desc->job_min_tmp_disk  = details->job_min_tmp_disk;
+	job_desc->num_procs         = job_ptr->num_procs;
+	job_desc->min_nodes         = details->min_nodes;
+	job_desc->max_nodes         = details->max_nodes;
+	job_desc->min_sockets       = mc_ptr->min_sockets;
+	job_desc->max_sockets       = mc_ptr->max_sockets;
+	job_desc->min_cores         = mc_ptr->min_cores;
+	job_desc->max_cores         = mc_ptr->max_cores;
+	job_desc->min_threads       = mc_ptr->min_threads;
+	job_desc->max_threads       = mc_ptr->max_threads;
+	job_desc->cpus_per_task     = details->cpus_per_task;
+	job_desc->ntasks_per_node   = details->ntasks_per_node;
+	job_desc->ntasks_per_socket = mc_ptr->ntasks_per_socket;
+	job_desc->ntasks_per_core   = mc_ptr->ntasks_per_core;
+	job_desc->wckey             = xstrdup(job_ptr->wckey);
+#if 0
+	/* select_jobinfo is unused at job submit time, only its
+	 * components are set. We recover those from the structure below.
+	 * job_desc->select_jobinfo = select_g_copy_jobinfo(job_ptr->
+							    select_jobinfo); */
+
+	/* The following fields are used only on BlueGene systems.
+	 * Since BlueGene does not use the checkpoint/restart logic today,
+	 * we do not save them. */
+	select_g_get_jobinfo(job_ptr->select_jobinfo, SELECT_DATA_GEOMETRY, 
+			     &job_desc->geometry);
+	select_g_get_jobinfo(job_ptr->select_jobinfo, SELECT_DATA_CONN_TYPE, 
+			     &job_desc->conn_type);
+	select_g_get_jobinfo(job_ptr->select_jobinfo, SELECT_DATA_REBOOT, 
+			     &job_desc->reboot);
+	select_g_get_jobinfo(job_ptr->select_jobinfo, SELECT_DATA_ROTATE, 
+			     &job_desc->rotate);
+	select_g_get_jobinfo(job_ptr->select_jobinfo, SELECT_DATA_BLRTS_IMAGE, 
+			     &job_desc->blrtsimage);
+	select_g_get_jobinfo(job_ptr->select_jobinfo, SELECT_DATA_LINUX_IMAGE, 
+			     &job_desc->linuximage);
+	select_g_get_jobinfo(job_ptr->select_jobinfo, 
+			     SELECT_DATA_MLOADER_IMAGE, 
+			     &job_desc->mloaderimage);
+	select_g_get_jobinfo(job_ptr->select_jobinfo, 
+			     SELECT_DATA_RAMDISK_IMAGE, 
+			     &job_desc->ramdiskimage);
+#endif
+
+	return job_desc;
+}
+
+
+/*
+ * job_restart - Restart a batch job from checkpointed state
+ *
+ * Restarting a job is similar to submitting a new job, except that
+ * the job requirements are loaded from the checkpoint file and
+ * the job id is restored.
+ *
+ * IN ckpt_ptr - checkpoint request message 
+ * IN uid - user id of the user issuing the RPC
+ * IN conn_fd - file descriptor on which to send reply
+ * RET 0 on success, otherwise ESLURM error code
+ */
+extern int job_restart(checkpoint_msg_t *ckpt_ptr, uid_t uid, slurm_fd conn_fd)
+{
+	struct job_record *job_ptr;
+	char *image_dir = NULL, *ckpt_file, *data, *ver_str = NULL;
+	char *alloc_nodes = NULL;
+	int data_size;
+	Buf buffer;
+	uint32_t tmp_uint32;
+	slurm_msg_t msg, resp_msg;
+	return_code_msg_t rc_msg;
+	job_desc_msg_t *job_desc = NULL;
+	int rc = SLURM_SUCCESS;
+
+	if (ckpt_ptr->step_id != SLURM_BATCH_SCRIPT) {
+		rc = ESLURM_NOT_SUPPORTED;
+		goto reply;
+	}
+	
+	if ((job_ptr = find_job_record(ckpt_ptr->job_id)) &&
+	    ! IS_JOB_FINISHED(job_ptr)) {
+		rc = ESLURM_DISABLED;
+		goto reply;
+	}
+
+	ckpt_file = xstrdup(slurmctld_conf.job_ckpt_dir);
+	xstrfmtcat(ckpt_file, "/%u.ckpt", ckpt_ptr->job_id);
+
+	data = _read_job_ckpt_file(ckpt_file, &data_size);
+	xfree(ckpt_file);
+	
+	if (data == NULL) {
+		rc = errno;
+		goto reply;
+	}
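+	/* create_buf() takes ownership of "data"; free_buf() below
+	 * releases it */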
+	buffer = create_buf(data, data_size);
+
+	/* unpack version string */
+	safe_unpackstr_xmalloc(&ver_str, &tmp_uint32, buffer);
+	debug3("Version string in job_ckpt header is %s", ver_str);
+	if ((!ver_str) || (strcmp(ver_str, JOB_CKPT_VERSION) != 0)) {
+		error("***************************************************");
+		error("Can not restart from job ckpt, incompatable version");
+		error("***************************************************");
+		rc = EINVAL;
+		goto unpack_error;
+	}
+
+	/* unpack checkpoint image directory */
+	safe_unpackstr_xmalloc(&image_dir, &tmp_uint32, buffer);
+
+	/* unpack the allocated nodes */
+	safe_unpackstr_xmalloc(&alloc_nodes, &tmp_uint32, buffer);
+
+	/* unpack the job req */
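+	/* (the descriptor was packed as a REQUEST_SUBMIT_BATCH_JOB message,
+	 * so unpack_msg() can rebuild it) */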
+	msg.msg_type = REQUEST_SUBMIT_BATCH_JOB;
+	if (unpack_msg(&msg, buffer) != SLURM_SUCCESS) {
+		rc = EINVAL;
+		goto unpack_error;
+	}
+
+	job_desc = msg.data;
+
+	/* sanity check */
+	if (job_desc->job_id != ckpt_ptr->job_id) {
+		error("saved job id(%u) is different from required job id(%u)",
+		      job_desc->job_id, ckpt_ptr->job_id);
+		rc = EINVAL;
+		goto unpack_error;
+	}
+	if (! validate_super_user(uid) && (job_desc->user_id != uid)) {
+		error("Security violation, user %u not allowed to restart "
+		      "job %u of user %u",
+		      uid, ckpt_ptr->job_id, job_desc->user_id);
+		rc = EPERM;
+		goto unpack_error;
+	}
+
+	if (ckpt_ptr->data == 1) { /* restart on the same nodes */
+		xfree(job_desc->req_nodes);
+		job_desc->req_nodes = alloc_nodes;
+		alloc_nodes = NULL;	/* Nothing left to xfree */
+	}
+
+	/* set open mode to append */
+	job_desc->open_mode = OPEN_MODE_APPEND;
+
+	/* Set new job priority */
+	job_desc->priority = NO_VAL;
+	
+	/*
+	 * XXX: we set submit_uid to 0 in the job_allocate() call below
+	 * so that the original job_id can be restored, but this also
+	 * bypasses some partition access permission checks.
+	 * TODO: fix this.
+	 */
+	rc = job_allocate(job_desc,
+			  0,		/* immediate */
+			  0,		/* will_run */
+			  NULL, 	/* resp */
+			  0,		/* allocate */
+			  0,		/* submit_uid; 0 allows job_id reuse */
+			  &job_ptr);
+
+	/* set restart directory */
+	if (job_ptr) {
+		if (ckpt_ptr->image_dir) {
+			xfree (image_dir);
+			image_dir = xstrdup(ckpt_ptr->image_dir);
+		}
+		xstrfmtcat(image_dir, "/%u", ckpt_ptr->job_id);
+	
+		job_ptr->details->restart_dir = image_dir;
+		image_dir = NULL;	/* Nothing left to xfree */
+
+		last_job_update = time(NULL);
+	}
+	
+ unpack_error:
+	free_buf(buffer);
+	xfree(image_dir);
+	xfree(alloc_nodes);
+	xfree(ver_str);
+
+ reply:
+	slurm_msg_t_init(&resp_msg);
+	rc_msg.return_code = rc;
+	resp_msg.msg_type  = RESPONSE_SLURM_RC;
+	resp_msg.data      = &rc_msg;
+	(void) slurm_send_node_msg(conn_fd, &resp_msg);
+
+	return rc;
+}
+
+static char *
+_read_job_ckpt_file(char *ckpt_file, int *size_ptr)
+{
+	int ckpt_fd, error_code = 0;
+	int data_allocated, data_read, data_size = 0;
+	char *data = NULL;
+	
+	ckpt_fd = open(ckpt_file, O_RDONLY);
+	if (ckpt_fd < 0) {
+		info("No job ckpt file (%s) to read", ckpt_file);
+		error_code = ENOENT;
+	} else {
+		data_allocated = BUF_SIZE;
+		data = xmalloc(data_allocated);
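+		/* Read in BUF_SIZE chunks; the buffer grows by the amount
+		 * read so BUF_SIZE bytes of headroom always remain */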
+		while (1) {
+			data_read = read(ckpt_fd, &data[data_size],
+					 BUF_SIZE);
+			if (data_read < 0) {
+				if (errno == EINTR)
+					continue;
+				else {
+					error("Read error on %s: %m", 
+					      ckpt_file);
+					error_code = errno;
+					break;
+				}
+			} else if (data_read == 0)	/* eof */
+				break;
+			data_size      += data_read;
+			data_allocated += data_read;
+			xrealloc(data, data_allocated);
+		}
+		close(ckpt_fd);
+	}
+
+	if (error_code) {
+		xfree(data);
+		return NULL;
+	}
+	*size_ptr = data_size;
+	return data;
+}
diff --git a/src/slurmctld/job_scheduler.c b/src/slurmctld/job_scheduler.c
index 97b26decec6c282b60dc0420618f0cece6b78add..3cd339a22922f35574f55dea51c5ade25caa18c0 100644
--- a/src/slurmctld/job_scheduler.c
+++ b/src/slurmctld/job_scheduler.c
@@ -3,13 +3,14 @@
  *	Note there is a global job list (job_list)
  *****************************************************************************
  *  Copyright (C) 2002-2007 The Regents of the University of California.
- *  Copyright (C) 2008 Lawrence Livermore National Security.
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -48,10 +49,12 @@
 #include <unistd.h>
 
 #include "src/common/assoc_mgr.h"
+#include "src/common/env.h"
 #include "src/common/list.h"
 #include "src/common/macros.h"
 #include "src/common/node_select.h"
 #include "src/common/slurm_accounting_storage.h"
+#include "src/common/uid.h"
 #include "src/common/xassert.h"
 #include "src/common/xstring.h"
 
@@ -61,17 +64,21 @@
 #include "src/slurmctld/licenses.h"
 #include "src/slurmctld/locks.h"
 #include "src/slurmctld/node_scheduler.h"
+#include "src/slurmctld/reservation.h"
 #include "src/slurmctld/slurmctld.h"
 #include "src/slurmctld/srun_comm.h"
 
 #define _DEBUG 0
 #define MAX_RETRIES 10
 
-static void _depend_list_del(void *dep_ptr);
-static void _feature_list_delete(void *x);
-static int  _valid_feature_list(uint32_t job_id, List feature_list);
-static int  _valid_node_feature(char *feature);
-static char **_xduparray(uint16_t size, char ** array);
+static char **	_build_env(struct job_record *job_ptr);
+static void	_depend_list_del(void *dep_ptr);
+static void	_feature_list_delete(void *x);
+static void *	_run_epilog(void *arg);
+static void *	_run_prolog(void *arg);
+static int	_valid_feature_list(uint32_t job_id, List feature_list);
+static int	_valid_node_feature(char *feature);
+static char **	_xduparray(uint16_t size, char ** array);
 
 
 /*
@@ -134,6 +141,9 @@ extern int build_job_queue(struct job_queue **job_queue)
 	int job_buffer_size, job_queue_size;
 	struct job_queue *my_job_queue;
 
+	if (job_list == NULL)
+		return 0;
+
 	/* build list pending jobs */
 	job_buffer_size = job_queue_size = 0;
 	job_queue[0] = my_job_queue = NULL;
@@ -175,11 +185,13 @@ extern bool job_is_completing(void)
 	bool completing = false;
 	ListIterator job_iterator;
 	struct job_record *job_ptr = NULL;
-	time_t recent = time(NULL) - (slurmctld_conf.kill_wait + 2);
+	uint16_t complete_wait = slurm_get_complete_wait();
+	time_t recent;
 
-	if (!job_list)
+	if ((job_list == NULL) || (complete_wait == 0))
 		return completing;
 
+	recent = time(NULL) - complete_wait;
 	job_iterator = list_iterator_create(job_list);
 	while ((job_ptr = (struct job_record *) list_next(job_iterator))) {
 		if ((job_ptr->job_state & JOB_COMPLETING) &&
@@ -224,6 +236,7 @@ extern void set_job_elig_time(void)
 		    ((job_ptr->details->max_nodes < part_ptr->min_nodes) ||
 		     (job_ptr->details->min_nodes > part_ptr->max_nodes)))
 			continue;
+		/* Job's eligible time is set in job_independent() */
 		if (!job_independent(job_ptr))
 			continue;
 	}
@@ -294,7 +307,7 @@ extern int schedule(void)
 
 	lock_slurmctld(job_write_lock);
 	/* Avoid resource fragmentation if important */
-	if ((!wiki_sched) && switch_no_frag() && job_is_completing()) {
+	if ((!wiki_sched) && job_is_completing()) {
 		unlock_slurmctld(job_write_lock);
 		debug("schedule() returning, some job still completing");
 		return SLURM_SUCCESS;
@@ -316,9 +329,11 @@ extern int schedule(void)
 		if (job_ptr->priority == 0)	/* held */
 			continue;
 
-		if (_failed_partition(job_ptr->part_ptr, failed_parts, 
+		if ((job_ptr->resv_name == NULL) &&
+		    _failed_partition(job_ptr->part_ptr, failed_parts, 
 				      failed_part_cnt)) {
 			job_ptr->state_reason = WAIT_PRIORITY;
+			xfree(job_ptr->state_desc);
 			continue;
 		}
 		if (bit_overlap(avail_node_bitmap, 
@@ -330,10 +345,12 @@ extern int schedule(void)
 		}
 		if (license_job_test(job_ptr) != SLURM_SUCCESS) {
 			job_ptr->state_reason = WAIT_LICENSES;
+			xfree(job_ptr->state_desc);
 			continue;
 		}
 
-		if (assoc_mgr_validate_assoc_id(acct_db_conn, job_ptr->assoc_id,
+		if (assoc_mgr_validate_assoc_id(acct_db_conn, 
+						job_ptr->assoc_id,
 						accounting_enforce)) {
 			/* NOTE: This only happens if a user's account is 
 			 * disabled between when the job was submitted and 
@@ -345,6 +362,7 @@ extern int schedule(void)
 			job_ptr->job_state = JOB_FAILED;
 			job_ptr->exit_code = 1;
 			job_ptr->state_reason = FAIL_BANK_ACCOUNT;
+			xfree(job_ptr->state_desc);
 			job_ptr->start_time = job_ptr->end_time = time(NULL);
 			job_completion_logger(job_ptr);
 			delete_job_details(job_ptr);
@@ -374,6 +392,19 @@ extern int schedule(void)
 					job_ptr->part_ptr->node_bitmap);
 				bit_not(job_ptr->part_ptr->node_bitmap);
 			}
+		} else if (error_code == ESLURM_RESERVATION_NOT_USABLE) {
+			if (job_ptr->resv_ptr 
+			    && job_ptr->resv_ptr->node_bitmap) {
+				bit_not(job_ptr->resv_ptr->node_bitmap);
+				bit_and(avail_node_bitmap, 
+					job_ptr->resv_ptr->node_bitmap);
+				bit_not(job_ptr->resv_ptr->node_bitmap);
+			} else {
+				/* The job has no reservation but requires
+				 * nodes that are currently in some reservation
+				 * so just skip over this job and try running
+				 * the next lower priority job */
+			}
 		} else if (error_code == SLURM_SUCCESS) {	
 			/* job initiated */
 			last_job_update = now;
@@ -394,10 +425,10 @@ extern int schedule(void)
 			info("schedule: JobId=%u NodeList=%s",
 			     job_ptr->job_id, job_ptr->nodes);
 #endif
-			if (job_ptr->batch_flag)
-				launch_job(job_ptr);
-			else
+			if (job_ptr->batch_flag == 0)
 				srun_allocate(job_ptr->job_id);
+			else if (job_ptr->details->prolog_running == 0)
+				launch_job(job_ptr);
 			job_cnt++;
 		} else if ((error_code !=
 			    ESLURM_REQUESTED_PART_CONFIG_UNAVAILABLE)
@@ -410,6 +441,7 @@ extern int schedule(void)
 				job_ptr->job_state = JOB_FAILED;
 				job_ptr->exit_code = 1;
 				job_ptr->state_reason = FAIL_BAD_CONSTRAINTS;
+				xfree(job_ptr->state_desc);
 				job_ptr->start_time = job_ptr->end_time = now;
 				job_completion_logger(job_ptr);
 				delete_job_details(job_ptr);
@@ -464,8 +496,9 @@ extern void sort_job_queue(struct job_queue *job_queue, int job_queue_size)
 		tmp_part_prio = job_queue[i].part_priority;
 
 		job_queue[i].job_ptr       = job_queue[top_prio_inx].job_ptr;
-		job_queue[i].job_priority  = job_queue[top_prio_inx].job_priority;
-		job_queue[i].part_priority = job_queue[top_prio_inx].part_priority;
+		job_queue[i].job_priority  = job_queue[top_prio_inx].
+					     job_priority;
+		job_queue[i].part_priority = job_queue[top_prio_inx].
+					     part_priority;
 
 		job_queue[top_prio_inx].job_ptr       = tmp_job_ptr;
 		job_queue[top_prio_inx].job_priority  = tmp_job_prio;
@@ -500,6 +533,8 @@ extern void launch_job(struct job_record *job_ptr)
 	launch_msg_ptr->overcommit = job_ptr->details->overcommit;
 	launch_msg_ptr->open_mode  = job_ptr->details->open_mode;
 	launch_msg_ptr->acctg_freq = job_ptr->details->acctg_freq;
+	launch_msg_ptr->cpus_per_task = job_ptr->details->cpus_per_task;
+	launch_msg_ptr->restart_cnt   = job_ptr->restart_cnt;
 
 	if (make_batch_job_cred(launch_msg_ptr, job_ptr)) {
 		error("aborting batch job %u", job_ptr->job_id);
@@ -517,6 +552,8 @@ extern void launch_job(struct job_record *job_ptr)
 	launch_msg_ptr->in = xstrdup(job_ptr->details->in);
 	launch_msg_ptr->out = xstrdup(job_ptr->details->out);
 	launch_msg_ptr->work_dir = xstrdup(job_ptr->details->work_dir);
+	launch_msg_ptr->ckpt_dir = xstrdup(job_ptr->details->ckpt_dir);
+	launch_msg_ptr->restart_dir = xstrdup(job_ptr->details->restart_dir);
 	launch_msg_ptr->argc = job_ptr->details->argc;
 	launch_msg_ptr->argv = _xduparray(job_ptr->details->argc,
 					job_ptr->details->argv);
@@ -524,15 +561,18 @@ extern void launch_job(struct job_record *job_ptr)
 	launch_msg_ptr->environment =
 	    get_job_env(job_ptr, &launch_msg_ptr->envc);
 	launch_msg_ptr->job_mem = job_ptr->details->job_min_memory;
-	launch_msg_ptr->num_cpu_groups = job_ptr->num_cpu_groups;
-	launch_msg_ptr->cpus_per_node  = xmalloc(sizeof(uint32_t) *
-			job_ptr->num_cpu_groups);
-	memcpy(launch_msg_ptr->cpus_per_node, job_ptr->cpus_per_node,
-			(sizeof(uint32_t) * job_ptr->num_cpu_groups));
+
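+	/* Copy the allocated CPU counts per node in compressed
+	 * (value/repetition-count) form */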
+	launch_msg_ptr->num_cpu_groups = job_ptr->select_job->cpu_array_cnt;
+	launch_msg_ptr->cpus_per_node  = xmalloc(sizeof(uint16_t) *
+			job_ptr->select_job->cpu_array_cnt);
+	memcpy(launch_msg_ptr->cpus_per_node, 
+	       job_ptr->select_job->cpu_array_value,
+	       (sizeof(uint16_t) * job_ptr->select_job->cpu_array_cnt));
 	launch_msg_ptr->cpu_count_reps  = xmalloc(sizeof(uint32_t) *
-			job_ptr->num_cpu_groups);
-	memcpy(launch_msg_ptr->cpu_count_reps, job_ptr->cpu_count_reps,
-			(sizeof(uint32_t) * job_ptr->num_cpu_groups));
+			job_ptr->select_job->cpu_array_cnt);
+	memcpy(launch_msg_ptr->cpu_count_reps, 
+	       job_ptr->select_job->cpu_array_reps,
+	       (sizeof(uint32_t) * job_ptr->select_job->cpu_array_cnt));
 
 	launch_msg_ptr->select_jobinfo = select_g_copy_jobinfo(
 			job_ptr->select_jobinfo);
@@ -574,6 +614,7 @@ extern int make_batch_job_cred(batch_job_launch_msg_t *launch_msg_ptr,
 			       struct job_record *job_ptr)
 {
 	slurm_cred_arg_t cred_arg;
+	select_job_res_t select_ptr;
 
 	cred_arg.jobid     = launch_msg_ptr->job_id;
 	cred_arg.stepid    = launch_msg_ptr->step_id;
@@ -586,14 +627,23 @@ extern int make_batch_job_cred(batch_job_launch_msg_t *launch_msg_ptr,
 	if (job_ptr->details == NULL)
 		cred_arg.job_mem = 0;
 	else if (job_ptr->details->job_min_memory & MEM_PER_CPU) {
+		xassert(job_ptr->select_job);
+		xassert(job_ptr->select_job->cpus);
 		cred_arg.job_mem = job_ptr->details->job_min_memory;
 		cred_arg.job_mem &= (~MEM_PER_CPU);
-		cred_arg.job_mem *= job_ptr->alloc_lps[0];
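+		/* per-CPU limit: scale by the CPU count on the first node */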
+		cred_arg.job_mem *= job_ptr->select_job->cpus[0];
 	} else
 		cred_arg.job_mem = job_ptr->details->job_min_memory;
 
-	cred_arg.alloc_lps_cnt = 0;
-	cred_arg.alloc_lps = NULL;
+	/* Identify the cores allocated to this job. */
+	xassert(job_ptr->select_job);
+	select_ptr = job_ptr->select_job;
+	cred_arg.core_bitmap         = select_ptr->core_bitmap;
+	cred_arg.cores_per_socket    = select_ptr->cores_per_socket;
+	cred_arg.sockets_per_node    = select_ptr->sockets_per_node;
+	cred_arg.sock_core_rep_count = select_ptr->sock_core_rep_count;
+	cred_arg.job_nhosts          = select_ptr->nhosts;
+	cred_arg.job_hostlist        = job_ptr->nodes;
 
 	launch_msg_ptr->cred = slurm_cred_create(slurmctld_config.cred_ctx,
 			 &cred_arg);
@@ -885,9 +935,10 @@ extern int job_start_data(job_desc_msg_t *job_desc_msg,
 {
 	struct job_record *job_ptr;
 	struct part_record *part_ptr;
-	bitstr_t *avail_bitmap = NULL;
+	bitstr_t *avail_bitmap = NULL, *resv_bitmap = NULL;
 	uint32_t min_nodes, max_nodes, req_nodes;
-	int rc = SLURM_SUCCESS;
+	int i, rc = SLURM_SUCCESS;
+	time_t now = time(NULL), start_res;
 
 	job_ptr = find_job_record(job_desc_msg->job_id);
 	if (job_ptr == NULL)
@@ -904,21 +955,18 @@ extern int job_start_data(job_desc_msg_t *job_desc_msg,
 	if ((job_desc_msg->req_nodes == NULL) || 
 	    (job_desc_msg->req_nodes == '\0')) {
 		/* assume all nodes available to job for testing */
-		avail_bitmap = bit_copy(avail_node_bitmap);
+		avail_bitmap = bit_alloc(node_record_count);
+		bit_nset(avail_bitmap, 0, (node_record_count - 1));
 	} else if (node_name2bitmap(job_desc_msg->req_nodes, false, 
 				    &avail_bitmap) != 0) {
 		return ESLURM_INVALID_NODE_NAME;
 	}
 
-	/* Only consider nodes that are not DOWN or DRAINED */
-	bit_and(avail_bitmap, avail_node_bitmap);
-
 	/* Consider only nodes in this job's partition */
 	if (part_ptr->node_bitmap)
 		bit_and(avail_bitmap, part_ptr->node_bitmap);
 	else
 		rc = ESLURM_REQUESTED_PART_CONFIG_UNAVAILABLE;
-
 	if (job_req_node_filter(job_ptr, avail_bitmap))
 		rc = ESLURM_REQUESTED_PART_CONFIG_UNAVAILABLE;
 	if (job_ptr->details->exc_node_bitmap) {
@@ -937,6 +985,20 @@ extern int job_start_data(job_desc_msg_t *job_desc_msg,
 		}
 	}
 
+	/* Enforce reservation: access control, time and nodes */
+	if (job_ptr->details->begin_time)
+		start_res = job_ptr->details->begin_time;
+	else
+		start_res = now;
+	i = job_test_resv(job_ptr, &start_res, false, &resv_bitmap);
+	if (i != SLURM_SUCCESS) {
+		FREE_NULL_BITMAP(avail_bitmap);
+		return i;
+	}
+	bit_and(avail_bitmap, resv_bitmap);
+	FREE_NULL_BITMAP(resv_bitmap);
+
+	/* Only consider nodes that are not DOWN or DRAINED */
+	bit_and(avail_bitmap, avail_node_bitmap);
+
 	if (rc == SLURM_SUCCESS) {
 		min_nodes = MAX(job_ptr->details->min_nodes, 
 				part_ptr->min_nodes);
@@ -968,7 +1030,7 @@ extern int job_start_data(job_desc_msg_t *job_desc_msg,
 #else
 		resp_data->proc_cnt = job_ptr->total_procs;
 #endif
-		resp_data->start_time = job_ptr->start_time;
+		resp_data->start_time = MAX(job_ptr->start_time, start_res);
 		job_ptr->start_time   = 0;  /* restore pending job start time */
 		resp_data->node_list  = bitmap2node_name(avail_bitmap);
 		FREE_NULL_BITMAP(avail_bitmap);
@@ -981,6 +1043,252 @@ extern int job_start_data(job_desc_msg_t *job_desc_msg,
 
 }
 
+/*
+ * epilog_slurmctld - execute the epilog_slurmctld for a job that has just
+ *	terminated.
+ * IN job_ptr - pointer to job that has been terminated
+ * RET SLURM_SUCCESS(0) or error code
+ */
+extern int epilog_slurmctld(struct job_record *job_ptr)
+{
+	int rc;
+	pthread_t thread_id_epilog;
+	pthread_attr_t thread_attr_epilog;
+
+	if ((slurmctld_conf.epilog_slurmctld == NULL) ||
+	    (slurmctld_conf.epilog_slurmctld[0] == '\0'))
+		return SLURM_SUCCESS;
+
+	if (access(slurmctld_conf.epilog_slurmctld, X_OK) < 0) {
+		error("Invalid EpilogSlurmctld: %m");
+		return errno;
+	}
+
+	slurm_attr_init(&thread_attr_epilog);
+	pthread_attr_setdetachstate(&thread_attr_epilog, 
+				    PTHREAD_CREATE_DETACHED);
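+	/* Run the epilog in a detached thread so slurmctld is not blocked */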
+	while (1) {
+		rc = pthread_create(&thread_id_epilog,
+				    &thread_attr_epilog,
+				    _run_epilog, (void *) job_ptr);
+		if (rc == 0)
+			return SLURM_SUCCESS;
+		if (errno == EAGAIN)
+			continue;
+		error("pthread_create: %m");
+		return errno;
+	}
+}
+
+static char **_build_env(struct job_record *job_ptr)
+{
+	char **my_env, *name;
+
+	my_env = xmalloc(sizeof(char *));
+	my_env[0] = NULL;
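+	/* setenvf() grows my_env as variables are added and keeps the
+	 * array NULL-terminated */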
+#ifdef HAVE_CRAY_XT
+	select_g_get_jobinfo(job_ptr->select_jobinfo, 
+			     SELECT_DATA_RESV_ID, &name);
+	setenvf(&env, "BASIL_RESERVATION_ID", "%s", name);
+	xfree(name);
+#endif
+#ifdef HAVE_BG
+	select_g_get_jobinfo(job_ptr->select_jobinfo, 
+			     SELECT_DATA_BLOCK_ID, &name);
+	setenvf(&my_env, "MPIRUN_PARTITION", "%s", name);
+#endif
+	setenvf(&my_env, "SLURM_JOB_ACCOUNT", "%s", job_ptr->account);
+	if (job_ptr->details) {
+		setenvf(&my_env, "SLURM_JOB_CONSTRAINTS", 
+			"%s", job_ptr->details->features);
+	}
+	setenvf(&my_env, "SLURM_JOB_GID", "%u", job_ptr->group_id);
+	name = gid_to_string((gid_t) job_ptr->group_id);
+	setenvf(&my_env, "SLURM_JOB_GROUP", "%s", name);
+	xfree(name);
+	setenvf(&my_env, "SLURM_JOB_ID", "%u", job_ptr->job_id);
+	setenvf(&my_env, "SLURM_JOB_NAME", "%s", job_ptr->name);
+	setenvf(&my_env, "SLURM_JOB_NODELIST", "%s", job_ptr->nodes);
+	setenvf(&my_env, "SLURM_JOB_PARTITION", "%s", job_ptr->partition);
+	setenvf(&my_env, "SLURM_JOB_UID", "%u", job_ptr->user_id);
+	name = uid_to_string((uid_t) job_ptr->user_id);
+	setenvf(&my_env, "SLURM_JOB_USER", "%s", name);
+	xfree(name);
+
+	return my_env;
+}
+
+static void *_run_epilog(void *arg)
+{
+	struct job_record *job_ptr = (struct job_record *) arg;
+	uint32_t job_id;
+	pid_t cpid;
+	int i, status, wait_rc;
+	char *argv[2], **my_env;
+	/* Locks: Read config, job */
+	slurmctld_lock_t config_read_lock = { 
+		READ_LOCK, READ_LOCK, NO_LOCK, NO_LOCK };
+
+	lock_slurmctld(config_read_lock);
+	argv[0] = xstrdup(slurmctld_conf.epilog_slurmctld);
+	argv[1] = NULL;
+	my_env = _build_env(job_ptr);
+	job_id = job_ptr->job_id;
+	unlock_slurmctld(config_read_lock);
+
+	if ((cpid = fork()) < 0) {
+		error("epilog_slurmctld fork error: %m");
+		goto fini;
+	}
+	if (cpid == 0) {
+#ifdef SETPGRP_TWO_ARGS
+		setpgrp(0, 0);
+#else
+		setpgrp();
+#endif
+		execve(argv[0], argv, my_env);
+		exit(127);
+	}
+
+	while (1) {
+		wait_rc = waitpid(cpid, &status, 0);
+		if (wait_rc < 0) {
+			if (errno == EINTR)
+				continue;
+			error("epilog_slurmctld waitpid error: %m");
+			break;
+		} else if (wait_rc > 0) {
+			killpg(cpid, SIGKILL);	/* kill children too */
+			break;
+		}
+	}
+	if (status != 0) {
+		error("epilog_slurmctld job %u epilog exit status %u:%u",
+		      job_id, WEXITSTATUS(status), WTERMSIG(status));
+	} else
+		debug2("epilog_slurmctld job %u prolog completed", job_id);
+
+ fini:	xfree(argv[0]);
+	for (i=0; my_env[i]; i++)
+		xfree(my_env[i]);
+	xfree(my_env);
+	return NULL;
+}
+
+/*
+ * prolog_slurmctld - execute the prolog_slurmctld for a job that has just
+ *	been allocated resources.
+ * IN job_ptr - pointer to job that will be initiated
+ * RET SLURM_SUCCESS(0) or error code
+ */
+extern int prolog_slurmctld(struct job_record *job_ptr)
+{
+	int rc;
+	pthread_t thread_id_prolog;
+	pthread_attr_t thread_attr_prolog;
+
+	if ((slurmctld_conf.prolog_slurmctld == NULL) ||
+	    (slurmctld_conf.prolog_slurmctld[0] == '\0'))
+		return SLURM_SUCCESS;
+
+	if (access(slurmctld_conf.prolog_slurmctld, X_OK) < 0) {
+		error("Invalid PrologSlurmctld: %m");
+		return errno;
+	}
+
+	if (job_ptr->details)
+		job_ptr->details->prolog_running = 1;
+
+	slurm_attr_init(&thread_attr_prolog);
+	pthread_attr_setdetachstate(&thread_attr_prolog, 
+				    PTHREAD_CREATE_DETACHED);
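+	/* Run the prolog in a detached thread so slurmctld is not blocked */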
+	while (1) {
+		rc = pthread_create(&thread_id_prolog,
+				    &thread_attr_prolog,
+				    _run_prolog, (void *) job_ptr);
+		if (rc == 0)
+			return SLURM_SUCCESS;
+		if (errno == EAGAIN)
+			continue;
+		error("pthread_create: %m");
+		return errno;
+	}
+}
+
+static void *_run_prolog(void *arg)
+{
+	struct job_record *job_ptr = (struct job_record *) arg;
+	uint32_t job_id;
+	pid_t cpid;
+	int i, status, wait_rc;
+	char *argv[2], **my_env;
+	/* Locks: Read config, job */
+	slurmctld_lock_t config_read_lock = { 
+		READ_LOCK, READ_LOCK, NO_LOCK, NO_LOCK };
+
+	lock_slurmctld(config_read_lock);
+	argv[0] = xstrdup(slurmctld_conf.prolog_slurmctld);
+	argv[1] = NULL;
+	my_env = _build_env(job_ptr);
+	job_id = job_ptr->job_id;
+	unlock_slurmctld(config_read_lock);
+
+	if ((cpid = fork()) < 0) {
+		error("prolog_slurmctld fork error: %m");
+		goto fini;
+	}
+	if (cpid == 0) {
+#ifdef SETPGRP_TWO_ARGS
+		setpgrp(0, 0);
+#else
+		setpgrp();
+#endif
+		execve(argv[0], argv, my_env);
+		exit(127);
+	}
+
+	while (1) {
+		wait_rc = waitpid(cpid, &status, 0);
+		if (wait_rc < 0) {
+			if (errno == EINTR)
+				continue;
+			error("prolog_slurmctld waitpid error: %m");
+			break;
+		} else if (wait_rc > 0) {
+			killpg(cpid, SIGKILL);	/* kill children too */
+			break;
+		}
+	}
+	if (status != 0) {
+		error("prolog_slurmctld job %u prolog exit status %u:%u",
+		      job_id, WEXITSTATUS(status), WTERMSIG(status));
+	} else
+		debug2("prolog_slurmctld job %u prolog completed", job_id);
+
+ fini:	xfree(argv[0]);
+	for (i=0; my_env[i]; i++)
+		xfree(my_env[i]);
+	xfree(my_env);
+	lock_slurmctld(config_read_lock);
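+	/* The job lock was released while the prolog ran; the job may have
+	 * completed and its record been reused, so re-validate job_ptr */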
+	if (job_ptr->job_id != job_id) {
+		error("prolog_slurmctld job %u pointer invalid", job_id);
+		job_ptr = find_job_record(job_id);
+		if (job_ptr == NULL)
+			error("prolog_slurmctld job %u now defunct", job_id);
+	}
+	if (job_ptr) {
+		if (job_ptr->details)
+			job_ptr->details->prolog_running = 0;
+		if (job_ptr->batch_flag &&
+		    ((job_ptr->job_state == JOB_RUNNING) ||
+		     (job_ptr->job_state == JOB_SUSPENDED)))
+			launch_job(job_ptr);
+	}
+	unlock_slurmctld(config_read_lock);
+
+	return NULL;
+}
+
 /*
  * build_feature_list - Translate a job's feature string into a feature_list
  * IN  details->features
diff --git a/src/slurmctld/job_scheduler.h b/src/slurmctld/job_scheduler.h
index 17b39ec51395df22e67492c2823c91399d21ebeb..e8a06472b78519f330a5590669a7b65abda5c101 100644
--- a/src/slurmctld/job_scheduler.h
+++ b/src/slurmctld/job_scheduler.h
@@ -7,10 +7,11 @@
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette@llnl.gov>, et. al.
  *  Derived from dsh written by Jim Garlick <garlick1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -65,6 +66,14 @@ extern int build_feature_list(struct job_record *job_ptr);
  */
 extern int build_job_queue(struct job_queue **job_queue);
 
+/*
+ * epilog_slurmctld - execute the epilog_slurmctld for a job that has just
+ *	terminated.
+ * IN job_ptr - pointer to job that has been terminated
+ * RET SLURM_SUCCESS(0) or error code
+ */
+extern int epilog_slurmctld(struct job_record *job_ptr);
+
 /*
  * job_is_completing - Determine if jobs are in the process of completing.
  * RET - True of any job is in the process of completing
@@ -100,6 +109,14 @@ extern int make_batch_job_cred(batch_job_launch_msg_t *launch_msg_ptr,
 /* Print a job's dependency information based upon job_ptr->depend_list */
 extern void print_job_dependency(struct job_record *job_ptr);
 
+/*
+ * prolog_slurmctld - execute the prolog_slurmctld for a job that has just
+ *	been allocated resources.
+ * IN job_ptr - pointer to job that will be initiated
+ * RET SLURM_SUCCESS(0) or error code
+ */
+extern int prolog_slurmctld(struct job_record *job_ptr);
+
 /* 
  * schedule - attempt to schedule all pending jobs
  *	pending jobs for each partition will be scheduled in priority  
diff --git a/src/slurmctld/licenses.c b/src/slurmctld/licenses.c
index 002c07d507e3a5266617d45a37d62de78ac28465..ae9deeddc448af236febabcf495eeaf9c76aed2f 100644
--- a/src/slurmctld/licenses.c
+++ b/src/slurmctld/licenses.c
@@ -4,10 +4,11 @@
  *  Copyright (C) 2008 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette@llnl.gov>, et. al.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/slurmctld/licenses.h b/src/slurmctld/licenses.h
index 449e1002b1138748518a997500cd6affaf62e2d5..9af0dbe99fbd06994944950b70ee6d0a2db32c79 100644
--- a/src/slurmctld/licenses.h
+++ b/src/slurmctld/licenses.h
@@ -4,10 +4,11 @@
  *  Copyright (C) 2008 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette@llnl.gov>, et. al.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/slurmctld/locks.c b/src/slurmctld/locks.c
index 71eca604beb63c3b813192e746095330e2a4431d..c0f5377bfc0df1caa239892a31fb48603c1d6445 100644
--- a/src/slurmctld/locks.c
+++ b/src/slurmctld/locks.c
@@ -4,10 +4,11 @@
  *  Copyright (C) 2002 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette@llnl.gov>, Randy Sanchez <rsancez@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/slurmctld/locks.h b/src/slurmctld/locks.h
index 997f35722a0d5c1b02d28a545acfc3bd23801fc0..4094a68eb9c41e55da80e0343fe5b6325712e2de 100644
--- a/src/slurmctld/locks.h
+++ b/src/slurmctld/locks.h
@@ -4,10 +4,11 @@
  *  Copyright (C) 2002 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette@llnl.gov>, Randy Sanchez <rsancez@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/slurmctld/node_mgr.c b/src/slurmctld/node_mgr.c
index 75769a26b9995cd1cdd7a58bb0c3abc4772fb826..93886a5c84b0023b712f7dca2b60060243fb507d 100644
--- a/src/slurmctld/node_mgr.c
+++ b/src/slurmctld/node_mgr.c
@@ -3,16 +3,16 @@
  *	Note: there is a global node table (node_record_table_ptr), its 
  *	hash table (node_hash_table), time stamp (last_node_update) and 
  *	configuration list (config_list)
- *
- *  $Id: node_mgr.c 15820 2008-12-04 01:16:52Z jette $
  *****************************************************************************
- *  Copyright (C) 2002-2006 The Regents of the University of California.
+ *  Copyright (C) 2002-2007 The Regents of the University of California.
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>, et. al.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -76,7 +76,7 @@
 #define MAX_RETRIES	10
 
 /* Change NODE_STATE_VERSION value when changing the state save format */
-#define NODE_STATE_VERSION      "VER002"
+#define NODE_STATE_VERSION      "VER003"
 
 /* Global variables */
 List config_list = NULL;		/* list of config_record entries */
@@ -88,6 +88,7 @@ time_t last_node_update = (time_t) NULL;	/* time of last update to
 						 * node records */
 bitstr_t *avail_node_bitmap = NULL;	/* bitmap of available nodes */
 bitstr_t *idle_node_bitmap  = NULL;	/* bitmap of idle nodes */
+bitstr_t *power_node_bitmap = NULL;	/* bitmap of powered down nodes */
 bitstr_t *share_node_bitmap = NULL;  	/* bitmap of sharable nodes */
 bitstr_t *up_node_bitmap    = NULL;  	/* bitmap of non-down nodes */
 
@@ -102,12 +103,13 @@ static void 	_make_node_down(struct node_record *node_ptr,
 				time_t event_time);
 static void	_node_did_resp(struct node_record *node_ptr);
 static bool	_node_is_hidden(struct node_record *node_ptr);
-static void 	_pack_node (struct node_record *dump_node_ptr, bool cr_flag,
-				Buf buffer);
+static void 	_pack_node (struct node_record *dump_node_ptr,
+			    uint32_t cr_flag, Buf buffer);
 static void	_sync_bitmaps(struct node_record *node_ptr, int job_count);
 static void	_update_config_ptr(bitstr_t *bitmap,
 				struct config_record *config_ptr);
 static int	_update_node_features(char *node_names, char *features);
+static int	_update_node_weight(char *node_names, uint32_t weight);
 static bool 	_valid_node_state_change(uint16_t old, uint16_t new); 
 #ifndef HAVE_FRONT_END
 static void	_node_not_resp (struct node_record *node_ptr, time_t msg_time);
@@ -126,22 +128,28 @@ static void	_dump_hash (void);
  */
 char * bitmap2node_name (bitstr_t *bitmap) 
 {
-	int i;
+	int i, first, last;
 	hostlist_t hl;
 	char buf[8192];
 
 	if (bitmap == NULL)
 		return xstrdup("");
 
+	first = bit_ffs(bitmap);
+	if (first == -1)
+		return xstrdup("");
+
+	last  = bit_fls(bitmap);
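+	/* Walk only the range between the first and last set bits */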
 	hl = hostlist_create("");
-	for (i = 0; i < node_record_count; i++) {
-		if (bit_test (bitmap, i) == 0)
+	for (i = first; i <= last; i++) {
+		if (bit_test(bitmap, i) == 0)
 			continue;
 		hostlist_push(hl, node_record_table_ptr[i].name);
 	}
 	hostlist_uniq(hl);
 	hostlist_ranged_string(hl, sizeof(buf), buf);
 	hostlist_destroy(hl);
+
 	return xstrdup(buf);
 }
 
@@ -200,7 +208,7 @@ create_node_record (struct config_record *config_ptr, char *node_name)
 		(node_record_count + 1) * sizeof (struct node_record);
 	new_buffer_size = 
 		((int) ((new_buffer_size / BUF_SIZE) + 1)) * BUF_SIZE;
-	if (node_record_count == 0)
+	if (!node_record_table_ptr)
 		node_record_table_ptr = 
 			(struct node_record *) xmalloc (new_buffer_size);
 	else if (old_buffer_size != new_buffer_size)
@@ -241,12 +249,14 @@ static int _delete_config_record (void)
 /* dump_all_node_state - save the state of all nodes to file */
 int dump_all_node_state ( void )
 {
+	/* Save high-water mark to avoid buffer growth with copies */
+	static int high_buffer_size = (1024 * 1024);
 	int error_code = 0, inx, log_fd;
 	char *old_file, *new_file, *reg_file;
 	/* Locks: Read config and node */
 	slurmctld_lock_t node_read_lock = { READ_LOCK, NO_LOCK, READ_LOCK, 
 						NO_LOCK };
-	Buf buffer = init_buf(BUF_SIZE*16);
+	Buf buffer = init_buf(high_buffer_size);
 	DEF_TIMERS;
 
 	START_TIMER;
@@ -281,7 +291,7 @@ int dump_all_node_state ( void )
 	} else {
 		int pos = 0, nwrite = get_buf_offset(buffer), amount;
 		char *data = (char *)get_buf_data(buffer);
-
+		high_buffer_size = MAX(nwrite, high_buffer_size);
 		while (nwrite > 0) {
 			amount = write(log_fd, &data[pos], nwrite);
 			if ((amount < 0) && (errno != EINTR)) {
@@ -414,6 +424,13 @@ extern int load_all_node_state ( bool state_only )
 	time_t time_stamp, now = time(NULL);
 	Buf buffer;
 	char *ver_str = NULL;
+	hostset_t hs = NULL;
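+	/* hs collects the names of nodes whose POWER_SAVE flag is cleared
+	 * below, so they can be logged in a single message */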
+	slurm_ctl_conf_t *conf = slurm_conf_lock();
+	bool power_save_mode = false;
+
+	if (conf->suspend_program && conf->resume_program)
+		power_save_mode = true;
+	slurm_conf_unlock();
 
 	/* read the file */
 	state_file = xstrdup (slurmctld_conf.state_save_location);
@@ -519,6 +536,14 @@ extern int load_all_node_state ( bool state_only )
 				if (node_state & NODE_STATE_FAIL)
 					node_ptr->node_state |=
 						NODE_STATE_FAIL;
+				if (node_state & NODE_STATE_POWER_SAVE) {
+					if (power_save_mode)
+						node_ptr->node_state=node_state;
+					else if (hs)
+						hostset_insert(hs, node_name);
+					else
+						hs = hostset_create(node_name);
+				}
 			}
 			if (node_ptr->reason == NULL)
 				node_ptr->reason = reason;
@@ -528,6 +553,14 @@ extern int load_all_node_state ( bool state_only )
 			node_ptr->features = features;
 		} else {
 			node_cnt++;
+			if ((node_state & NODE_STATE_POWER_SAVE) && 
+			    (!power_save_mode)) {
+				node_state &= (~NODE_STATE_POWER_SAVE);
+				if (hs)
+					hostset_insert(hs, node_name);
+				else
+					hs = hostset_create(node_name);
+			}
 			node_ptr->node_state    = node_state;
 			xfree(node_ptr->reason);
 			node_ptr->reason        = reason;
@@ -547,15 +580,20 @@ extern int load_all_node_state ( bool state_only )
 		xfree (node_name);
 	}
 
-	info ("Recovered state of %d nodes", node_cnt);
+fini:	info("Recovered state of %d nodes", node_cnt);
+	if (hs) {
+		char node_names[128];
+		hostset_ranged_string(hs, sizeof(node_names), node_names);
+		info("Cleared POWER_SAVE flag from nodes %s", node_names);
+		hostset_destroy(hs);
+	}
 	free_buf (buffer);
 	return error_code;
 
 unpack_error:
-	error ("Incomplete node data checkpoint file");
-	info("Recovered state of %d nodes", node_cnt);
-	free_buf (buffer);
-	return EFAULT;
+	error("Incomplete node data checkpoint file");
+	error_code = EFAULT;
+	goto fini;
 }
 
 /* 
@@ -724,7 +762,7 @@ static int _list_find_config (void *config_entry, void *key)
  *	representation
  * IN node_names  - list of nodes
  * IN best_effort - if set don't return an error on invalid node name entries 
- * OUT bitmap     - set to bitmap or NULL on error 
+ * OUT bitmap     - set to bitmap, may not have all bits set on error 
  * RET 0 if no error, otherwise EINVAL
  * global: node_record_table_ptr - pointer to global node table
  * NOTE: the caller must bit_free() memory at bitmap when no longer required
@@ -743,7 +781,7 @@ extern int node_name2bitmap (char *node_names, bool best_effort,
 	*bitmap = my_bitmap;
 	
 	if (node_names == NULL) {
-		error ("node_name2bitmap: node_names is NULL");
+		info("node_name2bitmap: node_names is NULL");
 		return rc;
 	}
 
@@ -764,11 +802,8 @@ extern int node_name2bitmap (char *node_names, bool best_effort,
 		} else {
 			error ("node_name2bitmap: invalid node specified %s",
 			       this_node_name);
-			if (!best_effort) {
-				free (this_node_name);
+			if (!best_effort)
 				rc = EINVAL;
-				break;
-			}
 		}
 		free (this_node_name);
 	}
@@ -811,11 +846,11 @@ extern void pack_all_node (char **buffer_ptr, int *buffer_size,
 {
 	int inx;
 	uint32_t nodes_packed, tmp_offset;
+	uint16_t base_state;
 	Buf buffer;
-	bool cr_flag = false;
+	static uint32_t cr_flag = NO_VAL;
 	time_t now = time(NULL);
 	struct node_record *node_ptr = node_record_table_ptr;
-	char *select_type;
 
 	/*
 	 * If Consumable Resources enabled, get allocated_cpus.
@@ -823,10 +858,13 @@ extern void pack_all_node (char **buffer_ptr, int *buffer_size,
 	 * use dependeing upon node state (entire node is either 
 	 * allocated or not).
 	 */
-	select_type = slurm_get_select_type();
-	if (strcmp(select_type, "select/cons_res") == 0)
-		cr_flag = true;
-	xfree(select_type);
+	if (cr_flag == NO_VAL) {
+		cr_flag = 0;  /* call is no-op for select/linear and bluegene */
+		if (select_g_get_info_from_plugin(SELECT_CR_PLUGIN,
+						  NULL, &cr_flag)) {
+			cr_flag = NO_VAL;	/* error */
+		}
+	}
 
 	buffer_ptr[0] = NULL;
 	*buffer_size = 0;
@@ -848,6 +886,9 @@ extern void pack_all_node (char **buffer_ptr, int *buffer_size,
 		if (((show_flags & SHOW_ALL) == 0) && (uid != 0) &&
 		    (_node_is_hidden(node_ptr)))
 			continue;
+		base_state = node_ptr->node_state & NODE_STATE_BASE;
+		if (base_state == NODE_STATE_FUTURE)
+			continue;
 		if ((node_ptr->name == NULL) ||
 		    (node_ptr->name[0] == '\0'))
 			continue;
@@ -878,9 +919,11 @@ extern void pack_all_node (char **buffer_ptr, int *buffer_size,
  *	changes to load_node_config in api/node_info.c
  * NOTE: READ lock_slurmctld config before entry
  */
-static void _pack_node (struct node_record *dump_node_ptr, bool cr_flag,
+static void _pack_node (struct node_record *dump_node_ptr, uint32_t cr_flag,
 		Buf buffer) 
 {
+	uint16_t threads;
+
 	packstr (dump_node_ptr->name, buffer);
 	pack16  (dump_node_ptr->node_state, buffer);
 	if (slurmctld_conf.fast_schedule) {	
@@ -891,6 +934,7 @@ static void _pack_node (struct node_record *dump_node_ptr, bool cr_flag,
 		pack16  (dump_node_ptr->config_ptr->threads, buffer);
 		pack32  (dump_node_ptr->config_ptr->real_memory, buffer);
 		pack32  (dump_node_ptr->config_ptr->tmp_disk, buffer);
+		threads = dump_node_ptr->config_ptr->threads;
 	} else {	
 		/* Individual node data used for scheduling */
 		pack16  (dump_node_ptr->cpus, buffer);
@@ -899,10 +943,11 @@ static void _pack_node (struct node_record *dump_node_ptr, bool cr_flag,
 		pack16  (dump_node_ptr->threads, buffer);
 		pack32  (dump_node_ptr->real_memory, buffer);
 		pack32  (dump_node_ptr->tmp_disk, buffer);
+		threads = dump_node_ptr->threads;
 	}
 	pack32  (dump_node_ptr->config_ptr->weight, buffer);
 
-	if (cr_flag) {
+	if (cr_flag == 1) {
 		uint16_t allocated_cpus;
 		int error_code;
 		error_code = select_g_get_select_nodeinfo(dump_node_ptr,
@@ -911,7 +956,8 @@ static void _pack_node (struct node_record *dump_node_ptr, bool cr_flag,
 			error ("_pack_node: error from "
 				"select_g_get_select_nodeinfo: %m");
 			allocated_cpus = 0;
-		}
+		} else
+			allocated_cpus *= threads;
 		pack16(allocated_cpus, buffer);
 	} else if ((dump_node_ptr->node_state & NODE_STATE_COMPLETING) ||
 		   (dump_node_ptr->node_state == NODE_STATE_ALLOCATED)) {
@@ -970,6 +1016,7 @@ void set_slurmd_addr (void)
 {
 	int i;
 	struct node_record *node_ptr = node_record_table_ptr;
+	uint16_t base_state;
 	DEF_TIMERS;
 
 	START_TIMER;
@@ -977,6 +1024,9 @@ void set_slurmd_addr (void)
 		if ((node_ptr->name == NULL) ||
 		    (node_ptr->name[0] == '\0'))
 			continue;
+		base_state = node_ptr->node_state & NODE_STATE_BASE;
+		if (base_state == NODE_STATE_FUTURE)
+			continue;
 		if (node_ptr->port == 0)
 			node_ptr->port = slurmctld_conf.slurmd_port;
 		slurm_set_addr (&node_ptr->slurm_addr,
@@ -1076,6 +1126,26 @@ int update_node ( update_node_msg_t * update_node_msg )
 							NODE_STATE_NO_RESPOND;
 					node_ptr->last_response = now;
 					ping_nodes_now = true;
+				} else if (base_state == NODE_STATE_FUTURE) {
+					if (node_ptr->port == 0) {
+						node_ptr->port = slurmctld_conf.
+								 slurmd_port;
+					}
+					slurm_set_addr(	&node_ptr->slurm_addr,
+							node_ptr->port,
+							node_ptr->comm_name);
+					if (node_ptr->slurm_addr.sin_port) {
+						state_val = NODE_STATE_IDLE;
+						node_ptr->node_state |=
+							NODE_STATE_NO_RESPOND;
+						node_ptr->last_response = now;
+						ping_nodes_now = true;
+					} else {
+						error("slurm_set_addr failure "
+						      "on %s", 
+		       				      node_ptr->comm_name);
+						state_val = base_state;
+					}
 				} else
 					state_val = base_state;
 			}
@@ -1083,10 +1153,8 @@ int update_node ( update_node_msg_t * update_node_msg )
 				/* We must set node DOWN before killing 
 				 * its jobs */
 				_make_node_down(node_ptr, now);
-				kill_running_job_by_node_name (this_node_name,
-							       false);
-			}
-			else if (state_val == NODE_STATE_IDLE) {
+				kill_running_job_by_node_name (this_node_name);
+			} else if (state_val == NODE_STATE_IDLE) {
 				/* assume they want to clear DRAIN and
 				 * FAIL flags too */
 				base_state &= NODE_STATE_BASE;
@@ -1115,15 +1183,13 @@ int update_node ( update_node_msg_t * update_node_msg )
 				bit_set (up_node_bitmap, node_inx);
 				node_ptr->last_idle = now;
 				reset_job_priority();
-			}
-			else if (state_val == NODE_STATE_ALLOCATED) {
+			} else if (state_val == NODE_STATE_ALLOCATED) {
 				if (!(node_ptr->node_state & (NODE_STATE_DRAIN
 						| NODE_STATE_FAIL)))
 					bit_set(avail_node_bitmap, node_inx);
 				bit_set (up_node_bitmap, node_inx);
 				bit_clear (idle_node_bitmap, node_inx);
-			}
-			else if (state_val == NODE_STATE_DRAIN) {
+			} else if (state_val == NODE_STATE_DRAIN) {
 				bit_clear (avail_node_bitmap, node_inx);
 				state_val = node_ptr->node_state |
 					NODE_STATE_DRAIN;
@@ -1135,8 +1201,7 @@ int update_node ( update_node_msg_t * update_node_msg )
 						slurmctld_cluster_name,
 						node_ptr, now, NULL);
 				}
-			}
-			else if (state_val == NODE_STATE_FAIL) {
+			} else if (state_val == NODE_STATE_FAIL) {
 				bit_clear (avail_node_bitmap, node_inx);
 				state_val = node_ptr->node_state |
 					NODE_STATE_FAIL;
@@ -1147,13 +1212,33 @@ int update_node ( update_node_msg_t * update_node_msg )
 						acct_db_conn, 
 						slurmctld_cluster_name,
 						node_ptr, now, NULL);
-			}
-			else if (state_val == NODE_STATE_NO_RESPOND) {
+			} else if (state_val == NODE_STATE_POWER_SAVE) {
+				if (node_ptr->node_state &
+				    NODE_STATE_POWER_SAVE) {
+					verbose("node %s already powered down",
+						this_node_name);
+				} else {
+					node_ptr->last_idle = 0;
+					info("powering down node %s",
+					     this_node_name);
+				}
+				continue;
+			} else if (state_val == NODE_STATE_POWER_UP) {
+				if (!(node_ptr->node_state &
+				    NODE_STATE_POWER_SAVE)) {
+					verbose("node %s already powered up",
+						this_node_name);
+				} else {
+					node_ptr->last_idle = now;
+					info("powering up node %s",
+					     this_node_name);
+				}
+				continue;
+			} else if (state_val == NODE_STATE_NO_RESPOND) {
 				node_ptr->node_state |= NODE_STATE_NO_RESPOND;
 				state_val = base_state;
 				bit_clear(avail_node_bitmap, node_inx);
-			}
-			else {
+			} else {
 				info ("Invalid node state specified %u", 
 					state_val);
 				err_code = 1;
@@ -1189,6 +1274,18 @@ int update_node ( update_node_msg_t * update_node_msg )
 			update_node_msg->node_names, 
 			update_node_msg->features);
 	}
+
+	/* Update weight. Weight is part of config_ptr, so the config
+	 * record may need to be split */
+	if ((error_code == 0) && (update_node_msg->weight != NO_VAL)) {
+		error_code = _update_node_weight(update_node_msg->node_names,
+						 update_node_msg->weight);
+		if (!error_code) {
+			/* sort config_list by weight for scheduling */
+			list_sort(config_list, &list_compare_config);
+		}
+	}
 
 	return error_code;
 }
@@ -1238,6 +1335,88 @@ extern void restore_node_features(void)
 	}
 }
 
+/*
+ * _update_node_weight - Update weight associated with nodes
+ *	build new config list records as needed
+ * IN node_names - List of nodes to update
+ * IN weight - New weight value
+ * RET: SLURM_SUCCESS or error code
+ */
+static int _update_node_weight(char *node_names, uint32_t weight)
+{
+	bitstr_t *node_bitmap = NULL, *tmp_bitmap;
+	ListIterator config_iterator;
+	struct config_record *config_ptr, *new_config_ptr;
+	struct config_record *first_new = NULL;
+	int rc, config_cnt, tmp_cnt;
+
+	rc = node_name2bitmap(node_names, false, &node_bitmap);
+	if (rc) {
+		info("_update_node_weight: invalid node_name");
+		return rc;
+	}
+
+	/* For each config_record with one of these nodes, 
+	 * update it (if all nodes updated) or split it into 
+	 * a new entry */
+	config_iterator = list_iterator_create(config_list);
+	if (config_iterator == NULL)
+		fatal("list_iterator_create malloc failure");
+	while ((config_ptr = (struct config_record *)
+			list_next(config_iterator))) {
+		if (config_ptr == first_new)
+			break;	/* done with all original records */
+
+		tmp_bitmap = bit_copy(node_bitmap);
+		bit_and(tmp_bitmap, config_ptr->node_bitmap);
+		config_cnt = bit_set_count(config_ptr->node_bitmap);
+		tmp_cnt = bit_set_count(tmp_bitmap);
+		if (tmp_cnt == 0) {
+			/* no overlap, leave alone */
+		} else if (tmp_cnt == config_cnt) {
+			/* all nodes changed, update in situ */
+			config_ptr->weight = weight;
+		} else {
+			/* partial update, split config_record */
+			new_config_ptr = create_config_record();
+			if (first_new == NULL)
+				first_new = new_config_ptr;
+			new_config_ptr->magic       = config_ptr->magic;
+			new_config_ptr->cpus        = config_ptr->cpus;
+			new_config_ptr->sockets     = config_ptr->sockets;
+			new_config_ptr->cores       = config_ptr->cores;
+			new_config_ptr->threads     = config_ptr->threads;
+			new_config_ptr->real_memory = config_ptr->real_memory;
+			new_config_ptr->tmp_disk    = config_ptr->tmp_disk;
+			/* Change weight for the given node */
+			new_config_ptr->weight      = weight;
+			if (config_ptr->feature) {
+				new_config_ptr->feature = xstrdup(config_ptr->
+								  feature);
+			}
+			build_config_feature_array(new_config_ptr);
+			new_config_ptr->node_bitmap = bit_copy(tmp_bitmap);
+			new_config_ptr->nodes = 
+				bitmap2node_name(tmp_bitmap);
+			_update_config_ptr(tmp_bitmap, new_config_ptr);
+
+			/* Update remaining records */ 
+			bit_not(tmp_bitmap);
+			bit_and(config_ptr->node_bitmap, tmp_bitmap);
+			xfree(config_ptr->nodes);
+			config_ptr->nodes = bitmap2node_name(
+				config_ptr->node_bitmap);
+		}
+		bit_free(tmp_bitmap);
+	}
+	list_iterator_destroy(config_iterator);
+	bit_free(node_bitmap);
+ 
+	info("_update_node_weight: nodes %s weight set to: %u",
+		node_names, weight);
+	return SLURM_SUCCESS;
+}
+
 /*
  * _update_node_features - Update features associated with nodes
  *	build new config list records as needed
@@ -1417,6 +1596,8 @@ static bool _valid_node_state_change(uint16_t old, uint16_t new)
 		case NODE_STATE_DRAIN:
 		case NODE_STATE_FAIL:
 		case NODE_STATE_NO_RESPOND:
+		case NODE_STATE_POWER_SAVE:
+		case NODE_STATE_POWER_UP:
 			return true;
 			break;
 
@@ -1424,6 +1605,7 @@ static bool _valid_node_state_change(uint16_t old, uint16_t new)
 			if (base_state == NODE_STATE_UNKNOWN)
 				return false;
 			if ((base_state == NODE_STATE_DOWN)
+			||  (base_state == NODE_STATE_FUTURE)
 			||  (node_flags & NODE_STATE_DRAIN)
 			||  (node_flags & NODE_STATE_FAIL))
 				return true;
@@ -1462,6 +1644,7 @@ extern int validate_node_specs(slurm_node_registration_status_msg_t *reg_msg)
 	char *reason_down = NULL;
 	uint16_t base_state, node_flags;
 	time_t now = time(NULL);
+	static uint32_t cr_flag = NO_VAL, gang_flag = NO_VAL;
 
 	node_ptr = find_node_record (reg_msg->node_name);
 	if (node_ptr == NULL)
@@ -1502,16 +1685,45 @@ extern int validate_node_specs(slurm_node_registration_status_msg_t *reg_msg)
 	}
 	node_ptr->threads = reg_msg->threads;
 #else
+	if (cr_flag == NO_VAL) {
+		cr_flag = 0;  /* call is no-op for select/linear and bluegene */
+		if (select_g_get_info_from_plugin(SELECT_CR_PLUGIN,
+						  NULL, &cr_flag)) {
+			cr_flag = NO_VAL;	/* error */
+		}
+	}
+	if (gang_flag == NO_VAL) {
+		char *sched_type = slurm_get_sched_type();
+		if (strcmp(sched_type, "sched/gang"))
+			gang_flag = 0;
+		else
+			gang_flag = 1;
+		xfree(sched_type);
+	}
+
 	if (slurmctld_conf.fast_schedule != 2) {
-		int tot1, tot2;
-		tot1 = reg_msg->sockets * reg_msg->cores * reg_msg->threads;
-		tot2 = config_ptr->sockets * config_ptr->cores *
-			config_ptr->threads;
-		if (tot1 < tot2) {
+		int cores1, cores2;	/* total cores on node */
+		int threads1, threads2;	/* total threads on node */
+		cores1 = reg_msg->sockets * reg_msg->cores;
+		threads1 = cores1 * reg_msg->threads;
+		cores2 = config_ptr->sockets * config_ptr->cores;
+		threads2 = cores2 * config_ptr->threads;
+		if (threads1 < threads2) {
 			error("Node %s has low socket*core*thread count %u",
-				reg_msg->node_name, tot1);
+				reg_msg->node_name, threads1);
 			error_code = EINVAL;
 			reason_down = "Low socket*core*thread count";
+		} else if ((slurmctld_conf.fast_schedule == 0) &&
+			   ((cr_flag == 1) || (gang_flag == 1)) && 
+			   ((cores1 > cores2) || (threads1 > threads2))) {
+			error("Node %s has high socket*core*thread count %u, "
+			      "extra resources ignored", 
+			      reg_msg->node_name, threads1);
+			/* Preserve configured values */
+			reg_msg->cpus    = config_ptr->cpus;
+			reg_msg->sockets = config_ptr->sockets;
+			reg_msg->cores   = config_ptr->cores;
+			reg_msg->threads = config_ptr->threads;
 		}
 	}
 	node_ptr->sockets = reg_msg->sockets;
@@ -1519,15 +1731,24 @@ extern int validate_node_specs(slurm_node_registration_status_msg_t *reg_msg)
 	node_ptr->threads = reg_msg->threads;
 #endif
 
-	if ((slurmctld_conf.fast_schedule != 2)
-	&&  (reg_msg->cpus < config_ptr->cpus)) {
-		error ("Node %s has low cpu count %u", 
-			reg_msg->node_name, reg_msg->cpus);
-		error_code  = EINVAL;
-		reason_down = "Low CPUs";
+	if (slurmctld_conf.fast_schedule != 2) {
+		if (reg_msg->cpus < config_ptr->cpus) {
+			error ("Node %s has low cpu count %u", 
+				reg_msg->node_name, reg_msg->cpus);
+			error_code  = EINVAL;
+			reason_down = "Low CPUs";
+		} else if ((slurmctld_conf.fast_schedule == 0) &&
+			   ((cr_flag == 1) || (gang_flag == 1)) &&
+			   (reg_msg->cpus > config_ptr->cpus)) {
+			error("Node %s has high CPU count %u, "
+			      "extra resources ignored",
+			      reg_msg->node_name, reg_msg->cpus);
+			reg_msg->cpus    = config_ptr->cpus;
+		}
 	}
-	if ((node_ptr->cpus != reg_msg->cpus)
-	&&  (slurmctld_conf.fast_schedule == 0)) {
+
+	if ((node_ptr->cpus != reg_msg->cpus) &&
+	    (slurmctld_conf.fast_schedule == 0)) {
 		for (i=0; i<node_ptr->part_cnt; i++) {
 			node_ptr->part_pptr[i]->total_cpus += 
 				(reg_msg->cpus - node_ptr->cpus);
@@ -1535,8 +1756,8 @@ extern int validate_node_specs(slurm_node_registration_status_msg_t *reg_msg)
 	}
 	node_ptr->cpus = reg_msg->cpus;
 
-	if ((slurmctld_conf.fast_schedule != 2) 
-	&&  (reg_msg->real_memory < config_ptr->real_memory)) {
+	if ((slurmctld_conf.fast_schedule != 2) &&
+	    (reg_msg->real_memory < config_ptr->real_memory)) {
 		error ("Node %s has low real_memory size %u", 
 		       reg_msg->node_name, reg_msg->real_memory);
 		error_code  = EINVAL;
@@ -1544,8 +1765,8 @@ extern int validate_node_specs(slurm_node_registration_status_msg_t *reg_msg)
 	}
 	node_ptr->real_memory = reg_msg->real_memory;
 
-	if ((slurmctld_conf.fast_schedule != 2)
-	&&  (reg_msg->tmp_disk < config_ptr->tmp_disk)) {
+	if ((slurmctld_conf.fast_schedule != 2) &&
+	    (reg_msg->tmp_disk < config_ptr->tmp_disk)) {
 		error ("Node %s has low tmp_disk size %u",
 		       reg_msg->node_name, reg_msg->tmp_disk);
 		error_code = EINVAL;
@@ -1575,7 +1796,6 @@ extern int validate_node_specs(slurm_node_registration_status_msg_t *reg_msg)
 		}
 		last_node_update = time (NULL);
 		set_node_down(reg_msg->node_name, reason_down);
-		_sync_bitmaps(node_ptr, reg_msg->job_count);
 	} else if (reg_msg->status == ESLURMD_PROLOG_FAILED) {
 		if ((node_flags & (NODE_STATE_DRAIN | NODE_STATE_FAIL)) == 0) {
 #ifdef HAVE_BG
@@ -2008,8 +2228,15 @@ void node_not_resp (char *name, time_t msg_time)
 
 	for (i=0; i<node_record_count; i++) {
 		node_ptr = node_record_table_ptr + i;
-		node_ptr->not_responding = true;
+		if ((node_ptr->node_state & NODE_STATE_BASE)
+		    != NODE_STATE_DOWN) {
+			node_ptr->not_responding = true;
+			bit_clear (avail_node_bitmap, i);
+			node_ptr->node_state |= NODE_STATE_NO_RESPOND;
+			last_node_update = time(NULL);
+		}
 	}
 #else
 	node_ptr = find_node_record (name);
 	if (node_ptr == NULL) {
@@ -2105,7 +2332,8 @@ void set_node_down (char *name, char *reason)
 		xstrcat(node_ptr->reason, time_buf);
 	}
 	_make_node_down(node_ptr, now);
-	(void) kill_running_job_by_node_name(name, false);
+	(void) kill_running_job_by_node_name(name);
+	_sync_bitmaps(node_ptr, 0);
 
 	return;
 }
@@ -2205,7 +2433,8 @@ void msg_to_slurmd (slurm_msg_type_t msg_type)
 	int i;
 	shutdown_msg_t *shutdown_req;
 	agent_arg_t *kill_agent_args;
-	
+	uint16_t base_state;
+
 	kill_agent_args = xmalloc (sizeof (agent_arg_t));
 	kill_agent_args->msg_type = msg_type;
 	kill_agent_args->retry = 0;
@@ -2217,6 +2446,9 @@ void msg_to_slurmd (slurm_msg_type_t msg_type)
 	}
 
 	for (i = 0; i < node_record_count; i++) {
+		base_state = node_record_table_ptr[i].node_state &
+			     NODE_STATE_BASE;
+		if (base_state == NODE_STATE_FUTURE)
+			continue;
 		hostlist_push(kill_agent_args->hostlist, 
 			      node_record_table_ptr[i].name);
 		kill_agent_args->node_count++;
@@ -2357,9 +2589,8 @@ void make_node_idle(struct node_record *node_ptr,
 	time_t now = time(NULL);
 	
 	xassert(node_ptr);
-	if (job_ptr			/* Specific job completed */
-	&&  (job_ptr->job_state & JOB_COMPLETING)	/* Not a replay */
-	&&  (bit_test(job_ptr->node_bitmap, inx))) {	/* Not a replay */
+	if (job_ptr &&			/* Specific job completed */
+	    (bit_test(job_ptr->node_bitmap, inx))) {	/* Not a replay */
 		last_job_update = now;
 		bit_clear(job_ptr->node_bitmap, inx);
 		if (job_ptr->node_cnt) {
@@ -2452,6 +2683,7 @@ void node_fini(void)
 
 	FREE_NULL_BITMAP(idle_node_bitmap);
 	FREE_NULL_BITMAP(avail_node_bitmap);
+	FREE_NULL_BITMAP(power_node_bitmap);
 	FREE_NULL_BITMAP(share_node_bitmap);
 	FREE_NULL_BITMAP(up_node_bitmap);
 
@@ -2490,10 +2722,11 @@ extern int send_nodes_to_accounting(time_t event_time)
 }
 
 /* Given a config_record, clear any existing feature_array and
- * if feature is set, then rebuild feature_array */
+ * if feature is set, then rebuild feature_array
+ * Filter out any white-space from the feature string */
 extern void  build_config_feature_array(struct config_record *config_ptr)
 {
-	int i;
+	int i, j;
 	char *tmp_str, *token, *last = NULL;
 
 	/* clear any old feature_array */
@@ -2506,7 +2739,13 @@ extern void  build_config_feature_array(struct config_record *config_ptr)
 	if (config_ptr->feature) {
 		i = strlen(config_ptr->feature) + 1;	/* oversized */
 		config_ptr->feature_array = xmalloc(i * sizeof(char *));
-		tmp_str = xstrdup(config_ptr->feature);
+		tmp_str = xmalloc(i);
+		for (i=0, j=0; config_ptr->feature[i]; i++) {
+			if (!isspace(config_ptr->feature[i]))
+				tmp_str[j++] = config_ptr->feature[i];
+		}
+		if (i != j)
+			strcpy(config_ptr->feature, tmp_str);
 		i = 0;
 		token = strtok_r(tmp_str, ",", &last);
 		while (token) {
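
The compaction added above rewrites the feature string in place with all
white-space removed before it is tokenized on ",". A stand-alone sketch of
the same technique (strip_spaces() is a hypothetical helper, not a SLURM
function; the in-tree copy needs no explicit NUL terminator only because
SLURM's xmalloc() returns zero-filled memory):

    #include <ctype.h>

    /* Drop all white-space from a feature string in place,
     * e.g. "a, b ,c" becomes "a,b,c" */
    static void strip_spaces(char *feature)
    {
            int i, j;

            for (i = 0, j = 0; feature[i]; i++) {
                    if (!isspace((unsigned char)feature[i]))
                            feature[j++] = feature[i];
            }
            feature[j] = '\0';
    }
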
diff --git a/src/slurmctld/node_scheduler.c b/src/slurmctld/node_scheduler.c
index 7fb6d043d72674cefe2495f035d59da378c3fda4..f0fd311378ecc1d0b1d656d69d08a5e70651255d 100644
--- a/src/slurmctld/node_scheduler.c
+++ b/src/slurmctld/node_scheduler.c
@@ -3,13 +3,14 @@
  *	Note: there is a global node table (node_record_table_ptr) 
  *****************************************************************************
  *  Copyright (C) 2002-2007 The Regents of the University of California.
- *  Copyright (C) 2008 Lawrence Livermore National Security.
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -65,8 +66,11 @@
 
 #include "src/slurmctld/acct_policy.h"
 #include "src/slurmctld/agent.h"
+#include "src/slurmctld/basil_interface.h"
+#include "src/slurmctld/job_scheduler.h"
 #include "src/slurmctld/licenses.h"
 #include "src/slurmctld/node_scheduler.h"
+#include "src/slurmctld/reservation.h"
 #include "src/slurmctld/sched_plugin.h"
 #include "src/slurmctld/slurmctld.h"
 
@@ -74,7 +78,7 @@
 #define MAX_RETRIES   10
 
 struct node_set {		/* set of nodes with same configuration */
-	uint32_t cpus_per_node;	/* NOTE: This is the minimum count,
+	uint16_t cpus_per_node;	/* NOTE: This is the minimum count,
 				 * if FastSchedule==0 then individual 
 				 * nodes within the same configuration 
 				 * line (in slurm.conf) can actually 
@@ -103,8 +107,11 @@ static int _pick_best_nodes(struct node_set *node_set_ptr,
 			    struct part_record *part_ptr,
 			    uint32_t min_nodes, uint32_t max_nodes,
 			    uint32_t req_nodes, bool test_only);
+static void _reset_feature_counts(struct job_details *details_ptr);
+static bool _valid_feature_counts(struct job_details *details_ptr);
 static bitstr_t *_valid_features(struct job_details *detail_ptr, 
-				 struct config_record *config_ptr);
+				 struct config_record *config_ptr,
+				 bool update_count);
 
 
 /*
@@ -157,7 +164,12 @@ extern void deallocate_nodes(struct job_record *job_ptr, bool timeout,
 		error("slurm_sched_freealloc(%u): %m", job_ptr->job_id);
 	if (select_g_job_fini(job_ptr) != SLURM_SUCCESS)
 		error("select_g_job_fini(%u): %m", job_ptr->job_id);
-	
+	(void) epilog_slurmctld(job_ptr);
+
+#ifdef HAVE_CRAY_XT
+	basil_release(job_ptr);
+#endif /* HAVE_CRAY_XT */
+
 	agent_args = xmalloc(sizeof(agent_arg_t));
 	if (timeout)
 		agent_args->msg_type = REQUEST_KILL_TIMELIMIT;
@@ -277,6 +289,13 @@ static int _match_feature(char *seek, struct node_set *node_set_ptr)
  *	(uint16_t)NO_VAL	= default
  *	0			= exclusive
  *	1			= share=yes
+ *
+ * Return values:
+ *	0 = no sharing
+ *	1 = user requested sharing
+ *	2 = sharing enforced (either by partition or cons_res)
+ * (cons_res plugin needs to distinguish between "enforced" and
+ *  "requested" sharing)
  */
 static int
 _resolve_shared_status(uint16_t user_flag, uint16_t part_max_share,
@@ -287,13 +306,15 @@ _resolve_shared_status(uint16_t user_flag, uint16_t part_max_share,
 		return 0;
 	/* sharing if part=FORCE */
 	if (part_max_share & SHARED_FORCE)
-		return 1;
+		return 2;
 
 	if (cons_res_flag) {
 		/* sharing unless user requested exclusive */
 		if (user_flag == 0)
 			return 0;
-		return 1;
+		if (user_flag == 1)
+			return 1;
+		return 2;
 	} else {
 		/* no sharing if part=NO */
 		if (part_max_share == 1)
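
A minimal stand-alone analogue of the three-way result documented above,
with simplified names and flag handling (the real code tests the
SHARED_FORCE bit and reads its inputs from the job and partition records;
everything here is illustrative only):

    enum share_mode { SHARE_NONE = 0, SHARE_USER = 1, SHARE_FORCED = 2 };

    static enum share_mode resolve_shared(int user_flag, int part_share_no,
                                          int part_forced, int cons_res)
    {
            if (part_forced)                /* partition Shared=FORCE */
                    return SHARE_FORCED;
            if (cons_res) {                 /* consumable resources active */
                    if (user_flag == 0)     /* user asked for exclusive */
                            return SHARE_NONE;
                    if (user_flag == 1)     /* user asked to share */
                            return SHARE_USER;
                    return SHARE_FORCED;    /* default: sharing enforced */
            }
            if (part_share_no)              /* partition Shared=NO */
                    return SHARE_NONE;
            return (user_flag == 1) ? SHARE_USER : SHARE_NONE;
    }
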
@@ -323,10 +344,31 @@ _get_req_features(struct node_set *node_set_ptr, int node_set_size,
 	uint32_t saved_min_nodes, saved_job_min_nodes;
 	bitstr_t *saved_req_node_bitmap = NULL;
 	uint32_t saved_num_procs, saved_req_nodes;
-	int tmp_node_set_size;
+	int rc, tmp_node_set_size;
 	struct node_set *tmp_node_set_ptr;
 	int error_code = SLURM_SUCCESS, i;
 	bitstr_t *feature_bitmap, *accumulate_bitmap = NULL;
+	bitstr_t *save_avail_node_bitmap = NULL, *resv_bitmap;
+
+	/* Mark nodes reserved for other jobs as off limits for this job */
+	if (job_ptr->resv_name == NULL) {
+		time_t start_res = time(NULL);
+		rc = job_test_resv(job_ptr, &start_res, false, &resv_bitmap);
+		if ((rc != SLURM_SUCCESS) ||
+		    (job_ptr->details->req_node_bitmap &&
+		     (!bit_super_set(job_ptr->details->req_node_bitmap,
+				     resv_bitmap)))) {
+			FREE_NULL_BITMAP(resv_bitmap);
+			return ESLURM_NODES_BUSY;	/* reserved */
+		}
+		if (resv_bitmap &&
+		    (!bit_equal(resv_bitmap, avail_node_bitmap))) {
+			bit_and(resv_bitmap, avail_node_bitmap);
+			save_avail_node_bitmap = avail_node_bitmap;
+			avail_node_bitmap = resv_bitmap;
+		} else
+			FREE_NULL_BITMAP(resv_bitmap);
+	}
 
 	/* save job and request state */
 	saved_min_nodes = min_nodes;
@@ -348,7 +390,8 @@ _get_req_features(struct node_set *node_set_ptr, int node_set_size,
 	    (job_ptr->details->req_node_layout == NULL)) {
 		ListIterator feat_iter;
 		struct feature_record *feat_ptr;
-		feat_iter = list_iterator_create(job_ptr->details->feature_list);
+		feat_iter = list_iterator_create(
+				job_ptr->details->feature_list);
 		while ((feat_ptr = (struct feature_record *)
 				list_next(feat_iter))) {
 			if (feat_ptr->count == 0)
@@ -361,9 +404,11 @@ _get_req_features(struct node_set *node_set_ptr, int node_set_size,
 				if (!_match_feature(feat_ptr->name, 
 						    node_set_ptr+i))
 					continue;
-				tmp_node_set_ptr[tmp_node_set_size].cpus_per_node =
+				tmp_node_set_ptr[tmp_node_set_size].
+					cpus_per_node =
 					node_set_ptr[i].cpus_per_node;
-				tmp_node_set_ptr[tmp_node_set_size].real_memory =
+				tmp_node_set_ptr[tmp_node_set_size].
+					real_memory =
 					node_set_ptr[i].real_memory;
 				tmp_node_set_ptr[tmp_node_set_size].nodes =
 					node_set_ptr[i].nodes;
@@ -371,11 +416,13 @@ _get_req_features(struct node_set *node_set_ptr, int node_set_size,
 					node_set_ptr[i].weight;
 				tmp_node_set_ptr[tmp_node_set_size].features = 
 					xstrdup(node_set_ptr[i].features);
-				tmp_node_set_ptr[tmp_node_set_size].feature_array =
+				tmp_node_set_ptr[tmp_node_set_size].
+					feature_array =
 					node_set_ptr[i].feature_array;
-				tmp_node_set_ptr[tmp_node_set_size].feature_bits = 
+				tmp_node_set_ptr[tmp_node_set_size].
+					feature_bits = 
 					bit_copy(node_set_ptr[i].feature_bits);
-				tmp_node_set_ptr[tmp_node_set_size].my_bitmap = 
+				tmp_node_set_ptr[tmp_node_set_size].my_bitmap =
 					bit_copy(node_set_ptr[i].my_bitmap);
 				tmp_node_set_size++;
 			}
@@ -391,29 +438,34 @@ _get_req_features(struct node_set *node_set_ptr, int node_set_size,
 #if 0
 {
 			char *tmp_str = bitmap2node_name(feature_bitmap);
-			info("job %u needs %u nodes with feature %s, using %s", 
-				job_ptr->job_id, feat_ptr->count, 
-				feat_ptr->name, tmp_str);
+			info("job %u needs %u nodes with feature %s, "
+			     "using %s, error_code=%d", 
+			     job_ptr->job_id, feat_ptr->count, 
+			     feat_ptr->name, tmp_str, error_code);
 			xfree(tmp_str);
 }
 #endif
 			for (i=0; i<tmp_node_set_size; i++) {
 				xfree(tmp_node_set_ptr[i].features);
-				FREE_NULL_BITMAP(tmp_node_set_ptr[i].feature_bits);
-				FREE_NULL_BITMAP(tmp_node_set_ptr[i].my_bitmap);
+				FREE_NULL_BITMAP(tmp_node_set_ptr[i].
+						 feature_bits);
+				FREE_NULL_BITMAP(tmp_node_set_ptr[i].
+						 my_bitmap);
 			}
 			if (error_code != SLURM_SUCCESS)
 				break;
 			if (feature_bitmap) {
 				if (job_ptr->details->req_node_bitmap) {
-					bit_or(job_ptr->details->req_node_bitmap,
+					bit_or(job_ptr->details->
+					       req_node_bitmap,
 					       feature_bitmap);
 				} else {
 					job_ptr->details->req_node_bitmap =
 						bit_copy(feature_bitmap);
 				}
 				if (accumulate_bitmap) {
-					bit_or(accumulate_bitmap, feature_bitmap);
+					bit_or(accumulate_bitmap, 
+					       feature_bitmap);
 					bit_free(feature_bitmap);
 				} else
 					accumulate_bitmap = feature_bitmap;
@@ -479,6 +531,12 @@ _get_req_features(struct node_set *node_set_ptr, int node_set_size,
 	job_ptr->num_procs = saved_num_procs;
 	job_ptr->details->min_nodes = saved_job_min_nodes;
 
+	/* Restore available node bitmap, ignoring reservations */
+	if (save_avail_node_bitmap) {
+		bit_free(avail_node_bitmap);
+		avail_node_bitmap = save_avail_node_bitmap;
+	}
+
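+
The restore above completes the save/swap pattern set up at the top of the
function: the global avail_node_bitmap is temporarily replaced by its
intersection with the reservation bitmap so every callee sees only
reservable nodes. Sketched with assumed local names:

    bitstr_t *save = avail_node_bitmap;     /* keep the global */
    bit_and(resv_bitmap, save);             /* restrict to reservable nodes */
    avail_node_bitmap = resv_bitmap;        /* callees see the subset */
    /* ... node selection runs here ... */
    bit_free(avail_node_bitmap);            /* discard restricted copy */
    avail_node_bitmap = save;               /* restore the global */
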
 	return error_code;
 }
 
@@ -534,6 +592,8 @@ _pick_best_nodes(struct node_set *node_set_ptr, int node_set_size,
 	bool runable_avail = false;	/* Job can run with available nodes */
 	bool tried_sched = false;	/* Tried to schedule with avail nodes */
 	static uint32_t cr_enabled = NO_VAL;
+	static bool sched_gang_test = false;
+	static bool sched_gang = false;
 	select_type_plugin_info_t cr_type = SELECT_TYPE_INFO_NONE; 
 	int shared = 0, select_mode;
 
@@ -551,7 +611,7 @@ _pick_best_nodes(struct node_set *node_set_ptr, int node_set_size,
 	if (cr_enabled == NO_VAL) {
 		cr_enabled = 0;	/* select/linear and bluegene are no-ops */
 		error_code = select_g_get_info_from_plugin (SELECT_CR_PLUGIN, 
-							    &cr_enabled);
+							    NULL, &cr_enabled);
 		if (error_code != SLURM_SUCCESS) {
 			cr_enabled = NO_VAL;
 			return error_code;
@@ -569,23 +629,19 @@ _pick_best_nodes(struct node_set *node_set_ptr, int node_set_size,
 
 		cr_type = (select_type_plugin_info_t) slurmctld_conf.
 							select_type_param;
-                debug3("Job %u shared %d cr_enabled %d CR type %d num_procs %d", 
-		     job_ptr->job_id, shared, cr_enabled, cr_type, 
-		     job_ptr->num_procs);
 
-		if (shared == 0) {
-			partially_idle_node_bitmap = bit_copy(idle_node_bitmap);
-		} else {
-			/* Update partially_idle_node_bitmap to reflect the
-			 * idle and partially idle nodes */
-			error_code = select_g_get_info_from_plugin (
-					SELECT_BITMAP, 
-					&partially_idle_node_bitmap);
-			if (error_code != SLURM_SUCCESS) {
-				FREE_NULL_BITMAP(partially_idle_node_bitmap);
-				return error_code;
-			}
+		/* Set the partially_idle_node_bitmap to reflect the
+		 * idle and partially idle nodes */
+		error_code = select_g_get_info_from_plugin (SELECT_BITMAP,
+					job_ptr, &partially_idle_node_bitmap);
+		if (error_code != SLURM_SUCCESS) {
+			FREE_NULL_BITMAP(partially_idle_node_bitmap);
+			return error_code;
 		}
+                debug3("Job %u shared %d CR type %d num_procs %d nbits %d", 
+		     job_ptr->job_id, shared, cr_enabled, cr_type, 
+		     job_ptr->num_procs,
+		     bit_set_count(partially_idle_node_bitmap));
         }
 
 	if (job_ptr->details->req_node_bitmap) {  /* specific nodes required */
@@ -616,17 +672,33 @@ _pick_best_nodes(struct node_set *node_set_ptr, int node_set_size,
 				return ESLURM_NODES_BUSY;
 			}
 		}
-		if (shared) {
-			if (!bit_super_set(job_ptr->details->req_node_bitmap, 
-					   share_node_bitmap)) {
-				FREE_NULL_BITMAP(partially_idle_node_bitmap);
-				return ESLURM_NODES_BUSY;
-			}
-		} else {
-			if (!bit_super_set(job_ptr->details->req_node_bitmap, 
-					   idle_node_bitmap)) {
-				FREE_NULL_BITMAP(partially_idle_node_bitmap);
-				return ESLURM_NODES_BUSY;
+		/* If preemption is available via sched/gang, then
+		 * do NOT limit the set of available nodes by their
+		 * current 'sharable' or 'idle' setting */
+		if (!sched_gang_test) {
+			char *sched_type = slurm_get_sched_type();
+			if (strcmp(sched_type, "sched/gang") == 0)
+				sched_gang = true;
+			xfree(sched_type);
+			sched_gang_test = true;
+		}
+		if (!sched_gang) {
+			if (shared) {
+				if (!bit_super_set(job_ptr->details->
+						   req_node_bitmap, 
+						   share_node_bitmap)) {
+					FREE_NULL_BITMAP(
+						partially_idle_node_bitmap);
+					return ESLURM_NODES_BUSY;
+				}
+			} else {
+				if (!bit_super_set(job_ptr->details->
+						   req_node_bitmap, 
+						   idle_node_bitmap)) {
+					FREE_NULL_BITMAP(
+						partially_idle_node_bitmap);
+					return ESLURM_NODES_BUSY;
+				}
 			}
 		}
 
@@ -648,6 +720,9 @@ _pick_best_nodes(struct node_set *node_set_ptr, int node_set_size,
 			max_feature = j;
 	}
 
+	debug3("_pick_best_nodes: job %u idle_nodes %u share_nodes %u",
+		job_ptr->job_id, bit_set_count(idle_node_bitmap),
+		bit_set_count(share_node_bitmap));
 	/* Accumulate resources for this job based upon its required 
 	 * features (possibly with node counts). */
 	for (j = min_feature; j <= max_feature; j++) {
@@ -655,9 +730,10 @@ _pick_best_nodes(struct node_set *node_set_ptr, int node_set_size,
 			if (!bit_test(node_set_ptr[i].feature_bits, j))
 				continue;
 
-			if (total_bitmap)
-				bit_or(total_bitmap, node_set_ptr[i].my_bitmap);
-			else {
+			if (total_bitmap) {
+				bit_or(total_bitmap, 
+				       node_set_ptr[i].my_bitmap);
+			} else {
 				total_bitmap = bit_copy(
 						node_set_ptr[i].my_bitmap);
 				if (total_bitmap == NULL)
@@ -669,16 +745,29 @@ _pick_best_nodes(struct node_set *node_set_ptr, int node_set_size,
 				bit_and(node_set_ptr[i].my_bitmap,
 					partially_idle_node_bitmap);
 			}
-			if (shared) {
-				bit_and(node_set_ptr[i].my_bitmap,
-					share_node_bitmap);
-			} else {
-				bit_and(node_set_ptr[i].my_bitmap,
-					idle_node_bitmap);
+			/* If preemption is available via sched/gang, then
+			 * do NOT limit the set of available nodes by their
+			 * current 'sharable' or 'idle' setting */
+			if (!sched_gang_test) {
+				char *sched_type = slurm_get_sched_type();
+				if (strcmp(sched_type, "sched/gang") == 0)
+					sched_gang = true;
+				xfree(sched_type);
+				sched_gang_test = true;
 			}
-			if (avail_bitmap)
-				bit_or(avail_bitmap, node_set_ptr[i].my_bitmap);
-			else {
+			if (!sched_gang) {
+				if (shared) {
+					bit_and(node_set_ptr[i].my_bitmap,
+						share_node_bitmap);
+				} else {
+					bit_and(node_set_ptr[i].my_bitmap,
+						idle_node_bitmap);
+				}
+			}
+			if (avail_bitmap) {
+				bit_or(avail_bitmap, 
+				       node_set_ptr[i].my_bitmap);
+			} else {
 				avail_bitmap = bit_copy(
 					node_set_ptr[i].my_bitmap);
 				if (avail_bitmap == NULL)
@@ -688,7 +777,8 @@ _pick_best_nodes(struct node_set *node_set_ptr, int node_set_size,
 			tried_sched = false;	/* need to test these nodes */
 
 			if (shared && ((i+1) < node_set_size) && 
-			    (node_set_ptr[i].weight == node_set_ptr[i+1].weight)) {
+			    (node_set_ptr[i].weight == 
+			     node_set_ptr[i+1].weight)) {
 				/* Keep accumulating so we can pick the
 				 * most lightly loaded nodes */
 				continue;
@@ -782,11 +872,11 @@ _pick_best_nodes(struct node_set *node_set_ptr, int node_set_size,
 					fatal("bit_copy malloc failure");
 				bit_and(avail_bitmap, avail_node_bitmap);
 				pick_code = select_g_job_test(job_ptr, 
-							      avail_bitmap, 
-							      min_nodes, 
-							      max_nodes,
-							      req_nodes,
-							      SELECT_MODE_TEST_ONLY);
+						avail_bitmap, 
+						min_nodes, 
+						max_nodes,
+						req_nodes,
+						SELECT_MODE_TEST_ONLY);
 				if (pick_code == SLURM_SUCCESS) {
 					runable_ever  = true;
 					if (bit_set_count(avail_bitmap) <=
@@ -799,11 +889,11 @@ _pick_best_nodes(struct node_set *node_set_ptr, int node_set_size,
 			}
 			if (!runable_ever) {
 				pick_code = select_g_job_test(job_ptr, 
-							      total_bitmap, 
-							      min_nodes, 
-							      max_nodes,
-							      req_nodes, 
-							      SELECT_MODE_TEST_ONLY);
+						total_bitmap, 
+						min_nodes, 
+						max_nodes,
+						req_nodes, 
+						SELECT_MODE_TEST_ONLY);
 				if (pick_code == SLURM_SUCCESS) {
 					FREE_NULL_BITMAP(possible_bitmap);
 					possible_bitmap = total_bitmap;
@@ -824,7 +914,8 @@ _pick_best_nodes(struct node_set *node_set_ptr, int node_set_size,
 		error_code = ESLURM_REQUESTED_PART_CONFIG_UNAVAILABLE;
 	if (!runable_ever) {
 		error_code = ESLURM_REQUESTED_NODE_CONFIG_UNAVAILABLE;
-		info("_pick_best_nodes: job %u never runnable", job_ptr->job_id);
+		info("_pick_best_nodes: job %u never runnable", 
+		     job_ptr->job_id);
 	}
 
 	if (error_code == SLURM_SUCCESS) {
@@ -898,6 +989,7 @@ extern int select_nodes(struct job_record *job_ptr, bool test_only,
 		 fail_reason = WAIT_PART_NODE_LIMIT;
 	if (fail_reason != WAIT_NO_REASON) {
 		job_ptr->state_reason = fail_reason;
+		xfree(job_ptr->state_desc);
 		last_job_update = now;
 		if (job_ptr->priority == 0)	/* user/admin hold */
 			return ESLURM_JOB_HELD;
@@ -955,11 +1047,16 @@ extern int select_nodes(struct job_record *job_ptr, bool test_only,
 			debug3("JobId=%u not runnable with present config",
 			       job_ptr->job_id);
 			job_ptr->state_reason = WAIT_PART_NODE_LIMIT;
+			xfree(job_ptr->state_desc);
 			if (job_ptr->priority != 0)  /* Move to end of queue */
 				job_ptr->priority = 1;
 			last_job_update = now;
+		} else if (error_code == ESLURM_RESERVATION_NOT_USABLE) {
+			job_ptr->state_reason = WAIT_RESERVATION;
+			xfree(job_ptr->state_desc);
 		} else {
 			job_ptr->state_reason = WAIT_RESOURCES;
+			xfree(job_ptr->state_desc);
 			if (error_code == ESLURM_NODES_BUSY)
 				slurm_sched_job_is_pending();
 		}
@@ -971,6 +1068,15 @@ extern int select_nodes(struct job_record *job_ptr, bool test_only,
 		goto cleanup;
 	}
 
+#ifdef HAVE_CRAY_XT
+	if (basil_reserve(job_ptr) != SLURM_SUCCESS) {
+		job_ptr->state_reason = WAIT_RESOURCES;
+		xfree(job_ptr->state_desc);
+		error_code = ESLURM_NODES_BUSY;
+		goto cleanup;
+	}
+#endif	/* HAVE_CRAY_XT */
+
 	/* This job may be getting requeued, clear vestigial 
 	 * state information before over-writing and leaking
 	 * memory. */
@@ -990,8 +1096,8 @@ extern int select_nodes(struct job_record *job_ptr, bool test_only,
 				    (365 * 24 * 60 * 60); /* secs in year */
 	else
 		job_ptr->end_time = job_ptr->start_time + 
-				    (job_ptr->time_limit * 60);   /* secs */
-
+			(job_ptr->time_limit * 60);   /* secs */
+	
 	if (select_g_job_begin(job_ptr) != SLURM_SUCCESS) {
 		/* Leave job queued, something is hosed */
 		error("select_g_job_begin(%u): %m", job_ptr->job_id);
@@ -1004,6 +1110,7 @@ extern int select_nodes(struct job_record *job_ptr, bool test_only,
 
 	/* assign the nodes and stage_in the job */
 	job_ptr->state_reason = WAIT_NO_REASON;
+	xfree(job_ptr->state_desc);
 	job_ptr->nodes = bitmap2node_name(select_bitmap);
 	select_bitmap = NULL;	/* nothing left to free */
 	allocate_nodes(job_ptr);
@@ -1018,9 +1125,9 @@ extern int select_nodes(struct job_record *job_ptr, bool test_only,
 
 	acct_policy_job_begin(job_ptr);
 
-	jobacct_storage_g_job_start(
-		acct_db_conn, slurmctld_cluster_name, job_ptr);
-
+	jobacct_storage_g_job_start(acct_db_conn, slurmctld_cluster_name, 
+				    job_ptr);
+	prolog_slurmctld(job_ptr);
 	slurm_sched_newalloc(job_ptr);
 
       cleanup:
@@ -1039,11 +1146,48 @@ extern int select_nodes(struct job_record *job_ptr, bool test_only,
 	return error_code;
 }
 
+/* Clear tmp_cnt for all features of given job */
+static void _reset_feature_counts(struct job_details *details_ptr)
+{
+	ListIterator feat_iter;
+	struct feature_record *feat_ptr;
+
+	if (details_ptr->feature_list == NULL)	/* no constraints */
+		return;
+
+	feat_iter = list_iterator_create(details_ptr->feature_list);
+	while ((feat_ptr = (struct feature_record *) list_next(feat_iter))) {
+		feat_ptr->tmp_cnt = 0;
+	}
+	list_iterator_destroy(feat_iter);
+}
+
+/* Verify that tmp_cnt >= count for all features of given job */
+static bool _valid_feature_counts(struct job_details *details_ptr)
+{
+	ListIterator feat_iter;
+	struct feature_record *feat_ptr;
+	bool result = true;
+
+	if (details_ptr->feature_list == NULL)	/* no constraints */
+		return result;
+
+	feat_iter = list_iterator_create(details_ptr->feature_list);
+	while ((feat_ptr = (struct feature_record *) list_next(feat_iter))) {
+		if (feat_ptr->tmp_cnt >= feat_ptr->count)
+			continue;
+		result = false;
+		break;
+	}
+	list_iterator_destroy(feat_iter);
+	return result;
+}
+
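
Taken together these helpers implement an accumulate-then-check pattern:
job_req_node_filter() below first zeroes each tmp_cnt, _valid_features()
then increments it once per node that carries the feature, and
_valid_feature_counts() finally rejects the request if any count target was
missed. A minimal analogue with illustrative types (not SLURM's):

    struct feat_req { const char *name; int count; int tmp_cnt; };

    /* Return 1 only if every per-feature node-count target was reached */
    static int counts_satisfied(struct feat_req *req, int nreq)
    {
            int i;

            for (i = 0; i < nreq; i++) {
                    if (req[i].tmp_cnt < req[i].count)
                            return 0;
            }
            return 1;
    }
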
 /*
  * job_req_node_filter - job request node filter.
  *	clear from a bitmap the nodes which cannot be used for a job;
  *	test memory size, required features, processor count, etc.
- * NOTE: Does not support exclusive OR of features or feature counts.
+ * NOTE: Does not support exclusive OR of features.
  *	It just matches first element of XOR and ignores count.
  * IN job_ptr - pointer to the job record being scheduled
  * IN/OUT bitmap - set of nodes being considered for use
@@ -1065,23 +1209,27 @@ extern int job_req_node_filter(struct job_record *job_ptr,
 		return EINVAL;
 	}
 
+	_reset_feature_counts(detail_ptr);
 	mc_ptr = detail_ptr->mc_ptr;
 	for (i=0; i< node_record_count; i++) {
 		if (!bit_test(avail_bitmap, i))
 			continue;
 		node_ptr = node_record_table_ptr + i;
 		config_ptr = node_ptr->config_ptr;
-		feature_bitmap = _valid_features(detail_ptr, config_ptr);
-		if ((feature_bitmap == NULL) || (!bit_test(feature_bitmap, 0))) {
+		feature_bitmap = _valid_features(detail_ptr, config_ptr, true);
+		if ((feature_bitmap == NULL) || 
+		    (!bit_test(feature_bitmap, 0))) {
 			bit_clear(avail_bitmap, i);
 			continue;
 		}
 		FREE_NULL_BITMAP(feature_bitmap);
 		if (slurmctld_conf.fast_schedule) {
-			if ((detail_ptr->job_min_procs    > config_ptr->cpus       )
+			if ((detail_ptr->job_min_procs    > 
+			     config_ptr->cpus       )
 			||  ((detail_ptr->job_min_memory & (~MEM_PER_CPU)) > 
 			      config_ptr->real_memory) 
-			||  (detail_ptr->job_min_tmp_disk > config_ptr->tmp_disk)) {
+			||  (detail_ptr->job_min_tmp_disk > 
+			     config_ptr->tmp_disk)) {
 				bit_clear(avail_bitmap, i);
 				continue;
 			}
@@ -1091,15 +1239,18 @@ extern int job_req_node_filter(struct job_record *job_ptr,
 			||   (mc_ptr->min_threads     > config_ptr->threads  )
 			||   (mc_ptr->job_min_sockets > config_ptr->sockets  )
 			||   (mc_ptr->job_min_cores   > config_ptr->cores    )
-			||   (mc_ptr->job_min_threads > config_ptr->threads  ))) {
+			||   (mc_ptr->job_min_threads > 
+			      config_ptr->threads  ))) {
 				bit_clear(avail_bitmap, i);
 				continue;
 			}
 		} else {
-			if ((detail_ptr->job_min_procs    > node_ptr->cpus       )
+			if ((detail_ptr->job_min_procs    > 
+			     node_ptr->cpus       )
 			||  ((detail_ptr->job_min_memory & (~MEM_PER_CPU)) >
 			      node_ptr->real_memory) 
-			||  (detail_ptr->job_min_tmp_disk > node_ptr->tmp_disk)) {
+			||  (detail_ptr->job_min_tmp_disk > 
+			     node_ptr->tmp_disk)) {
 				bit_clear(avail_bitmap, i);
 				continue;
 			}
@@ -1116,6 +1267,10 @@ extern int job_req_node_filter(struct job_record *job_ptr,
 		}
 	}
 	FREE_NULL_BITMAP(feature_bitmap);
+
+	if (!_valid_feature_counts(detail_ptr))
+		return EINVAL;
+
 	return SLURM_SUCCESS;
 }
 
@@ -1133,26 +1288,57 @@ static int _build_node_list(struct job_record *job_ptr,
 			    struct node_set **node_set_pptr,
 			    int *node_set_size)
 {
-	int node_set_inx;
+	int i, node_set_inx, power_cnt, rc;
 	struct node_set *node_set_ptr;
 	struct config_record *config_ptr;
 	struct part_record *part_ptr = job_ptr->part_ptr;
 	ListIterator config_iterator;
 	int check_node_config, config_filter = 0;
 	struct job_details *detail_ptr = job_ptr->details;
-	bitstr_t *exc_node_mask = NULL;
+	bitstr_t *power_up_bitmap = NULL, *usable_node_mask = NULL;
 	multi_core_data_t *mc_ptr = detail_ptr->mc_ptr;
 	bitstr_t *tmp_feature;
+	uint32_t max_weight = 0;
+
+	if (job_ptr->resv_name) {
+		/* Limit node selection to those in selected reservation */
+		time_t start_res = time(NULL);
+		rc = job_test_resv(job_ptr, &start_res, false,
+				   &usable_node_mask);
+		if (rc != SLURM_SUCCESS) {
+			job_ptr->state_reason = WAIT_RESERVATION;
+			xfree(job_ptr->state_desc);
+			if (rc == ESLURM_INVALID_TIME_VALUE)
+				return ESLURM_RESERVATION_NOT_USABLE;
+			/* Defunct reservation or access denied */
+			return ESLURM_REQUESTED_NODE_CONFIG_UNAVAILABLE;
+		}
+		if ((detail_ptr->req_node_bitmap) &&
+		    (!bit_super_set(detail_ptr->req_node_bitmap, 
+				    usable_node_mask))) {
+			job_ptr->state_reason = WAIT_RESERVATION;
+			xfree(job_ptr->state_desc);
+			FREE_NULL_BITMAP(usable_node_mask);
+			/* Required nodes outside of the reservation */
+			return ESLURM_REQUESTED_NODE_CONFIG_UNAVAILABLE;
+		}
+	}
 
 	node_set_inx = 0;
 	node_set_ptr = (struct node_set *) 
 			xmalloc(sizeof(struct node_set) * 2);
 	node_set_ptr[node_set_inx+1].my_bitmap = NULL;
 	if (detail_ptr->exc_node_bitmap) {
-		exc_node_mask = bit_copy(detail_ptr->exc_node_bitmap);
-		if (exc_node_mask == NULL)
-			fatal("bit_copy malloc failure");
-		bit_not(exc_node_mask);
+		if (usable_node_mask) {
+			bit_not(detail_ptr->exc_node_bitmap);
+			bit_and(usable_node_mask, detail_ptr->exc_node_bitmap);
+			bit_not(detail_ptr->exc_node_bitmap);
+		} else {
+			usable_node_mask = 
+				bit_copy(detail_ptr->exc_node_bitmap);
+			if (usable_node_mask == NULL)
+				fatal("bit_copy malloc failure");
+			bit_not(usable_node_mask);
+		}
 	}
 
 	config_iterator = list_iterator_create(config_list);
@@ -1195,9 +1381,9 @@ static int _build_node_list(struct job_record *job_ptr,
 			fatal("bit_copy malloc failure");
 		bit_and(node_set_ptr[node_set_inx].my_bitmap,
 			part_ptr->node_bitmap);
-		if (exc_node_mask) {
+		if (usable_node_mask) {
 			bit_and(node_set_ptr[node_set_inx].my_bitmap,
-				exc_node_mask);
+				usable_node_mask);
 		}
 		node_set_ptr[node_set_inx].nodes =
 			bit_set_count(node_set_ptr[node_set_inx].my_bitmap);
@@ -1211,7 +1397,8 @@ static int _build_node_list(struct job_record *job_ptr,
 			continue;
 		}
 
-		tmp_feature = _valid_features(job_ptr->details, config_ptr);
+		tmp_feature = _valid_features(job_ptr->details, config_ptr, 
+					      false);
 		if (tmp_feature == NULL) {
 			FREE_NULL_BITMAP(node_set_ptr[node_set_inx].my_bitmap);
 			continue;
@@ -1221,9 +1408,10 @@ static int _build_node_list(struct job_record *job_ptr,
 		node_set_ptr[node_set_inx].cpus_per_node =
 			config_ptr->cpus;
 		node_set_ptr[node_set_inx].real_memory =
-			config_ptr->real_memory;		
+			config_ptr->real_memory;
 		node_set_ptr[node_set_inx].weight =
 			config_ptr->weight;
+		max_weight = MAX(max_weight, config_ptr->weight);
 		node_set_ptr[node_set_inx].features = 
 			xstrdup(config_ptr->feature);
 		node_set_ptr[node_set_inx].feature_array = 
@@ -1242,7 +1430,7 @@ static int _build_node_list(struct job_record *job_ptr,
 	xfree(node_set_ptr[node_set_inx].features);
 	FREE_NULL_BITMAP(node_set_ptr[node_set_inx].my_bitmap);
 	FREE_NULL_BITMAP(node_set_ptr[node_set_inx].feature_bits);
-	FREE_NULL_BITMAP(exc_node_mask);
+	FREE_NULL_BITMAP(usable_node_mask);
 
 	if (node_set_inx == 0) {
 		info("No nodes satisfy job %u requirements", 
@@ -1251,6 +1439,51 @@ static int _build_node_list(struct job_record *job_ptr,
 		return ESLURM_REQUESTED_NODE_CONFIG_UNAVAILABLE;
 	}
 
+	/* If any nodes are powered down, put them into a new node_set
+	 * record with a higher scheduling weight. This means we avoid
+	 * scheduling jobs on powered-down nodes where possible. */
+	for (i = (node_set_inx-1); i >= 0; i--) {
+		power_cnt = bit_overlap(node_set_ptr[i].my_bitmap,
+				        power_node_bitmap);
+		if (power_cnt == 0)
+			continue;	/* no nodes powered down */
+		if (power_cnt == node_set_ptr[i].nodes) {
+			node_set_ptr[i].weight += max_weight;	/* avoid all */
+			continue;	/* all nodes powered down */
+		}
+
+		/* Some nodes powered down, others up, split record */
+		node_set_ptr[node_set_inx].cpus_per_node =
+			node_set_ptr[i].cpus_per_node;
+		node_set_ptr[node_set_inx].real_memory =
+			node_set_ptr[i].real_memory;
+		node_set_ptr[node_set_inx].nodes = power_cnt;
+		node_set_ptr[i].nodes -= power_cnt;
+		node_set_ptr[node_set_inx].weight =
+			node_set_ptr[i].weight + max_weight;
+		node_set_ptr[node_set_inx].features =
+			xstrdup(node_set_ptr[i].features);
+		node_set_ptr[node_set_inx].feature_array =
+			node_set_ptr[i].feature_array;
+		node_set_ptr[node_set_inx].feature_bits =
+			bit_copy(node_set_ptr[i].feature_bits);
+		node_set_ptr[node_set_inx].my_bitmap = 
+			bit_copy(node_set_ptr[i].my_bitmap);
+		bit_and(node_set_ptr[node_set_inx].my_bitmap,
+			power_node_bitmap);
+		if (power_up_bitmap == NULL) {
+			power_up_bitmap = bit_copy(power_node_bitmap);
+			bit_not(power_up_bitmap);
+		}
+		bit_and(node_set_ptr[i].my_bitmap, power_up_bitmap);
+
+		node_set_inx++;
+		xrealloc(node_set_ptr,
+			 sizeof(struct node_set) * (node_set_inx + 2));
+		node_set_ptr[node_set_inx + 1].my_bitmap = NULL;
+	}
+	FREE_NULL_BITMAP(power_up_bitmap);
+
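
The split biases scheduling away from powered-down nodes without excluding
them: adding max_weight guarantees the new node_set sorts after every fully
powered-up set. For example, with configuration weights of 10 and 20 (so
max_weight is 20), a powered-down subset of the weight-10 set is given
weight 30 and is considered only after both powered-up sets fail to satisfy
the job.
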
 	*node_set_size = node_set_inx;
 	*node_set_pptr = node_set_ptr;
 	return SLURM_SUCCESS;
@@ -1358,25 +1591,8 @@ static int _nodes_in_sets(bitstr_t *req_bitmap,
 	return error_code;
 }
 
-/* Update record of a job's allocated processors for each step */
-static void _alloc_step_cpus(struct job_record *job_ptr)
-{
-	ListIterator step_iterator;
-	struct step_record *step_ptr;
-
-	if (job_ptr->step_list == NULL)
-		return;
-
-	step_iterator = list_iterator_create(job_ptr->step_list);
-	while ((step_ptr = (struct step_record *) list_next(step_iterator))) {
-		step_alloc_lps(step_ptr);
-	}
-	list_iterator_destroy(step_iterator);
-}
-
 /*
- * build_node_details - set cpu counts and addresses for allocated nodes:
- *	cpu_count_reps, cpus_per_node, node_addr, node_cnt, num_cpu_groups
+ * build_node_details - sets addresses for allocated nodes
  * IN job_ptr - pointer to a job record
  */
 extern void build_node_details(struct job_record *job_ptr)
@@ -1384,107 +1600,30 @@ extern void build_node_details(struct job_record *job_ptr)
 	hostlist_t host_list = NULL;
 	struct node_record *node_ptr;
 	char *this_node_name;
-        int error_code = SLURM_SUCCESS;
-	int node_inx = 0, cpu_inx = -1;
-        int cr_count = 0;
-	uint32_t total_procs = 0;
+	int node_inx = 0;
 
 	if ((job_ptr->node_bitmap == NULL) || (job_ptr->nodes == NULL)) {
 		/* No nodes allocated, we're done... */
-		job_ptr->num_cpu_groups = 0;
 		job_ptr->node_cnt = 0;
-		job_ptr->cpus_per_node = NULL;
-		job_ptr->cpu_count_reps = NULL;
 		job_ptr->node_addr = NULL;
-		job_ptr->alloc_lps_cnt = 0;
-		xfree(job_ptr->alloc_lps);
-		xfree(job_ptr->used_lps);
 		return;
 	}
-
-	job_ptr->num_cpu_groups = 0;
 	
 	/* Use hostlist here to insure ordering of info matches that of srun */
 	if ((host_list = hostlist_create(job_ptr->nodes)) == NULL)
 		fatal("hostlist_create error for %s: %m", job_ptr->nodes);
-
 	job_ptr->node_cnt = hostlist_count(host_list);	
-
-	xrealloc(job_ptr->cpus_per_node, 
-		 (sizeof(uint32_t) * job_ptr->node_cnt));
-	xrealloc(job_ptr->cpu_count_reps, 
-		 (sizeof(uint32_t) * job_ptr->node_cnt));
 	xrealloc(job_ptr->node_addr, 
 		 (sizeof(slurm_addr) * job_ptr->node_cnt));	
 
-	job_ptr->alloc_lps_cnt = job_ptr->node_cnt;
-	xrealloc(job_ptr->alloc_lps,
-		 (sizeof(uint32_t) * job_ptr->node_cnt));
-	xrealloc(job_ptr->used_lps,
-		 (sizeof(uint32_t) * job_ptr->node_cnt));
-
 	while ((this_node_name = hostlist_shift(host_list))) {
-		node_ptr = find_node_record(this_node_name);
-		     		
-		if (node_ptr) {
-			uint16_t usable_lps = 0;
-#ifdef HAVE_BG
-			if(job_ptr->node_cnt == 1) {
-				memcpy(&job_ptr->node_addr[node_inx++],
-				       &node_ptr->slurm_addr, 
-				       sizeof(slurm_addr));
-				cpu_inx++;
-				
-				job_ptr->cpus_per_node[cpu_inx] =
-					job_ptr->num_procs;
-				total_procs += job_ptr->num_procs;
-				job_ptr->cpu_count_reps[cpu_inx] = 1;
-				job_ptr->alloc_lps[0] = job_ptr->num_procs;
-				job_ptr->used_lps[0]  = 0;
-				goto cleanup;
-			}
-#endif
-			error_code = select_g_get_extra_jobinfo( 
-				node_ptr, job_ptr, SELECT_AVAIL_CPUS, 
-				&usable_lps);
-			if (error_code == SLURM_SUCCESS) {
-				if (job_ptr->alloc_lps) {
-					job_ptr->used_lps[cr_count] = 0;
-					job_ptr->alloc_lps[cr_count++] =
-								usable_lps;
-				}
-			} else {
-				error("Unable to get extra jobinfo "
-				      "from JobId=%u", job_ptr->job_id);
-				/* Job is likely completed according to 
-				 * select plugin */
-				if (job_ptr->alloc_lps) {
-					job_ptr->used_lps[cr_count] = 0;
-					job_ptr->alloc_lps[cr_count++] = 0;
-				}
-			}
-			
+		if ((node_ptr = find_node_record(this_node_name))) {
 			memcpy(&job_ptr->node_addr[node_inx++],
 			       &node_ptr->slurm_addr, sizeof(slurm_addr));
-
-			if ((cpu_inx == -1) ||
-			    (job_ptr->cpus_per_node[cpu_inx] !=
-			     usable_lps)) {
-				cpu_inx++;
-				job_ptr->cpus_per_node[cpu_inx] =
-					usable_lps;
-				job_ptr->cpu_count_reps[cpu_inx] = 1;
-			} else
-				job_ptr->cpu_count_reps[cpu_inx]++;
-			total_procs +=  usable_lps;
-
 		} else {
 			error("Invalid node %s in JobId=%u",
 			      this_node_name, job_ptr->job_id);
 		}
-#ifdef HAVE_BG
- cleanup:	
-#endif
 		free(this_node_name);
 	}
 	hostlist_destroy(host_list);
@@ -1492,9 +1631,6 @@ extern void build_node_details(struct job_record *job_ptr)
 		error("Node count mismatch for JobId=%u (%u,%u)",
 		      job_ptr->job_id, job_ptr->node_cnt, node_inx);
 	}
-	job_ptr->num_cpu_groups = cpu_inx + 1;
-	job_ptr->total_procs = total_procs;
-	_alloc_step_cpus(job_ptr);	/* reset counters */
 }
 
 /*
@@ -1502,6 +1638,8 @@ extern void build_node_details(struct job_record *job_ptr)
  *	the available nodes
  * IN details_ptr - job requirement details, includes requested features
  * IN config_ptr - node's configuration record
+ * IN update_count - if set, then increment tmp_cnt (temporary counter)
+ *	for matched features
  * RET NULL if request is not satisfied, otherwise a bitmap indicating 
  *	which mutually exclusive features are satisfied. For example
  *	_valid_features("[fs1|fs2|fs3|fs4]", "fs3") returns a bitmap with
@@ -1512,34 +1650,44 @@ extern void build_node_details(struct job_record *job_ptr)
  *	mutually exclusive feature list.
  */
 static bitstr_t *_valid_features(struct job_details *details_ptr, 
-				 struct config_record *config_ptr)
+				 struct config_record *config_ptr,
+				 bool update_count)
 {
 	bitstr_t *result_bits = (bitstr_t *) NULL;
 	ListIterator feat_iter;
 	struct feature_record *feat_ptr;
-	int found, last_op, position = 0, result;
-	int save_op = FEATURE_OP_AND, save_result=1;
+	bool found, test_names, result;
+	int last_op, position = 0;
+	int save_op = FEATURE_OP_AND, save_result = 1;
 
-	if (details_ptr->feature_list == NULL) {/* no constraints */
+	if (details_ptr->feature_list == NULL) {	/* no constraints */
 		result_bits = bit_alloc(MAX_FEATURES);
 		bit_set(result_bits, 0);
 		return result_bits;
 	}
 
-	result = 1;				/* assume good for now */
+	result = true;				/* assume good for now */
 	last_op = FEATURE_OP_AND;
 	feat_iter = list_iterator_create(details_ptr->feature_list);
 	while ((feat_ptr = (struct feature_record *) list_next(feat_iter))) {
-		found = 0;
-		if (feat_ptr->count)
-			found = 1;
-		else if (config_ptr->feature_array) {
+		test_names = false;
+		found = false;
+		if (feat_ptr->count) {
+			found = true;
+			if (update_count)
+				test_names = true;
+		} else	
+			test_names = true;
+
+		if (test_names && config_ptr->feature_array) {
 			int i;
 			for (i=0; config_ptr->feature_array[i]; i++) {
 				if (strcmp(feat_ptr->name, 
 					   config_ptr->feature_array[i]))
 					continue;
-				found = 1;
+				found = true;
+				if (update_count && feat_ptr->count)
+					feat_ptr->tmp_cnt++;
 				break;
 			}
 		}
@@ -1654,6 +1802,8 @@ extern void re_kill_job(struct job_record *job_ptr)
 
 	if (agent_args->node_count == 0) {
 		xfree(kill_job);
+		if (agent_args->hostlist)
+			hostlist_destroy(agent_args->hostlist);
 		xfree(agent_args);
 		hostlist_destroy(kill_hostlist);
 		return;
diff --git a/src/slurmctld/node_scheduler.h b/src/slurmctld/node_scheduler.h
index a59fa5b1e1e9a3a5bc6c3b93b8cb5a18306763d6..d8ae5d80a1a32af3b4cb515826b00ced2477d098 100644
--- a/src/slurmctld/node_scheduler.h
+++ b/src/slurmctld/node_scheduler.h
@@ -5,10 +5,11 @@
  *  Copyright (C) 2008 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette@llnl.gov> et. al.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -47,8 +48,7 @@
 extern void allocate_nodes(struct job_record *job_ptr);
 
 /*
- * build_node_details - set cpu counts and addresses for allocated nodes:
- *	cpu_count_reps, cpus_per_node, node_addr, node_cnt, num_cpu_groups
+ * build_node_details - sets addresses for allocated nodes
  * IN job_ptr - pointer to a job record
  */
 extern void build_node_details(struct job_record *job_ptr);
diff --git a/src/slurmctld/partition_mgr.c b/src/slurmctld/partition_mgr.c
index 6a0b0976fdc9be42369f6ed1eacfcf0c0709ccb8..9ad1c2ec279ef3c96a51bf7216effd3ff773820d 100644
--- a/src/slurmctld/partition_mgr.c
+++ b/src/slurmctld/partition_mgr.c
@@ -2,15 +2,16 @@
  *  partition_mgr.c - manage the partition information of slurm
  *	Note: there is a global partition list (part_list) and
  *	time stamp (last_part_update)
- *  $Id: partition_mgr.c 15121 2008-09-19 18:31:06Z da $
+ *  $Id: partition_mgr.c 17701 2009-06-03 21:02:09Z da $
  *****************************************************************************
  *  Copyright (C) 2002-2007 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette@llnl.gov> et. al.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -67,7 +68,7 @@
 #include "src/slurmctld/slurmctld.h"
 
 /* Change PART_STATE_VERSION value when changing the state save format */
-#define PART_STATE_VERSION      "VER001"
+#define PART_STATE_VERSION      "VER002"
 
 /* Global variables */
 struct part_record default_part;	/* default configuration values */
@@ -75,6 +76,7 @@ List part_list = NULL;			/* partition list */
 char *default_part_name = NULL;		/* name of default partition */
 struct part_record *default_part_loc = NULL; /* default partition location */
 time_t last_part_update;	/* time of last update to partition records */
+uint16_t part_max_priority = 0;         /* max priority in all partitions */
 
 static int    _build_part_bitmap(struct part_record *part_ptr);
 static int    _delete_part_record(char *name);
@@ -109,8 +111,7 @@ static int _build_part_bitmap(struct part_record *part_ptr)
 	part_ptr->total_nodes = 0;
 
 	if (part_ptr->node_bitmap == NULL) {
-		part_ptr->node_bitmap = 
-			(bitstr_t *) bit_alloc(node_record_count);
+		part_ptr->node_bitmap = bit_alloc(node_record_count);
 		if (part_ptr->node_bitmap == NULL)
 			fatal("bit_alloc malloc failure");
 		old_bitmap = NULL;
@@ -222,6 +223,7 @@ struct part_record *create_part_record(void)
 	part_ptr->disable_root_jobs = default_part.disable_root_jobs;
 	part_ptr->hidden            = default_part.hidden;
 	part_ptr->max_time          = default_part.max_time;
+	part_ptr->default_time      = default_part.default_time;
 	part_ptr->max_nodes         = default_part.max_nodes;
 	part_ptr->max_nodes_orig    = default_part.max_nodes;
 	part_ptr->min_nodes         = default_part.min_nodes;
@@ -230,6 +232,9 @@ struct part_record *create_part_record(void)
 	part_ptr->state_up          = default_part.state_up;
 	part_ptr->max_share         = default_part.max_share;
 	part_ptr->priority          = default_part.priority;
+	if (part_max_priority)
+		part_ptr->norm_priority = (double)default_part.priority 
+			/ (double)part_max_priority;
 	part_ptr->node_bitmap       = NULL;
 
 	if (default_part.allow_groups)
@@ -237,6 +242,12 @@ struct part_record *create_part_record(void)
 	else
 		part_ptr->allow_groups = NULL;
 
+	if (default_part.allow_alloc_nodes)
+		part_ptr->allow_alloc_nodes = xstrdup(default_part.
+						      allow_alloc_nodes);
+	else
+		part_ptr->allow_alloc_nodes = NULL;
+
 	if (default_part.nodes)
 		part_ptr->nodes = xstrdup(default_part.nodes);
 	else
@@ -278,6 +289,8 @@ static int _delete_part_record(char *name)
 /* dump_all_part_state - save the state of all partitions to file */
 int dump_all_part_state(void)
 {
+	/* Save high-water mark to avoid buffer growth with copies */
+	static int high_buffer_size = BUF_SIZE;
 	ListIterator part_iterator;
 	struct part_record *part_ptr;
 	int error_code = 0, log_fd;
@@ -285,7 +298,7 @@ int dump_all_part_state(void)
 	/* Locks: Read partition */
 	slurmctld_lock_t part_read_lock =
 	    { READ_LOCK, NO_LOCK, NO_LOCK, READ_LOCK };
-	Buf buffer = init_buf(BUF_SIZE);
+	Buf buffer = init_buf(high_buffer_size);
 	DEF_TIMERS;
 
 	START_TIMER;
@@ -296,6 +309,8 @@ int dump_all_part_state(void)
 	/* write partition records to buffer */
 	lock_slurmctld(part_read_lock);
 	part_iterator = list_iterator_create(part_list);
+	if (!part_iterator)
+		fatal("list_iterator_create malloc");
 	while ((part_ptr = (struct part_record *) list_next(part_iterator))) {
 		xassert (part_ptr->magic == PART_MAGIC);
 		_dump_part_state(part_ptr, buffer);
@@ -321,7 +336,7 @@ int dump_all_part_state(void)
 	} else {
 		int pos = 0, nwrite = get_buf_offset(buffer), amount;
 		char *data = (char *)get_buf_data(buffer);
-
+		high_buffer_size = MAX(nwrite, high_buffer_size);
 		while (nwrite > 0) {
 			amount = write(log_fd, &data[pos], nwrite);
 			if ((amount < 0) && (errno != EINTR)) {
@@ -372,6 +387,7 @@ static void _dump_part_state(struct part_record *part_ptr, Buf buffer)
 
 	packstr(part_ptr->name,          buffer);
 	pack32(part_ptr->max_time,       buffer);
+	pack32(part_ptr->default_time,   buffer);
 	pack32(part_ptr->max_nodes_orig, buffer);
 	pack32(part_ptr->min_nodes_orig, buffer);
 
@@ -383,6 +399,7 @@ static void _dump_part_state(struct part_record *part_ptr, Buf buffer)
 
 	pack16(part_ptr->state_up,       buffer);
 	packstr(part_ptr->allow_groups,  buffer);
+	packstr(part_ptr->allow_alloc_nodes, buffer);
 	packstr(part_ptr->nodes,         buffer);
 }
 
@@ -395,7 +412,7 @@ static void _dump_part_state(struct part_record *part_ptr, Buf buffer)
 int load_all_part_state(void)
 {
 	char *part_name, *allow_groups, *nodes, *state_file, *data = NULL;
-	uint32_t max_time, max_nodes, min_nodes;
+	uint32_t max_time, default_time, max_nodes, min_nodes;
 	time_t time;
 	uint16_t def_part_flag, hidden, root_only;
 	uint16_t max_share, priority, state_up;
@@ -405,6 +422,7 @@ int load_all_part_state(void)
 	int state_fd;
 	Buf buffer;
 	char *ver_str = NULL;
+	char* allow_alloc_nodes = NULL;
 
 	/* read the file */
 	state_file = xstrdup(slurmctld_conf.state_save_location);
@@ -458,6 +476,7 @@ int load_all_part_state(void)
 	while (remaining_buf(buffer) > 0) {
 		safe_unpackstr_xmalloc(&part_name, &name_len, buffer);
 		safe_unpack32(&max_time, buffer);
+		safe_unpack32(&default_time, buffer);
 		safe_unpack32(&max_nodes, buffer);
 		safe_unpack32(&min_nodes, buffer);
 
@@ -467,8 +486,12 @@ int load_all_part_state(void)
 		safe_unpack16(&max_share, buffer);
 		safe_unpack16(&priority,  buffer);
 
+		if (priority > part_max_priority)
+			part_max_priority = priority;
+
 		safe_unpack16(&state_up, buffer);
 		safe_unpackstr_xmalloc(&allow_groups, &name_len, buffer);
+		safe_unpackstr_xmalloc(&allow_alloc_nodes, &name_len, buffer);
 		safe_unpackstr_xmalloc(&nodes, &name_len, buffer);
 
 		/* validity test as possible */
@@ -494,6 +517,7 @@ int load_all_part_state(void)
 			part_cnt++;
 			part_ptr->hidden         = hidden;
 			part_ptr->max_time       = max_time;
+			part_ptr->default_time   = default_time;
 			part_ptr->max_nodes      = max_nodes;
 			part_ptr->max_nodes_orig = max_nodes;
 			part_ptr->min_nodes      = min_nodes;
@@ -506,9 +530,17 @@ int load_all_part_state(void)
 			part_ptr->root_only      = root_only;
 			part_ptr->max_share      = max_share;
 			part_ptr->priority       = priority;
+
+			if (part_max_priority)
+				part_ptr->norm_priority = 
+					(double)part_ptr->priority 
+					/ (double)part_max_priority;
+
 			part_ptr->state_up       = state_up;
 			xfree(part_ptr->allow_groups);
 			part_ptr->allow_groups   = allow_groups;
+			xfree(part_ptr->allow_alloc_nodes);
+			part_ptr->allow_alloc_nodes   = allow_alloc_nodes;
 			xfree(part_ptr->nodes);
 			part_ptr->nodes = nodes;
 		} else {
@@ -559,6 +591,7 @@ int init_part_conf(void)
 	default_part.disable_root_jobs = slurmctld_conf.disable_root_jobs;
 	default_part.hidden         = 0;
 	default_part.max_time       = INFINITE;
+	default_part.default_time   = NO_VAL;
 	default_part.max_nodes      = INFINITE;
 	default_part.max_nodes_orig = INFINITE;
 	default_part.min_nodes      = 1;
@@ -567,11 +600,13 @@ int init_part_conf(void)
 	default_part.state_up       = 1;
 	default_part.max_share      = 1;
 	default_part.priority       = 1;
+	default_part.norm_priority  = 0;
 	default_part.total_nodes    = 0;
 	default_part.total_cpus     = 0;
 	xfree(default_part.nodes);
 	xfree(default_part.allow_groups);
 	xfree(default_part.allow_uids);
+	xfree(default_part.allow_alloc_nodes);
 	FREE_NULL_BITMAP(default_part.node_bitmap);
 
 	if (part_list)		/* delete defunct partitions */
@@ -617,6 +652,7 @@ static void _list_delete_part(void *part_entry)
 	xfree(part_ptr->name);
 	xfree(part_ptr->allow_groups);
 	xfree(part_ptr->allow_uids);
+	xfree(part_ptr->allow_alloc_nodes);
 	xfree(part_ptr->nodes);
 	FREE_NULL_BITMAP(part_ptr->node_bitmap);
 	xfree(part_entry);
@@ -701,7 +737,7 @@ extern void pack_all_part(char **buffer_ptr, int *buffer_size,
 
 	buffer = init_buf(BUF_SIZE);
 
-	/* write haeader: version and time */
+	/* write header: version and time */
 	parts_packed = 0;
 	pack32(parts_packed, buffer);
 	pack_time(now, buffer);
@@ -711,7 +747,8 @@ extern void pack_all_part(char **buffer_ptr, int *buffer_size,
 	while ((part_ptr = (struct part_record *) list_next(part_iterator))) {
 		xassert (part_ptr->magic == PART_MAGIC);
 		if (((show_flags & SHOW_ALL) == 0) && (uid != 0) &&
-		    ((part_ptr->hidden) || (validate_group (part_ptr, uid) == 0)))
+		    ((part_ptr->hidden) 
+		     || (validate_group (part_ptr, uid) == 0)))
 			continue;
 		pack_part(part_ptr, buffer);
 		parts_packed++;
@@ -736,8 +773,8 @@ extern void pack_all_part(char **buffer_ptr, int *buffer_size,
  * IN/OUT buffer - buffer in which data is placed, pointers automatically 
  *	updated
  * global: default_part_loc - pointer to the default partition
- * NOTE: if you make any changes here be sure to make the corresponding 
- *	changes to load_part_config in api/partition_info.c
+ * NOTE: if you make any changes here be sure to make the corresponding changes
+ *	to _unpack_partition_info_members() in common/slurm_protocol_pack.c
  */
 void pack_part(struct part_record *part_ptr, Buf buffer)
 {
@@ -752,6 +789,7 @@ void pack_part(struct part_record *part_ptr, Buf buffer)
 
 	packstr(part_ptr->name, buffer);
 	pack32(part_ptr->max_time, buffer);
+	pack32(part_ptr->default_time, buffer);
 	pack32(part_ptr->max_nodes_orig, buffer);
 	pack32(part_ptr->min_nodes_orig, buffer);
 	altered = part_ptr->total_nodes;
@@ -771,6 +809,7 @@ void pack_part(struct part_record *part_ptr, Buf buffer)
 
 	pack16(part_ptr->state_up, buffer);
 	packstr(part_ptr->allow_groups, buffer);
+	packstr(part_ptr->allow_alloc_nodes, buffer);
 	packstr(part_ptr->nodes, buffer);
 	if (part_ptr->node_bitmap) {
 		bit_fmt(node_inx_ptr, BUF_SIZE,
@@ -782,32 +821,44 @@ void pack_part(struct part_record *part_ptr, Buf buffer)
 
 
 /* 
- * update_part - update a partition's configuration data
+ * update_part - create or update a partition's configuration data
  * IN part_desc - description of partition changes
+ * IN create_flag - create a new partition
  * RET 0 or an error code
  * global: part_list - list of partition entries
  *	last_part_update - update time of partition records
  */
-int update_part(update_part_msg_t * part_desc)
+extern int update_part (update_part_msg_t * part_desc, bool create_flag)
 {
 	int error_code;
 	struct part_record *part_ptr;
 
 	if (part_desc->name == NULL) {
-		error("update_part: invalid partition name, NULL");
+		info("update_part: invalid partition name, NULL");
 		return ESLURM_INVALID_PARTITION_NAME;
 	}
 
 	error_code = SLURM_SUCCESS;
 	part_ptr = list_find_first(part_list, &list_find_part, 
-					part_desc->name);
+				   part_desc->name);
 
-	if (part_ptr == NULL) {
-		info("update_part: partition %s does not exist, "
-			"being created", part_desc->name);
+	if (create_flag) {
+		if (part_ptr) {
+			verbose("Duplicate partition name for create (%s)",
+				part_desc->name);
+			return ESLURM_INVALID_PARTITION_NAME;
+		}
+		info("update_part: partition %s being created",
+		     part_desc->name);
 		part_ptr = create_part_record();
 		xfree(part_ptr->name);
 		part_ptr->name = xstrdup(part_desc->name);
+	} else {
+		if (!part_ptr) {
+			verbose("Update for partition not found (%s)",
+				part_desc->name);
+			return ESLURM_INVALID_PARTITION_NAME;
+		}
 	}
 
 	last_part_update = time(NULL);
@@ -824,6 +875,17 @@ int update_part(update_part_msg_t * part_desc)
 		part_ptr->max_time = part_desc->max_time;
 	}
 
+	if ((part_desc->default_time != NO_VAL) && 
+	    (part_desc->default_time > part_ptr->max_time)) {
+		info("update_part: DefaultTime would exceed MaxTime for "
+		     "partition %s", part_desc->name);
+	} else if (part_desc->default_time != NO_VAL) {
+		info("update_part: setting default_time to %u "
+		     "for partition %s", 
+		     part_desc->default_time, part_desc->name);
+		part_ptr->default_time = part_desc->default_time;
+	}
+
 	if (part_desc->max_nodes != NO_VAL) {
 		info("update_part: setting max_nodes to %u for partition %s", 
 		     part_desc->max_nodes, part_desc->name);
@@ -875,6 +937,26 @@ int update_part(update_part_msg_t * part_desc)
 		info("update_part: setting priority to %u for partition %s",
 		     part_desc->priority, part_desc->name);
 		part_ptr->priority = part_desc->priority;
+
+		/* If part_max_priority changes, the normalized
+		 * priority of every partition must be recomputed;
+		 * otherwise only this partition's normalized
+		 * priority is set. */
+		if (part_ptr->priority > part_max_priority) {
+			ListIterator itr = list_iterator_create(part_list);
+			struct part_record *part2 = NULL;
+
+			part_max_priority = part_ptr->priority;
+
+			while ((part2 = list_next(itr))) {
+				part2->norm_priority = (double)part2->priority
+					/ (double)part_max_priority;
+			}
+			list_iterator_destroy(itr);
+		} else {
+			part_ptr->norm_priority = (double)part_ptr->priority
+				/ (double)part_max_priority;
+		}
 	}
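
A worked example of the normalization maintained above, assuming
part_max_priority already holds the largest priority of any partition: with
partitions at priority 5, 10 and 20, part_max_priority is 20 and the
normalized priorities become 0.25, 0.50 and 1.00. As a sketch:

    #include <stdint.h>

    static double norm_priority(uint16_t priority, uint16_t max_priority)
    {
            if (max_priority == 0)          /* avoid division by zero */
                    return 0.0;
            return (double)priority / (double)max_priority;
    }
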
 
 	if (part_desc->default_part == 1) {
@@ -882,7 +964,8 @@ int update_part(update_part_msg_t * part_desc)
 			info("update_part: setting default partition to %s", 
 			     part_desc->name);
 		} else if (strcmp(default_part_name, part_desc->name) != 0) {
-			info("update_part: changing default partition from %s to %s", 
+			info("update_part: changing default "
+			     "partition from %s to %s", 
 			     default_part_name, part_desc->name);
 		}
 		xfree(default_part_name);
@@ -909,6 +992,24 @@ int update_part(update_part_msg_t * part_desc)
 		}
 	}
 
+	if (part_desc->allow_alloc_nodes != NULL) {
+		xfree(part_ptr->allow_alloc_nodes);
+		if ((part_desc->allow_alloc_nodes[0] == '\0') ||
+		    (strcasecmp(part_desc->allow_alloc_nodes, "ALL") == 0)) {
+			part_ptr->allow_alloc_nodes = NULL;
+			info("update_part: setting allow_alloc_nodes to ALL"
+			     " for partition %s",part_desc->name);
+		}
+		else {
+			part_ptr->allow_alloc_nodes = part_desc->
+						      allow_alloc_nodes;
+			part_desc->allow_alloc_nodes = NULL;
+			info("update_part: setting allow_alloc_nodes to %s for "
+			     "partition %s", 
+			     part_ptr->allow_alloc_nodes, part_desc->name);
+		}
+	}
+
 	if (part_desc->nodes != NULL) {
 		char *backup_node_list = part_ptr->nodes;
 
@@ -928,10 +1029,14 @@ int update_part(update_part_msg_t * part_desc)
 			xfree(part_ptr->nodes);
 			part_ptr->nodes = backup_node_list;
 		} else {
-			info("update_part: setting nodes to %s for partition %s", 
+			info("update_part: setting nodes to %s "
+			     "for partition %s", 
 			     part_ptr->nodes, part_desc->name);
 			xfree(backup_node_list);
 		}
+	} else if (part_ptr->node_bitmap == NULL) {
+		/* Newly created partition needs a bitmap, even if empty */
+		part_ptr->node_bitmap = bit_alloc(node_record_count);
 	}
 
 	if (error_code == SLURM_SUCCESS) {
@@ -974,6 +1079,35 @@ extern int validate_group(struct part_record *part_ptr, uid_t run_uid)
 
 }
 
+/*
+ * validate_alloc_node - validate that the allocating node
+ * is allowed to use this partition
+ * IN part_ptr - pointer to a partition
+ * IN alloc_node - allocating node of the request
+ * RET 1 if permitted to run, 0 otherwise
+ */
+extern int validate_alloc_node(struct part_record *part_ptr, char *alloc_node)
+{
+	int status;
+
+	if (part_ptr->allow_alloc_nodes == NULL)
+		return 1;	/* all allocating nodes allowed */
+	if (alloc_node == NULL)
+		return 1;	/* no allocating node specified, allow it */
+
+	hostlist_t hl = hostlist_create(part_ptr->allow_alloc_nodes);
+	status = hostlist_find(hl, alloc_node);
+	hostlist_destroy(hl);
+
+	if (status == -1)
+		status = 0;
+	else
+		status = 1;
+
+	return status;
+}
+
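
hostlist_find() (declared in src/common/hostlist.h) returns the position of
a host within the list, or -1 when absent, which validate_alloc_node()
above folds into a 0/1 permission result. A brief usage sketch:

    hostlist_t hl = hostlist_create("tux[0-3]");
    int allowed = (hostlist_find(hl, "tux2") != -1);    /* 1: tux2 present */
    hostlist_destroy(hl);
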
 /*
  * load_part_uid_allow_list - reload the allow_uid list of partitions
  *	if required (updated group file or force set)
@@ -1192,5 +1326,9 @@ extern int delete_partition(delete_part_msg_t *part_desc_ptr)
 	list_delete_all(part_list, list_find_part, part_desc_ptr->name);
 	last_part_update = time(NULL);
 
+	slurm_sched_partition_change();	/* notify sched plugin */
+	select_g_reconfigure();		/* notify select plugin too */
+	reset_job_priority();		/* free jobs */
+
 	return SLURM_SUCCESS;
 }
diff --git a/src/slurmctld/ping_nodes.c b/src/slurmctld/ping_nodes.c
index 7ad74efe8d44add66ff4ee3118e17349dba23d51..f2c51feab1d8af42f06ab2ce7f072a5fc55a1749 100644
--- a/src/slurmctld/ping_nodes.c
+++ b/src/slurmctld/ping_nodes.c
@@ -1,14 +1,15 @@
 /*****************************************************************************\
  *  ping_nodes.c - ping the slurmd daemons to test if they respond
  *****************************************************************************
- *  Copyright (C) 2003-2006 The Regents of the University of California.
+ *  Copyright (C) 2003-2007 The Regents of the University of California.
  *  Copyright (C) 2008 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov> et. al.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -176,7 +177,10 @@ void ping_nodes (void)
 		node_ptr = &node_record_table_ptr[i];
 		base_state   = node_ptr->node_state & NODE_STATE_BASE;
 		no_resp_flag = node_ptr->node_state & NODE_STATE_NO_RESPOND;
-		
+
+		if ((base_state == NODE_STATE_FUTURE) ||
+		    (node_ptr->node_state & NODE_STATE_POWER_SAVE))
+			continue;
 		if ((slurmctld_conf.slurmd_timeout == 0) &&
 		    (base_state != NODE_STATE_UNKNOWN)   &&
 		    (no_resp_flag == 0))
@@ -241,7 +245,7 @@ void ping_nodes (void)
 		hostlist_uniq(ping_agent_args->hostlist);
 		hostlist_ranged_string(ping_agent_args->hostlist, 
 			sizeof(host_str), host_str);
-		verbose("Spawning ping agent for %s", host_str);
+		debug("Spawning ping agent for %s", host_str);
 		ping_begin();
 		agent_queue_request(ping_agent_args);
 	}
@@ -253,8 +257,8 @@ void ping_nodes (void)
 		hostlist_uniq(reg_agent_args->hostlist);
 		hostlist_ranged_string(reg_agent_args->hostlist, 
 			sizeof(host_str), host_str);
-		verbose("Spawning registration agent for %s %d hosts", 
-			host_str, reg_agent_args->node_count);
+		debug("Spawning registration agent for %s %d hosts", 
+		      host_str, reg_agent_args->node_count);
 		ping_begin();
 		agent_queue_request(reg_agent_args);
 	}
@@ -287,7 +291,8 @@ extern void run_health_check(void)
 		node_ptr   = &node_record_table_ptr[i];
 		base_state = node_ptr->node_state & NODE_STATE_BASE;
 
-		if (base_state == NODE_STATE_DOWN)
+		if ((base_state == NODE_STATE_DOWN) ||
+		    (base_state == NODE_STATE_FUTURE))
 			continue;
 
 #ifdef HAVE_FRONT_END		/* Operate only on front-end */
@@ -306,7 +311,7 @@ extern void run_health_check(void)
 		hostlist_uniq(check_agent_args->hostlist);
 		hostlist_ranged_string(check_agent_args->hostlist, 
 			sizeof(host_str), host_str);
-		verbose("Spawning health check agent for %s", host_str);
+		debug("Spawning health check agent for %s", host_str);
 		ping_begin();
 		agent_queue_request(check_agent_args);
 	}
diff --git a/src/slurmctld/ping_nodes.h b/src/slurmctld/ping_nodes.h
index 2fae42d4b0459e4ddb29185a848a3bce91a2e707..1fc7af887d2d6d2571e7766870935b2a7a9827ee 100644
--- a/src/slurmctld/ping_nodes.h
+++ b/src/slurmctld/ping_nodes.h
@@ -4,10 +4,11 @@
  *  Copyright (C) 2003 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov> et. al.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/slurmctld/port_mgr.c b/src/slurmctld/port_mgr.c
new file mode 100644
index 0000000000000000000000000000000000000000..d11e71f260d1479e24215a3e3796eefd490d41f9
--- /dev/null
+++ b/src/slurmctld/port_mgr.c
@@ -0,0 +1,319 @@
+/*****************************************************************************\
+ *  port_mgr.c - manage the reservation of I/O ports on the nodes.
+ *	Designed for use with OpenMPI.
+ *****************************************************************************
+ *  Copyright (C) 2009 Lawrence Livermore National Security.
+ *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+ *  Written by Morris Jette <jette1@llnl.gov>
+ *  CODE-OCEC-09-009. All rights reserved.
+ *  
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
+ *  
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission 
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and 
+ *  distribute linked combinations including the two. You must obey the GNU 
+ *  General Public License in all respects for all of the code used other than 
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this 
+ *  exception to your version of the file(s), but you are not obligated to do 
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in 
+ *  the program, then also delete it here.
+ *  
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *  
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#ifdef HAVE_CONFIG_H
+#  include "config.h"
+#endif
+
+#include <string.h>
+#include <stdlib.h>
+
+#include "src/common/bitstring.h"
+#include "src/common/hostlist.h"
+#include "src/common/xmalloc.h"
+#include "src/common/xstring.h"
+
+#include "src/slurmctld/slurmctld.h"
+
+#define  _DEBUG 0
+
+bitstr_t **port_resv_table = (bitstr_t **) NULL;
+int        port_resv_cnt   = 0;
+int        port_resv_min   = 0;
+int        port_resv_max   = 0;
+
+static void _dump_resv_port_info(void);
+static void _make_all_resv(void);
+static void _make_step_resv(struct step_record *step_ptr);
+static void _rebuild_port_array(struct step_record *step_ptr);
+
+static void _dump_resv_port_info(void)
+{
+#if _DEBUG
+	int i;
+	char *tmp_char;
+
+	for (i=0; i<port_resv_cnt; i++) {
+		if (bit_set_count(port_resv_table[i]) == 0)
+			continue;
+
+		tmp_char = bitmap2node_name(port_resv_table[i]);
+		info("Port %d: %s", (i+port_resv_min), tmp_char);
+		xfree(tmp_char);
+	}
+#endif
+}
+
+/* Builds the job step's resv_port_array based upon resv_ports (a string) */
+static void _rebuild_port_array(struct step_record *step_ptr)
+{
+	int i;
+	char *tmp_char;
+	hostlist_t hl;
+
+	i = strlen(step_ptr->resv_ports);
+	tmp_char = xmalloc(i+3);
+	sprintf(tmp_char, "[%s]", step_ptr->resv_ports);
+	hl = hostlist_create(tmp_char);
+	xfree(tmp_char);
+	if (hl == NULL)
+		fatal("malloc failure: hostlist_create");
+
+	step_ptr->resv_port_array = xmalloc(sizeof(int) * 
+					    step_ptr->resv_port_cnt);
+	step_ptr->resv_port_cnt = 0;
+	while ((tmp_char = hostlist_shift(hl))) {
+		i = atoi(tmp_char);
+		if (i > 0)
+			step_ptr->resv_port_array[step_ptr->resv_port_cnt++]=i;
+		free(tmp_char);
+	}
+	hostlist_destroy(hl);
+	if (step_ptr->resv_port_cnt == 0) {
+		error("Problem recovering resv_port_array for step %u.%u: %s",
+		      step_ptr->job_ptr->job_id, step_ptr->step_id, 
+		      step_ptr->resv_ports);
+		xfree(step_ptr->resv_ports);
+	}
+}
+
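
A worked example of the expansion in _rebuild_port_array above, using
illustrative values: with resv_ports = "12000-12002,12010", tmp_char
becomes "[12000-12002,12010]", hostlist_shift() then yields "12000",
"12001", "12002" and "12010" in turn, so resv_port_array is rebuilt as
{12000, 12001, 12002, 12010} with resv_port_cnt = 4.
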
+/* Update the local reservation table for one job step,
+ * rebuilding its resv_port_array from resv_ports (a string) if needed */
+static void _make_step_resv(struct step_record *step_ptr)
+{
+	int i, j;
+
+	if ((step_ptr->resv_port_cnt == 0) ||
+	    (step_ptr->resv_ports == NULL) ||
+	    (step_ptr->resv_ports[0] == '\0'))
+		return;
+
+	if (step_ptr->resv_port_array == NULL)
+		_rebuild_port_array(step_ptr);
+
+	for (i=0; i<step_ptr->resv_port_cnt; i++) {
+		if ((step_ptr->resv_port_array[i] < port_resv_min) ||
+		    (step_ptr->resv_port_array[i] > port_resv_max)) 
+			continue;
+		j = step_ptr->resv_port_array[i] - port_resv_min;
+		bit_or(port_resv_table[j], step_ptr->step_node_bitmap);
+	}
+}
+
+/* Identify every job step with a port reservation and put the 
+ * reservation into the local reservation table. */
+static void _make_all_resv(void)
+{
+	struct job_record *job_ptr;
+	struct step_record *step_ptr;
+	ListIterator job_iterator, step_iterator;
+
+	job_iterator = list_iterator_create(job_list);
+	while ((job_ptr = (struct job_record *) list_next(job_iterator))) {
+		step_iterator = list_iterator_create(job_ptr->step_list);
+		while ((step_ptr = (struct step_record *) 
+				   list_next(step_iterator))) {
+			_make_step_resv(step_ptr);
+		}
+		list_iterator_destroy(step_iterator);
+	}
+	list_iterator_destroy(job_iterator);
+}
+
+/* Configure reserved ports.
+ * Call with mpi_params==NULL to free memory */
+extern int reserve_port_config(char *mpi_params)
+{
+	char *tmp_e=NULL, *tmp_p=NULL;
+	int i, p_min, p_max;
+
+	if (mpi_params)
+		tmp_p = strstr(mpi_params, "ports=");
+	if (tmp_p == NULL) {
+		if (port_resv_table) {
+			info("Clearing port reservations");
+			for (i=0; i<port_resv_cnt; i++)
+				bit_free(port_resv_table[i]);
+			xfree(port_resv_table);
+			port_resv_cnt = 0;
+			port_resv_min = port_resv_max = 0;
+		}
+		return SLURM_SUCCESS;
+	}
+
+	tmp_p += 6;
+	p_min = strtol(tmp_p, &tmp_e, 10);
+	if ((p_min < 1) || (tmp_e[0] != '-')) {
+		info("invalid MpiParams: %s", mpi_params);
+		return SLURM_ERROR;
+	}
+	tmp_e++;
+	p_max = strtol(tmp_e, NULL, 10);
+	if (p_max < p_min) {
+		info("invalid MpiParams: %s", mpi_params);
+		return SLURM_ERROR;
+	}
+
+	if ((p_min == port_resv_min) && (p_max == port_resv_max)) {
+		_dump_resv_port_info();
+		return SLURM_SUCCESS;	/* No change */
+	}
+
+	port_resv_min = p_min;
+	port_resv_max = p_max;
+	port_resv_cnt = p_max - p_min + 1;
+	debug("Ports available for reservation %d-%d",
+	      port_resv_min, port_resv_max);
+
+	xfree(port_resv_table);
+	port_resv_table = xmalloc(sizeof(bitstr_t *) * port_resv_cnt);
+	for (i=0; i<port_resv_cnt; i++)
+		port_resv_table[i] = bit_alloc(node_record_count);
+
+	_make_all_resv();
+	_dump_resv_port_info();
+	return SLURM_SUCCESS;
+}
+
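
A standalone sketch of the "ports=" parse above (a hypothetical helper,
not part of this patch), assuming an MpiParams value of the form
"ports=min-max":

	#include <stdlib.h>
	#include <string.h>

	/* Extract a port range from an MpiParams string.
	 * Returns 0 on success, -1 on a malformed range. */
	static int _parse_port_range(const char *mpi_params,
				     int *p_min, int *p_max)
	{
		char *end = NULL;
		const char *p = strstr(mpi_params, "ports=");

		if (p == NULL)
			return -1;
		*p_min = strtol(p + 6, &end, 10);
		if ((*p_min < 1) || (end == NULL) || (end[0] != '-'))
			return -1;
		*p_max = strtol(end + 1, NULL, 10);
		if (*p_max < *p_min)
			return -1;
		return 0;
	}

	/* _parse_port_range("ports=12000-12999", &min, &max)
	 * yields min = 12000, max = 12999. */
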
+/* Reserve ports for a job step
+ * NOTE: We keep track of last port reserved and go round-robin through full
+ *       set of available ports. This helps avoid re-using busy ports when
+ *       restarting job steps.
+ * RET SLURM_SUCCESS or an error code */
+extern int resv_port_alloc(struct step_record *step_ptr)
+{
+	int i, port_inx;
+	int *port_array = NULL;
+	char port_str[16], *tmp_str;
+	hostlist_t hl;
+	static int last_port_alloc = 0;
+
+	if (step_ptr->resv_port_cnt > port_resv_cnt) {
+		info("step %u.%u needs %u reserved ports, but only %d exist",
+		     step_ptr->job_ptr->job_id, step_ptr->step_id,
+		     step_ptr->resv_port_cnt, port_resv_cnt);
+		return ESLURM_PORTS_INVALID;
+	}
+
+	/* Identify available ports */
+	port_array = xmalloc(sizeof(int) * step_ptr->resv_port_cnt);
+	port_inx = 0;
+	for (i=0; i<port_resv_cnt; i++) {
+		if (++last_port_alloc >= port_resv_cnt)
+			last_port_alloc = 0;
+		if (bit_overlap(step_ptr->step_node_bitmap,
+				port_resv_table[last_port_alloc]))
+			continue;
+		port_array[port_inx++] = last_port_alloc;
+		if (port_inx >= step_ptr->resv_port_cnt)
+			break;
+	}
+	if (port_inx < step_ptr->resv_port_cnt) {
+		info("insufficient ports for step %u.%u to reserve (%d of %u)",
+		     step_ptr->job_ptr->job_id, step_ptr->step_id,
+		     port_inx, step_ptr->resv_port_cnt);
+		xfree(port_array);
+		return ESLURM_PORTS_BUSY;
+	}
+
+	/* Reserve selected ports */
+	hl = hostlist_create(NULL);
+	if (hl == NULL)
+		fatal("malloc: hostlist_create");
+	for (i=0; i<port_inx; i++) {
+		/* NOTE: We give the port a name like "[1234]" rather than 
+		 * just "1234" to avoid hostlists of the form "1[234-236]" */
+		bit_or(port_resv_table[port_array[i]], 
+		       step_ptr->step_node_bitmap);
+		port_array[i] += port_resv_min;
+		snprintf(port_str, sizeof(port_str), "[%d]", port_array[i]);
+		hostlist_push(hl, port_str);
+	}
+	hostlist_sort(hl);
+	for (i=1024; ; i*=2) {
+		step_ptr->resv_ports = xmalloc(i);
+		if (hostlist_ranged_string(hl, i, step_ptr->resv_ports) >= 0)
+			break;
+		xfree(step_ptr->resv_ports);
+	}
+	hostlist_destroy(hl);
+	step_ptr->resv_port_array = port_array;
+
+	if (step_ptr->resv_ports[0] == '[') {
+		/* Remove brackets from hostlist */
+		i = strlen(step_ptr->resv_ports);
+		step_ptr->resv_ports[i-1] = '\0';
+		tmp_str = xmalloc(i);
+		strcpy(tmp_str, step_ptr->resv_ports + 1);
+		xfree(step_ptr->resv_ports);
+		step_ptr->resv_ports = tmp_str;
+	}
+
+	debug("reserved ports %s for step %u.%u",
+	      step_ptr->resv_ports,
+	      step_ptr->job_ptr->job_id, step_ptr->step_id);
+
+	return SLURM_SUCCESS;
+}
+
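
The "[1234]" naming trick noted in resv_port_alloc() matters because
hostlist_ranged_string() compresses common digit prefixes. A hedged
illustration (assuming SLURM's internal hostlist API from
src/common/hostlist.h):

	/* Pushing bare "1234", "1235", "1236" could be compressed to
	 * the misleading "123[4-6]"; pushing "[1234]", "[1235]",
	 * "[1236]" instead yields "[1234-1236]", and stripping the
	 * outer brackets (as done above) leaves the clean port
	 * range "1234-1236". */
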
+/* Release the ports reserved for a job step
+ * (no return value; a no-op if no ports are reserved) */
+extern void resv_port_free(struct step_record *step_ptr)
+{
+	int i, j;
+
+	if (step_ptr->resv_port_array == NULL)
+		return;
+
+	bit_not(step_ptr->step_node_bitmap);
+	for (i=0; i<step_ptr->resv_port_cnt; i++) {
+		if ((step_ptr->resv_port_array[i] < port_resv_min) ||
+		    (step_ptr->resv_port_array[i] > port_resv_max)) 
+			continue;
+		j = step_ptr->resv_port_array[i] - port_resv_min;
+		bit_and(port_resv_table[j], step_ptr->step_node_bitmap);
+		
+	}
+	bit_not(step_ptr->step_node_bitmap);
+	xfree(step_ptr->resv_port_array);
+
+	debug("freed ports %s for step %u.%u",
+	      step_ptr->resv_ports,
+	      step_ptr->job_ptr->job_id, step_ptr->step_id);
+}
diff --git a/src/slurmctld/port_mgr.h b/src/slurmctld/port_mgr.h
new file mode 100644
index 0000000000000000000000000000000000000000..af4a87a418531da210dae065c4e3f9049677ddc5
--- /dev/null
+++ b/src/slurmctld/port_mgr.h
@@ -0,0 +1,57 @@
+/*****************************************************************************\
+ *  port_mgr.h - manage the reservation of I/O ports on the nodes.
+ *	Designed for use with OpenMPI.
+ *****************************************************************************
+ *  Copyright (C) 2009 Lawrence Livermore National Security.
+ *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+ *  Written by Morris Jette <jette1@llnl.gov>
+ *  CODE-OCEC-09-009. All rights reserved.
+ *  
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
+ *  
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission 
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and 
+ *  distribute linked combinations including the two. You must obey the GNU 
+ *  General Public License in all respects for all of the code used other than 
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this 
+ *  exception to your version of the file(s), but you are not obligated to do 
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in 
+ *  the program, then also delete it here.
+ *  
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *  
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#ifndef _HAVE_PORT_MGR_H
+#define _HAVE_PORT_MGR_H
+
+#include "src/slurmctld/slurmctld.h"
+
+/* Configure reserved ports.
+ * Call with mpi_params==NULL to free memory */
+extern int reserve_port_config(char *mpi_params);
+
+/* Reserve ports for a job step
+ * RET SLURM_SUCCESS or an error code */
+extern int resv_port_alloc(struct step_record *step_ptr);
+
+/* Release the ports reserved for a job step
+ * (no return value; a no-op if no ports are reserved) */
+extern void resv_port_free(struct step_record *step_ptr);
+
+#endif	/* !_HAVE_PORT_MGR_H */
diff --git a/src/slurmctld/power_save.c b/src/slurmctld/power_save.c
index ec454af5f5fe012b27ec8c8ce21d405b62b54fe7..4fb0b53cbb0c6d628fa617379ad6f0f7d59a7a25 100644
--- a/src/slurmctld/power_save.c
+++ b/src/slurmctld/power_save.c
@@ -1,17 +1,20 @@
 /*****************************************************************************\
  *  power_save.c - support node power saving mode. Nodes which have been 
  *  idle for an extended period of time will be placed into a power saving 
- *  mode by running an arbitrary script (typically to set frequency governor).
+ *  mode by running an arbitrary script. This script can lower the voltage
+ *  or frequency of the nodes or can completely power the nodes off.
  *  When the node is restored to normal operation, another script will be 
  *  executed. Many parameters are available to control this mode of operation.
  *****************************************************************************
  *  Copyright (C) 2007 The Regents of the University of California.
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -43,35 +46,56 @@
 #  include "config.h"
 #endif
 
+#include <signal.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <time.h>
+#include <unistd.h>
+
 #include "src/common/bitstring.h"
 #include "src/common/xstring.h"
 #include "src/slurmctld/locks.h"
 #include "src/slurmctld/slurmctld.h"
 
-#include <sys/stat.h>
-#include <sys/types.h>
-#include <sys/wait.h>
-#include <stdlib.h>
-#include <unistd.h>
+#if defined (HAVE_DECL_STRSIGNAL) && !HAVE_DECL_STRSIGNAL
+#  ifndef strsignal
+     extern char *strsignal(int);
+#  endif
+#endif /* defined HAVE_DECL_STRSIGNAL && !HAVE_DECL_STRSIGNAL */
+
+#define _DEBUG			0
+#define PID_CNT			10
+#define MAX_SHUTDOWN_DELAY	120	/* seconds to wait for child procs
+					 * to exit after daemon shutdown 
+					 * request, then orphan or kill proc */
 
-#define _DEBUG 0
+/* Records for tracking processes forked to suspend/resume nodes */
+pid_t  child_pid[PID_CNT];	/* pid of process		*/
+time_t child_time[PID_CNT];	/* start time of process	*/
 
-int idle_time, suspend_rate, resume_rate;
+int idle_time, suspend_rate, resume_timeout, resume_rate, suspend_timeout;
 char *suspend_prog = NULL, *resume_prog = NULL;
 char *exc_nodes = NULL, *exc_parts = NULL;
-time_t last_config = (time_t) 0;
+time_t last_config = (time_t) 0, last_suspend = (time_t) 0;
+uint16_t slurmd_timeout;
 
-bitstr_t *exc_node_bitmap = NULL;
-int suspend_cnt, resume_cnt;
+bitstr_t *exc_node_bitmap = NULL, *suspend_node_bitmap = NULL;
+int   suspend_cnt,   resume_cnt;
+float suspend_cnt_f, resume_cnt_f;
 
 static void  _clear_power_config(void);
 static void  _do_power_work(void);
 static void  _do_resume(char *host);
 static void  _do_suspend(char *host);
 static int   _init_power_config(void);
-static void  _kill_zombies(void);
+static int   _kill_procs(void);
+static int   _reap_procs(void);
 static void  _re_wake(void);
 static pid_t _run_prog(char *prog, char *arg);
+static void  _shutdown_power(void);
 static bool  _valid_prog(char *file_name);
 
 /* Perform any power change work to nodes */
@@ -80,20 +104,34 @@ static void _do_power_work(void)
 	static time_t last_log = 0, last_work_scan = 0;
 	int i, wake_cnt = 0, sleep_cnt = 0, susp_total = 0;
 	time_t now = time(NULL), delta_t;
-	uint16_t base_state, susp_state;
+	uint16_t base_state, comp_state, susp_state;
 	bitstr_t *wake_node_bitmap = NULL, *sleep_node_bitmap = NULL;
 	struct node_record *node_ptr;
+	bool run_suspend = false;
 
 	/* Set limit on counts of nodes to have state changed */
 	delta_t = now - last_work_scan;
 	if (delta_t >= 60) {
-		suspend_cnt = 0;
-		resume_cnt  = 0;
+		suspend_cnt_f = 0.0;
+		resume_cnt_f  = 0.0;
 	} else {
 		float rate = (60 - delta_t) / 60.0;
-		suspend_cnt *= rate;
-		resume_cnt  *= rate;
+		suspend_cnt_f *= rate;
+		resume_cnt_f  *= rate;
 	}
+	suspend_cnt = (suspend_cnt_f + 0.5);
+	resume_cnt  = (resume_cnt_f  + 0.5);
+
+	if (now > (last_suspend + suspend_timeout)) {
+		/* ready to start another round of node suspends */
+		run_suspend = true;
+		if (last_suspend) {
+			bit_nclear(suspend_node_bitmap, 0, 
+				   (node_record_count - 1));
+			last_suspend = (time_t) 0;
+		}
+	}
+
 	last_work_scan = now;
 
 	/* Build bitmaps identifying each node which should change state */
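
A worked example of the rate decay above (illustrative numbers): with
delta_t = 30 seconds and suspend_cnt_f = 10.0 carried from the previous
scan, rate = (60 - 30) / 60.0 = 0.5, so suspend_cnt_f becomes 5.0 and
suspend_cnt rounds to 5; at most SuspendRate - 5 additional suspends may
then be issued during this scan.
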
@@ -101,42 +139,59 @@ static void _do_power_work(void)
 		node_ptr = &node_record_table_ptr[i];
 		base_state = node_ptr->node_state & NODE_STATE_BASE;
 		susp_state = node_ptr->node_state & NODE_STATE_POWER_SAVE;
+		comp_state = node_ptr->node_state & NODE_STATE_COMPLETING;
 
 		if (susp_state)
 			susp_total++;
-		if (susp_state
-		&&  ((suspend_rate == 0) || (suspend_cnt <= suspend_rate))
-		&&  ((base_state == NODE_STATE_ALLOCATED)
-		||   (node_ptr->last_idle > (now - idle_time)))) {
-			if (wake_node_bitmap == NULL)
-				wake_node_bitmap = bit_alloc(node_record_count);
+
+		/* Resume nodes as appropriate */
+		if (susp_state &&
+		    ((resume_rate == 0) || (resume_cnt < resume_rate))	&&
+		    (bit_test(suspend_node_bitmap, i) == 0)		&&
+		    ((base_state == NODE_STATE_ALLOCATED) ||
+		     (node_ptr->last_idle > (now - idle_time)))) {
+			if (wake_node_bitmap == NULL) {
+				wake_node_bitmap = 
+					bit_alloc(node_record_count);
+			}
 			wake_cnt++;
-			suspend_cnt++;
+			resume_cnt++;
+			resume_cnt_f++;
 			node_ptr->node_state &= (~NODE_STATE_POWER_SAVE);
+			bit_clear(power_node_bitmap, i);
+			node_ptr->node_state   |= NODE_STATE_NO_RESPOND;
+			node_ptr->last_response = now + resume_timeout;
 			bit_set(wake_node_bitmap, i);
 		}
-		if ((susp_state == 0)
-		&&  ((resume_rate == 0) || (resume_cnt <= resume_rate))
-		&&  (base_state == NODE_STATE_IDLE)
-		&&  (node_ptr->last_idle < (now - idle_time))
-		&&  ((exc_node_bitmap == NULL) || 
+
+		/* Suspend nodes as appropriate */
+		if (run_suspend 					&& 
+		    (susp_state == 0)					&&
+		    ((suspend_rate == 0) || (suspend_cnt < suspend_rate)) &&
+		    (base_state == NODE_STATE_IDLE)			&&
+		    (comp_state == 0)					&&
+		    (node_ptr->last_idle < (now - idle_time))		&&
+		    ((exc_node_bitmap == NULL) || 
 		     (bit_test(exc_node_bitmap, i) == 0))) {
-			if (sleep_node_bitmap == NULL)
-				sleep_node_bitmap = bit_alloc(node_record_count);
+			if (sleep_node_bitmap == NULL) {
+				sleep_node_bitmap = 
+					bit_alloc(node_record_count);
+			}
 			sleep_cnt++;
-			resume_cnt++;
+			suspend_cnt++;
+			suspend_cnt_f++;
 			node_ptr->node_state |= NODE_STATE_POWER_SAVE;
-			bit_set(sleep_node_bitmap, i);
+			bit_set(power_node_bitmap, i);
+			bit_set(sleep_node_bitmap,   i);
+			bit_set(suspend_node_bitmap, i);
+			last_suspend = now;
 		}
 	}
-	if ((now - last_log) > 600) {
-		info("Power save mode %d nodes", susp_total);
+	if (((now - last_log) > 600) && (susp_total > 0)) {
+		info("Power save mode: %d nodes", susp_total);
 		last_log = now;
 	}
 
-	if ((wake_cnt == 0) && (sleep_cnt == 0))
-		_re_wake();	/* No work to be done now */
-
 	if (sleep_node_bitmap) {
 		char *nodes;
 		nodes = bitmap2node_name(sleep_node_bitmap);
@@ -162,50 +217,37 @@ static void _do_power_work(void)
 	}
 }
 
-/* Just in case some resume calls failed, re-issue the requests
- * periodically for active nodes. We do not increment resume_cnt
- * since there should be no change in power requirements. */
+/* If slurmctld crashes, the node state that it recovers could differ
+ * from the actual hardware state (e.g. ResumeProgram failed to complete).
+ * To address that, when a node that should be powered up for a running
+ * job is not responding, run ResumeProgram on it again. */
 static void _re_wake(void)
 {
-	static time_t last_wakeup = 0;
-	static int last_inx = 0;
-	time_t now = time(NULL);
+	uint16_t base_state;
 	struct node_record *node_ptr;
 	bitstr_t *wake_node_bitmap = NULL;
-	int i, lim = MIN(node_record_count, 20);
-	uint16_t base_state, susp_state;
+	int i;
 
-	/* Run at most once per minute */
-	if ((now - last_wakeup) < 60)
-		return;
-	last_wakeup = now;
-
-	for (i=0; i<lim; i++) {
-		node_ptr = &node_record_table_ptr[last_inx];
+	node_ptr = node_record_table_ptr;
+	for (i=0; i<node_record_count; i++, node_ptr++) {
 		base_state = node_ptr->node_state & NODE_STATE_BASE;
-		susp_state = node_ptr->node_state & NODE_STATE_POWER_SAVE;
-
-		if ((susp_state == 0) &&
-		    ((base_state == NODE_STATE_ALLOCATED) ||
-		     (base_state == NODE_STATE_IDLE))) {
-			if (wake_node_bitmap == NULL)
-				wake_node_bitmap = bit_alloc(node_record_count);
-			bit_set(wake_node_bitmap, last_inx);
+		if ((base_state == NODE_STATE_ALLOCATED)		  &&
+		    (node_ptr->node_state & NODE_STATE_NO_RESPOND)	  &&
+		    ((node_ptr->node_state & NODE_STATE_POWER_SAVE) == 0) &&
+		    (bit_test(suspend_node_bitmap, i) == 0)) {
+			if (wake_node_bitmap == NULL) {
+				wake_node_bitmap = 
+					bit_alloc(node_record_count);
+			}
+			bit_set(wake_node_bitmap, i);
 		}
-		last_inx++;
-		if (last_inx >= node_record_count)
-			last_inx = 0;
 	}
 
 	if (wake_node_bitmap) {
 		char *nodes;
 		nodes = bitmap2node_name(wake_node_bitmap);
 		if (nodes) {
-#if _DEBUG
 			info("power_save: rewaking nodes %s", nodes);
-#else
-			debug("power_save: rewaking nodes %s", nodes);
-#endif
 			_run_prog(resume_prog, nodes);	
 		} else
 			error("power_save: bitmap2nodename");
@@ -219,7 +261,7 @@ static void _do_resume(char *host)
 #if _DEBUG
 	info("power_save: waking nodes %s", host);
 #else
-	debug("power_save: waking nodes %s", host);
+	verbose("power_save: waking nodes %s", host);
 #endif
 	_run_prog(resume_prog, host);	
 }
@@ -229,13 +271,18 @@ static void _do_suspend(char *host)
 #if _DEBUG
 	info("power_save: suspending nodes %s", host);
 #else
-	debug("power_save: suspending nodes %s", host);
+	verbose("power_save: suspending nodes %s", host);
 #endif
 	_run_prog(suspend_prog, host);	
 }
 
+/* Run a suspend or resume program
+ * IN prog - program to run
+ * IN arg - program arguments, a hostlist expression
+ */
 static pid_t _run_prog(char *prog, char *arg)
 {
+	int i;
 	char program[1024], arg0[1024], arg1[1024], *pname;
 	pid_t child;
 
@@ -253,24 +300,122 @@ static pid_t _run_prog(char *prog, char *arg)
 
 	child = fork();
 	if (child == 0) {
-		int i;
 		for (i=0; i<128; i++)
 			close(i);
+		setpgrp();
 		execl(program, arg0, arg1, NULL);
 		exit(1);
-	} else if (child < 0)
+	} else if (child < 0) {
 		error("fork: %m");
+	} else {
+		/* save the pid */
+		for (i=0; i<PID_CNT; i++) {
+			if (child_pid[i])
+				continue;
+			child_pid[i]  = child;
+			child_time[i] = time(NULL);
+			break;
+		}
+		if (i == PID_CNT)
+			error("power_save: filled child_pid array");
+	}
 	return child;
 }
 
-/* We don't bother to track individual process IDs, 
- * just clean everything up here. We could capture 
- * the value of "child" in _run_prog() if we want 
- * to track each process. */
-static void  _kill_zombies(void)
+/* reap child processes previously forked to modify node state.
+ * return the count of empty slots in the child_pid array */
+static int  _reap_procs(void)
 {
-	while (waitpid(-1, NULL, WNOHANG) > 0)
-		;
+	int empties = 0, delay, i, max_timeout, rc, status;
+
+	max_timeout = MAX(suspend_timeout, resume_timeout);
+	for (i=0; i<PID_CNT; i++) {
+		if (child_pid[i] == 0) {
+			empties++;
+			continue;
+		}
+		rc = waitpid(child_pid[i], &status, WNOHANG);
+		if (rc == 0)
+			continue;
+
+		delay = difftime(time(NULL), child_time[i]);
+		if (delay > max_timeout) {
+			info("power_save: program %d ran for %d sec", 
+			     (int) child_pid[i], delay);
+		}
+
+		if (WIFEXITED(status)) {
+			rc = WEXITSTATUS(status);
+			if (rc != 0) {
+				error("power_save: program exit status of %d", 
+				      rc);
+			}
+		} else if (WIFSIGNALED(status)) {
+			error("power_save: program signalled: %s",
+			      strsignal(WTERMSIG(status)));
+		}
+
+		child_pid[i]  = 0;
+		child_time[i] = (time_t) 0;
+	}
+	return empties;
+}
+
+/* kill (or orphan) child processes previously forked to modify node state.
+ * return the count of killed/orphaned processes */
+static int  _kill_procs(void)
+{
+	int killed = 0, i, rc, status;
+
+	for (i=0; i<PID_CNT; i++) {
+		if (child_pid[i] == 0)
+			continue;
+
+		rc = waitpid(child_pid[i], &status, WNOHANG);
+		if (rc == 0) {
+#ifdef  POWER_SAVE_KILL_PROCS
+			error("power_save: killing process %d",
+			      child_pid[i]);
+			kill((0-child_pid[i]), SIGKILL);
+#else
+			error("power_save: orphaning process %d",
+			      child_pid[i]);
+#endif
+			killed++;
+		} else {
+			/* process already completed */
+		}
+		child_pid[i]  = 0;
+		child_time[i] = (time_t) 0;
+	}
+	return killed;
+}
+
+static void _shutdown_power(void)
+{
+	int i, proc_cnt, max_timeout;
+
+	max_timeout = MAX(suspend_timeout, resume_timeout);
+	/* Try to avoid orphan processes */
+	for (i=0; ; i++) {
+		proc_cnt = PID_CNT - _reap_procs();
+		if (proc_cnt == 0)	/* all procs completed */
+			break;
+		if (i >= max_timeout) {
+			error("power_save: orphaning %d processes which are "
+			      "not terminating so slurmctld can exit", 
+			      proc_cnt);
+			_kill_procs();
+			break;
+		} else if (i == 2) {
+			info("power_save: waiting for %d processes to "
+			     "complete", proc_cnt);
+		} else if (i % 5 == 0) {
+			debug("power_save: waiting for %d processes to "
+			      "complete", proc_cnt);
+		}
+		sleep(1);
+	}
 }
 
 /* Free all allocated memory */
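
A minimal standalone sketch of the pid-table reap pattern used by
_reap_procs() above (hypothetical code, outside slurmctld):

	#include <sys/types.h>
	#include <sys/wait.h>

	#define PID_CNT 10

	/* Poll each tracked child once without blocking; clear the
	 * slot when the child has exited. */
	static int reap_slots(pid_t *pids)
	{
		int i, empties = 0, status;

		for (i = 0; i < PID_CNT; i++) {
			if (pids[i] == 0) {
				empties++;
				continue;
			}
			if (waitpid(pids[i], &status, WNOHANG) > 0)
				pids[i] = 0;	/* slot now free */
		}
		return empties;
	}
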
@@ -290,10 +435,13 @@ static int _init_power_config(void)
 {
 	slurm_ctl_conf_t *conf = slurm_conf_lock();
 
-	last_config   = slurmctld_conf.last_update;
-	idle_time     = conf->suspend_time - 1;
-	suspend_rate  = conf->suspend_rate;
-	resume_rate   = conf->resume_rate;
+	last_config     = slurmctld_conf.last_update;
+	idle_time       = conf->suspend_time - 1;
+	suspend_rate    = conf->suspend_rate;
+	resume_timeout  = conf->resume_timeout;
+	resume_rate     = conf->resume_rate;
+	slurmd_timeout  = conf->slurmd_timeout;
+	suspend_timeout = conf->suspend_timeout;
 	_clear_power_config();
 	if (conf->suspend_program)
 		suspend_prog = xstrdup(conf->suspend_program);
@@ -306,36 +454,38 @@ static int _init_power_config(void)
 	slurm_conf_unlock();
 
 	if (idle_time < 0) {	/* not an error */
-		debug("power_save module disabled, idle_time < 0");
+		debug("power_save module disabled, SuspendTime < 0");
 		return -1;
 	}
 	if (suspend_rate < 1) {
-		error("power_save module disabled, suspend_rate < 1");
+		error("power_save module disabled, SuspendRate < 1");
 		return -1;
 	}
 	if (resume_rate < 1) {
-		error("power_save module disabled, resume_rate < 1");
+		error("power_save module disabled, ResumeRate < 1");
 		return -1;
 	}
-	if (suspend_prog == NULL)
-		info("WARNING: power_save module has NULL suspend program");
-	else if (!_valid_prog(suspend_prog)) {
-		error("power_save module disabled, invalid suspend program %s",
-			suspend_prog);
+	if (suspend_prog == NULL) {
+		error("power_save module disabled, NULL SuspendProgram");
+		return -1;
+	} else if (!_valid_prog(suspend_prog)) {
+		error("power_save module disabled, invalid SuspendProgram %s",
+		      suspend_prog);
 		return -1;
 	}
-	if (resume_prog == NULL)
-		info("WARNING: power_save module has NULL resume program");
-	else if (!_valid_prog(resume_prog)) {
-		error("power_save module disabled, invalid resume program %s",
-			resume_prog);
+	if (resume_prog == NULL) {
+		error("power_save module disabled, NULL ResumeProgram");
+		return -1;
+	} else if (!_valid_prog(resume_prog)) {
+		error("power_save module disabled, invalid ResumeProgram %s",
+		      resume_prog);
 		return -1;
 	}
 
-	if (exc_nodes
-	&&  (node_name2bitmap(exc_nodes, false, &exc_node_bitmap))) {
+	if (exc_nodes &&
+	    (node_name2bitmap(exc_nodes, false, &exc_node_bitmap))) {
 		error("power_save module disabled, "
-			"invalid excluded nodes %s", exc_nodes);
+		      "invalid SuspendExcNodes %s", exc_nodes);
 		return -1;
 	}
 
@@ -350,7 +500,7 @@ static int _init_power_config(void)
 			part_ptr = find_part_record(one_part);
 			if (!part_ptr) {
 				error("power_save module disabled, "
-					"invalid excluded partition %s",
+					"invalid SuspendExcPart %s",
 					one_part);
 				rc = -1;
 				break;
@@ -358,7 +508,8 @@ static int _init_power_config(void)
 			if (exc_node_bitmap)
 				bit_or(exc_node_bitmap, part_ptr->node_bitmap);
 			else
-				exc_node_bitmap = bit_copy(part_ptr->node_bitmap);
+				exc_node_bitmap = bit_copy(part_ptr->
+							   node_bitmap);
 			one_part = strtok_r(NULL, ",", &tmp);
 		}
 		xfree(part_list);
@@ -380,23 +531,27 @@ static bool _valid_prog(char *file_name)
 	struct stat buf;
 
 	if (file_name[0] != '/') {
-		debug("program %s not absolute pathname", file_name);
+		debug("power_save program %s is not an absolute pathname",
+		      file_name);
 		return false;
 	}
 
-	if (stat(file_name, &buf)) {
-		debug("program %s not found", file_name);
+	if (access(file_name, X_OK) != 0) {
+		debug("power_save program %s not executable", file_name);
 		return false;
 	}
-	if (!S_ISREG(buf.st_mode)) {
-		debug("program %s not regular file", file_name);
+
+	if (stat(file_name, &buf)) {
+		debug("power_save program %s not found", file_name);
 		return false;
 	}
 	if (buf.st_mode & 022) {
-		debug("program %s has group or world write permission",
-			file_name);
+		debug("power_save program %s has group or "
+		      "world write permission",
+		      file_name);
 		return false;
 	}
+
 	return true;
 }
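
For reference, octal 022 is the group-write (020) plus other-write (002)
permission bits, so a SuspendProgram installed with mode 0755 passes the
check above while one installed with mode 0775 or 0777 is rejected.
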
 
@@ -407,36 +562,63 @@ static bool _valid_prog(char *file_name)
  */
 extern void *init_power_save(void *arg)
 {
-        /* Locks: Write node, read jobs and partitions */
+        /* Locks: Read nodes */
+        slurmctld_lock_t node_read_lock = {
+                NO_LOCK, READ_LOCK, NO_LOCK, NO_LOCK };
+        /* Locks: Write nodes */
         slurmctld_lock_t node_write_lock = {
-                NO_LOCK, READ_LOCK, WRITE_LOCK, READ_LOCK };
-	time_t now, last_power_scan = 0;
+                NO_LOCK, WRITE_LOCK, NO_LOCK, NO_LOCK };
+	time_t now, boot_time = 0, last_power_scan = 0;
 
 	if (_init_power_config())
 		goto fini;
 
+	suspend_node_bitmap = bit_alloc(node_record_count);
+	if (suspend_node_bitmap == NULL)
+		fatal("power_save: malloc error");
+
 	while (slurmctld_config.shutdown_time == 0) {
 		sleep(1);
-		_kill_zombies();
 
-		if ((last_config != slurmctld_conf.last_update)
-		&&  (_init_power_config()))
+		if (_reap_procs() < 2) {
+			debug("power_save programs getting backlogged");
+			continue;
+		}
+
+		if ((last_config != slurmctld_conf.last_update) &&
+		    (_init_power_config())) {
+			info("power_save mode has been disabled due to "
+			     "configuration changes");
 			goto fini;
+		}
 
-		/* Only run every 60 seconds or after
-		 * a node state change, whichever 
-		 * happens first */
 		now = time(NULL);
-		if ((last_node_update < last_power_scan)
-		&&  (now < (last_power_scan + 60)))
-			continue;
+		if (boot_time == 0)
+			boot_time = now;
+
+		/* Only run every 60 seconds or after a node state change,
+		 *  whichever happens first */
+		if ((last_node_update >= last_power_scan) ||
+		    (now >= (last_power_scan + 60))) {
+			lock_slurmctld(node_write_lock);
+			_do_power_work();
+			unlock_slurmctld(node_write_lock);
+			last_power_scan = now;
+		}
 
-		lock_slurmctld(node_write_lock);
-		_do_power_work();
-		unlock_slurmctld(node_write_lock);
-		last_power_scan = now;
+		if (slurmd_timeout &&
+		    (now > (boot_time + (slurmd_timeout / 2)))) {
+			lock_slurmctld(node_read_lock);
+			_re_wake();
+			unlock_slurmctld(node_read_lock);
+			/* prevent additional executions */
+			boot_time += (365 * 24 * 60 * 60);
+			slurmd_timeout = 0;
+		}
 	}
 
 fini:	_clear_power_config();
+	FREE_NULL_BITMAP(suspend_node_bitmap);
+	_shutdown_power();
 	return NULL;
 }
diff --git a/src/slurmctld/private.key b/src/slurmctld/private.key
deleted file mode 100644
index 8d48b17b2db4e6ba40e287b879692e0d2e531713..0000000000000000000000000000000000000000
--- a/src/slurmctld/private.key
+++ /dev/null
@@ -1,15 +0,0 @@
------BEGIN RSA PRIVATE KEY-----
-MIICXQIBAAKBgQC9Ld+Z8bAGVVMqWR4Z6XjmHy7FWbuRnVk1LHIDAnhsYx95BVZU
-4lnlxjSHW0F+zSnvEWMlrbxFG+okUDXTK8if/mi3ZGDAsG+d0Qdxx2KDz1Ps1uze
-h/0asAtsezSCQgmWzOPurTLq6XmlnA2fLNyNzS0AOiubrp51VMHifvTLpwIDAQAB
-AoGAPubk5Uv+amyuhXMyVg1SXLnblFUcz/MQuWR42FVW8zsWOOg0Z28H0yXPS35l
-TaMsIUiXveyBoD0C4mYlL3zsbOs6PinarNrlLT0mTLxFnls3Q/OBTJpwYWo1vv95
-ncsU3S7gD4IEI9GfMqXbM3fdk5HjUkn+ctmjmyy6xEmYF+ECQQD1CXklbn4W+MWv
-LNdWp6CcCO52NT9aR+Sy461G6vhLawfVSsYiJUADehxCeKq2SjLX2iqcqJSwv+nq
-k+E+E9IxAkEAxaSiQVVDglUFMZD96lpDy0ro0q5vANGtjYMtzApR59Ur8lUTCyJG
-g8v1s2iaP+5uRztQG48SILJZ1ZqUVXHtVwJBAJPMlQAY94EPqN2SpIimL2Aumvc+
-AuqHttCYN+owzHdhJaZnpb7uzP/L5cPKWN3/P0+nTlCT9qvt9kAB1rjE5+ECQDDC
-+okV6S75nOtqs0qjdIBufdykzpwsHx5/08e4cBa7gDkshiNEFJOluXvG/e9x+uE2
-IAB8nuNUPVe26IvgLMcCQQCah2dYKHQaadIlky9+/OL+mVJC5IFlYrK5qm3Pjptw
-ykZB5OerjUdQSQYriB/1jAWu21rtXzlpRTjBnkQM6huM
------END RSA PRIVATE KEY-----
diff --git a/src/slurmctld/proc_req.c b/src/slurmctld/proc_req.c
index c9cd63ec7936bbf6fe9ae79b1d2a4553010fb666..06965a3a0814f58feb2472efa68fcb0d33bd498e 100644
--- a/src/slurmctld/proc_req.c
+++ b/src/slurmctld/proc_req.c
@@ -2,13 +2,14 @@
  *  proc_req.c - process incomming messages to slurmctld
  *****************************************************************************
  *  Copyright (C) 2002-2007 The Regents of the University of California.
- *  Copyright (C) 2008 Lawrence Livermore National Security.
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette@llnl.gov>, et. al. 
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -60,6 +61,7 @@
 #include "src/common/macros.h"
 #include "src/common/node_select.h"
 #include "src/common/pack.h"
+#include "src/common/slurm_priority.h"
 #include "src/common/read_config.h"
 #include "src/common/slurm_auth.h"
 #include "src/common/slurm_cred.h"
@@ -74,6 +76,7 @@
 #include "src/slurmctld/locks.h"
 #include "src/slurmctld/proc_req.h"
 #include "src/slurmctld/read_config.h"
+#include "src/slurmctld/reservation.h"
 #include "src/slurmctld/sched_plugin.h"
 #include "src/slurmctld/slurmctld.h"
 #include "src/slurmctld/srun_comm.h"
@@ -96,6 +99,9 @@ inline static void  _slurm_rpc_complete_batch_script(slurm_msg_t * msg);
 inline static void  _slurm_rpc_dump_conf(slurm_msg_t * msg);
 inline static void  _slurm_rpc_dump_jobs(slurm_msg_t * msg);
 inline static void  _slurm_rpc_dump_job_single(slurm_msg_t * msg);
+inline static void  _slurm_rpc_get_shares(slurm_msg_t *msg);
+inline static void  _slurm_rpc_get_topo(slurm_msg_t * msg);
+inline static void  _slurm_rpc_get_priority_factors(slurm_msg_t *msg);
 inline static void  _slurm_rpc_dump_nodes(slurm_msg_t * msg);
 inline static void  _slurm_rpc_dump_partitions(slurm_msg_t * msg);
 inline static void  _slurm_rpc_epilog_complete(slurm_msg_t * msg);
@@ -111,7 +117,12 @@ inline static void  _slurm_rpc_job_alloc_info(slurm_msg_t * msg);
 inline static void  _slurm_rpc_job_alloc_info_lite(slurm_msg_t * msg);
 inline static void  _slurm_rpc_ping(slurm_msg_t * msg);
 inline static void  _slurm_rpc_reconfigure_controller(slurm_msg_t * msg);
+inline static void  _slurm_rpc_resv_create(slurm_msg_t * msg);
+inline static void  _slurm_rpc_resv_update(slurm_msg_t * msg);
+inline static void  _slurm_rpc_resv_delete(slurm_msg_t * msg);
+inline static void  _slurm_rpc_resv_show(slurm_msg_t * msg);
 inline static void  _slurm_rpc_requeue(slurm_msg_t * msg);
+inline static void  _slurm_rpc_takeover(slurm_msg_t * msg);
 inline static void  _slurm_rpc_shutdown_controller(slurm_msg_t * msg);
 inline static void  _slurm_rpc_shutdown_controller_immediate(slurm_msg_t *
 							     msg);
@@ -163,6 +174,14 @@ void slurmctld_req (slurm_msg_t * msg)
 		_slurm_rpc_dump_job_single(msg);
 		slurm_free_job_id_msg(msg->data);
 		break;
+	case REQUEST_SHARE_INFO:
+		_slurm_rpc_get_shares(msg);
+		slurm_free_shares_request_msg(msg->data);
+		break;
+	case REQUEST_PRIORITY_FACTORS:
+		_slurm_rpc_get_priority_factors(msg);
+		slurm_free_priority_factors_request_msg(msg->data);
+		break;
 	case REQUEST_JOB_END_TIME:
 		_slurm_rpc_end_time(msg);
 		slurm_free_job_alloc_info_msg(msg->data);
@@ -227,6 +246,10 @@ void slurmctld_req (slurm_msg_t * msg)
 		_slurm_rpc_shutdown_controller(msg);
 		/* No body to free */
 		break;
+	case REQUEST_TAKEOVER:
+		_slurm_rpc_takeover(msg);
+		/* No body to free */
+		break;
 	case REQUEST_SHUTDOWN:
 		_slurm_rpc_shutdown_controller(msg);
 		slurm_free_shutdown_msg(msg->data);
@@ -247,6 +270,7 @@ void slurmctld_req (slurm_msg_t * msg)
 		_slurm_rpc_update_node(msg);
 		slurm_free_update_node_msg(msg->data);
 		break;
+	case REQUEST_CREATE_PARTITION:
 	case REQUEST_UPDATE_PARTITION:
 		_slurm_rpc_update_partition(msg);
 		slurm_free_update_part_msg(msg->data);
@@ -255,6 +279,22 @@ void slurmctld_req (slurm_msg_t * msg)
 		_slurm_rpc_delete_partition(msg);
 		slurm_free_delete_part_msg(msg->data);
 		break;
+	case REQUEST_CREATE_RESERVATION:
+		_slurm_rpc_resv_create(msg);
+		slurm_free_resv_desc_msg(msg->data);
+		break;
+	case REQUEST_UPDATE_RESERVATION:
+		_slurm_rpc_resv_update(msg);
+		slurm_free_resv_desc_msg(msg->data);
+		break;
+	case REQUEST_DELETE_RESERVATION:
+		_slurm_rpc_resv_delete(msg);
+		slurm_free_resv_name_msg(msg->data);
+		break;
+	case REQUEST_RESERVATION_INFO:
+		_slurm_rpc_resv_show(msg);
+		slurm_free_resv_info_request_msg(msg->data);
+		break;
 	case REQUEST_NODE_REGISTRATION_STATUS:
 		error("slurmctld is talking with itself. "
 			"SlurmctldPort == SlurmdPort");
@@ -324,6 +364,10 @@ void slurmctld_req (slurm_msg_t * msg)
 		_slurm_rpc_accounting_first_reg(msg);
 		/* No body to free */
 		break;
+	case REQUEST_TOPO_INFO:
+		_slurm_rpc_get_topo(msg);
+		/* No body to free */
+		break;
 	default:
 		error("invalid RPC msg_type=%d", msg->msg_type);
 		slurm_send_rc_msg(msg, EINVAL);
@@ -340,8 +384,11 @@ void _fill_ctld_conf(slurm_ctl_conf_t * conf_ptr)
 {
 	slurm_ctl_conf_t *conf = slurm_conf_lock();
 
+	memset(conf_ptr, 0, sizeof(slurm_ctl_conf_t));
+
 	conf_ptr->last_update         = time(NULL);
-	conf_ptr->accounting_storage_enforce = conf->accounting_storage_enforce;
+	conf_ptr->accounting_storage_enforce = 
+					conf->accounting_storage_enforce;
 	conf_ptr->accounting_storage_host =
 					xstrdup(conf->accounting_storage_host);
 	conf_ptr->accounting_storage_loc =
@@ -358,21 +405,25 @@ void _fill_ctld_conf(slurm_ctl_conf_t * conf_ptr)
 
 	conf_ptr->backup_addr         = xstrdup(conf->backup_addr);
 	conf_ptr->backup_controller   = xstrdup(conf->backup_controller);
+	conf_ptr->batch_start_timeout = conf->batch_start_timeout;
 	conf_ptr->boot_time           = slurmctld_config.boot_time;
 
 	conf_ptr->cache_groups        = conf->cache_groups;
 	conf_ptr->checkpoint_type     = xstrdup(conf->checkpoint_type);
 	conf_ptr->cluster_name        = xstrdup(conf->cluster_name);
+	conf_ptr->complete_wait       = conf->complete_wait;
 	conf_ptr->control_addr        = xstrdup(conf->control_addr);
 	conf_ptr->control_machine     = xstrdup(conf->control_machine);
 	conf_ptr->crypto_type         = xstrdup(conf->crypto_type);
 
 	conf_ptr->def_mem_per_task    = conf->def_mem_per_task;
+	conf_ptr->debug_flags         = conf->debug_flags;
 	conf_ptr->disable_root_jobs   = conf->disable_root_jobs;
 
 	conf_ptr->enforce_part_limits = conf->enforce_part_limits;
 	conf_ptr->epilog              = xstrdup(conf->epilog);
 	conf_ptr->epilog_msg_time     = conf->epilog_msg_time;
+	conf_ptr->epilog_slurmctld    = xstrdup(conf->epilog_slurmctld);
 
 	conf_ptr->fast_schedule       = conf->fast_schedule;
 	conf_ptr->first_job_id        = conf->first_job_id;
@@ -385,6 +436,7 @@ void _fill_ctld_conf(slurm_ctl_conf_t * conf_ptr)
 	conf_ptr->job_acct_gather_freq  = conf->job_acct_gather_freq;
 	conf_ptr->job_acct_gather_type  = xstrdup(conf->job_acct_gather_type);
 
+	conf_ptr->job_ckpt_dir        = xstrdup(conf->job_ckpt_dir);
 	conf_ptr->job_comp_host       = xstrdup(conf->job_comp_host);
 	conf_ptr->job_comp_loc        = xstrdup(conf->job_comp_loc);
 	conf_ptr->job_comp_pass       = xstrdup(conf->job_comp_pass);
@@ -402,6 +454,7 @@ void _fill_ctld_conf(slurm_ctl_conf_t * conf_ptr)
 	conf_ptr->get_env_timeout     = conf->get_env_timeout;
 
 	conf_ptr->kill_wait           = conf->kill_wait;
+	conf_ptr->kill_on_bad_exit    = conf->kill_on_bad_exit;
 
 	conf_ptr->licenses            = xstrdup(conf->licenses);
 
@@ -410,16 +463,32 @@ void _fill_ctld_conf(slurm_ctl_conf_t * conf_ptr)
 	conf_ptr->max_mem_per_task    = conf->max_mem_per_task;
 	conf_ptr->min_job_age         = conf->min_job_age;
 	conf_ptr->mpi_default         = xstrdup(conf->mpi_default);
+	conf_ptr->mpi_params          = xstrdup(conf->mpi_params);
 	conf_ptr->msg_timeout         = conf->msg_timeout;
 
 	conf_ptr->next_job_id         = get_next_job_id();
 	conf_ptr->node_prefix         = xstrdup(conf->node_prefix);
 
+	conf_ptr->over_time_limit     = conf->over_time_limit;
+
 	conf_ptr->plugindir           = xstrdup(conf->plugindir);
 	conf_ptr->plugstack           = xstrdup(conf->plugstack);
+
+	conf_ptr->priority_decay_hl   = conf->priority_decay_hl;
+	conf_ptr->priority_favor_small= conf->priority_favor_small;
+	conf_ptr->priority_max_age    = conf->priority_max_age;
+	conf_ptr->priority_reset_period = conf->priority_reset_period;
+	conf_ptr->priority_type       = xstrdup(conf->priority_type);
+	conf_ptr->priority_weight_age = conf->priority_weight_age;
+	conf_ptr->priority_weight_fs  = conf->priority_weight_fs;
+	conf_ptr->priority_weight_js  = conf->priority_weight_js;
+	conf_ptr->priority_weight_part= conf->priority_weight_part;
+	conf_ptr->priority_weight_qos = conf->priority_weight_qos;
+
 	conf_ptr->private_data        = conf->private_data;
 	conf_ptr->proctrack_type      = xstrdup(conf->proctrack_type);
 	conf_ptr->prolog              = xstrdup(conf->prolog);
+	conf_ptr->prolog_slurmctld    = xstrdup(conf->prolog_slurmctld);
 	conf_ptr->propagate_prio_process = 
 					slurmctld_conf.propagate_prio_process;
         conf_ptr->propagate_rlimits   = xstrdup(conf->propagate_rlimits);
@@ -428,6 +497,8 @@ void _fill_ctld_conf(slurm_ctl_conf_t * conf_ptr)
 
 	conf_ptr->resume_program      = xstrdup(conf->resume_program);
 	conf_ptr->resume_rate         = conf->resume_rate;
+	conf_ptr->resume_timeout      = conf->resume_timeout;
+	conf_ptr->resv_over_run       = conf->resv_over_run;
 	conf_ptr->ret2service         = conf->ret2service;
 
 	conf_ptr->salloc_default_command = xstrdup(conf->
@@ -441,6 +512,7 @@ void _fill_ctld_conf(slurm_ctl_conf_t * conf_ptr)
 	conf_ptr->sched_time_slice    = conf->sched_time_slice;
 	conf_ptr->schedtype           = xstrdup(conf->schedtype);
 	conf_ptr->select_type         = xstrdup(conf->select_type);
+	conf_ptr->select_conf_key_pairs = (void *)select_g_get_config();
 	conf_ptr->select_type_param   = conf->select_type_param;
 	conf_ptr->slurm_user_id       = conf->slurm_user_id;
 	conf_ptr->slurm_user_name     = xstrdup(conf->slurm_user_name);
@@ -455,8 +527,11 @@ void _fill_ctld_conf(slurm_ctl_conf_t * conf_ptr)
 	conf_ptr->slurmd_port         = conf->slurmd_port;
 	conf_ptr->slurmd_spooldir     = xstrdup(conf->slurmd_spooldir);
 	conf_ptr->slurmd_timeout      = conf->slurmd_timeout;
+	conf_ptr->slurmd_user_id      = conf->slurmd_user_id;
+	conf_ptr->slurmd_user_name    = xstrdup(conf->slurmd_user_name);
 	conf_ptr->slurm_conf          = xstrdup(conf->slurm_conf);
 	conf_ptr->srun_prolog         = xstrdup(conf->srun_prolog);
+	conf_ptr->srun_io_timeout     = conf->srun_io_timeout;
 	conf_ptr->srun_epilog         = xstrdup(conf->srun_epilog);
 	conf_ptr->state_save_location = xstrdup(conf->state_save_location);
 	conf_ptr->suspend_exc_nodes   = xstrdup(conf->suspend_exc_nodes);
@@ -464,6 +539,7 @@ void _fill_ctld_conf(slurm_ctl_conf_t * conf_ptr)
 	conf_ptr->suspend_program     = xstrdup(conf->suspend_program);
 	conf_ptr->suspend_rate        = conf->suspend_rate;
 	conf_ptr->suspend_time        = conf->suspend_time;
+	conf_ptr->suspend_timeout     = conf->suspend_timeout;
 	conf_ptr->switch_type         = xstrdup(conf->switch_type);
 
 	conf_ptr->task_epilog         = xstrdup(conf->task_epilog);
@@ -471,6 +547,8 @@ void _fill_ctld_conf(slurm_ctl_conf_t * conf_ptr)
 	conf_ptr->task_plugin         = xstrdup(conf->task_plugin);
 	conf_ptr->task_plugin_param   = conf->task_plugin_param;
 	conf_ptr->tmp_fs              = xstrdup(conf->tmp_fs);
+	conf_ptr->topology_plugin     = xstrdup(conf->topology_plugin);
+	conf_ptr->track_wckey         = conf->track_wckey;
 	conf_ptr->tree_width          = conf->tree_width;
 
 	conf_ptr->wait_time           = conf->wait_time;
@@ -514,57 +592,36 @@ static void _kill_job_on_msg_fail(uint32_t job_id)
 }
 
 /* create a credential for a given job step, return error code */
-static int _make_step_cred(struct step_record *step_rec, 
+static int _make_step_cred(struct step_record *step_ptr, 
 			   slurm_cred_t *slurm_cred)
 {
 	slurm_cred_arg_t cred_arg;
-	struct job_record* job_ptr = step_rec->job_ptr;
+	struct job_record* job_ptr = step_ptr->job_ptr;
+	select_job_res_t select_ptr = job_ptr->select_job;
 
+	xassert(select_ptr && select_ptr->cpus);
 	cred_arg.jobid    = job_ptr->job_id;
-	cred_arg.stepid   = step_rec->step_id;
+	cred_arg.stepid   = step_ptr->step_id;
 	cred_arg.uid      = job_ptr->user_id;
 	cred_arg.job_mem  = job_ptr->details->job_min_memory;
-	cred_arg.task_mem = step_rec->mem_per_task;
 #ifdef HAVE_FRONT_END
 	cred_arg.hostlist = node_record_table_ptr[0].name;
 #else
-	cred_arg.hostlist = step_rec->step_layout->node_list;
+	cred_arg.hostlist = step_ptr->step_layout->node_list;
 #endif
-	cred_arg.alloc_lps_cnt = job_ptr->alloc_lps_cnt;
-	if ((cred_arg.alloc_lps_cnt > 0) &&
-	    bit_equal(job_ptr->node_bitmap, step_rec->step_node_bitmap)) {
-		cred_arg.alloc_lps = xmalloc(cred_arg.alloc_lps_cnt *
-				sizeof(uint32_t));
-		memcpy(cred_arg.alloc_lps, step_rec->job_ptr->alloc_lps,
-		       cred_arg.alloc_lps_cnt*sizeof(uint32_t));
-        } else if (cred_arg.alloc_lps_cnt > 0) {
-		/* Construct an array of allocated CPUs per node.
-		 * Translate from array based upon job's allocation
-		 * to array based upon nodes allocated to the step. */
-		int i, job_inx = -1, step_inx = -1;
-		int job_inx_target = job_ptr->node_cnt;
-		cred_arg.alloc_lps = xmalloc(cred_arg.alloc_lps_cnt *
-				sizeof(uint32_t));
-		for (i=0; i<node_record_count; i++) {
-			if (!bit_test(job_ptr->node_bitmap, i))
-				continue;
-			job_inx++;
-			if (!bit_test(step_rec->step_node_bitmap, i))
-				continue;
-			step_inx++;
-			cred_arg.alloc_lps[step_inx] = 
-					job_ptr->alloc_lps[job_inx];
-			if (job_inx == job_inx_target)
-				break;
-		}
-		cred_arg.alloc_lps_cnt = step_inx + 1;
-        } else {
-		error("No resources allocated to job %u", job_ptr->job_id);
-		cred_arg.alloc_lps = NULL;
-	}
+
+	/* Identify the cores allocated to this job step.
+	 * The core_bitmap is based upon the nodes allocated to the _job_.
+	 * The slurmd must identify the appropriate cores to be used
+	 * by each step. */
+	cred_arg.core_bitmap         = step_ptr->core_bitmap_job;
+	cred_arg.cores_per_socket    = select_ptr->cores_per_socket;
+	cred_arg.sockets_per_node    = select_ptr->sockets_per_node;
+	cred_arg.sock_core_rep_count = select_ptr->sock_core_rep_count;
+	cred_arg.job_nhosts          = select_ptr->nhosts;
+	cred_arg.job_hostlist        = job_ptr->nodes;
 
 	*slurm_cred = slurm_cred_create(slurmctld_config.cred_ctx, &cred_arg);
-	xfree(cred_arg.alloc_lps);
 	if (*slurm_cred == NULL) {
 		error("slurm_cred_create error");
 		return ESLURM_INVALID_JOB_CREDENTIAL;
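
A hedged sketch of how a consumer might expand the run-length encoded
layout passed in the credential above (the element types follow the
cred_arg fields set here; the helper itself is hypothetical):

	#include <inttypes.h>
	#include <stdio.h>

	/* Expand the run-length encoded (sockets_per_node,
	 * cores_per_socket, sock_core_rep_count) arrays into per-node
	 * core totals; rep_cnt is the number of encoded groups. */
	static void print_core_layout(const uint16_t *sockets,
				      const uint16_t *cores,
				      const uint32_t *reps, int rep_cnt)
	{
		int g, node = 0;
		uint32_t r;

		for (g = 0; g < rep_cnt; g++) {
			for (r = 0; r < reps[g]; r++, node++) {
				printf("node %d: %u cores\n", node,
				       (unsigned) (sockets[g] * cores[g]));
			}
		}
	}
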
@@ -617,6 +674,7 @@ static void _slurm_rpc_allocate_resources(slurm_msg_t * msg)
 	if (error_code == SLURM_SUCCESS) {
 		do_unlock = true;
 		lock_slurmctld(job_write_lock);
+
 		error_code = job_allocate(job_desc_msg, immediate, 
 					  false, NULL,
 					  true, uid, &job_ptr);
@@ -625,8 +683,9 @@ static void _slurm_rpc_allocate_resources(slurm_msg_t * msg)
 	}
 
 	/* return result */
-	if ((error_code == ESLURM_REQUESTED_PART_CONFIG_UNAVAILABLE)
-	||  (error_code == ESLURM_JOB_HELD))
+	if ((error_code == ESLURM_REQUESTED_PART_CONFIG_UNAVAILABLE) ||
+	    (error_code == ESLURM_RESERVATION_NOT_USABLE) ||
+	    (error_code == ESLURM_JOB_HELD))
 		job_waiting = true;
 
 	if ((error_code == SLURM_SUCCESS)
@@ -635,20 +694,33 @@ static void _slurm_rpc_allocate_resources(slurm_msg_t * msg)
 		info("_slurm_rpc_allocate_resources JobId=%u NodeList=%s %s",
 			job_ptr->job_id, job_ptr->nodes, TIME_STR);
 
-		/* send job_ID  and node_name_ptr */
-		alloc_msg.cpu_count_reps = xmalloc(sizeof(uint32_t) *
-				job_ptr->num_cpu_groups);
-		memcpy(alloc_msg.cpu_count_reps, job_ptr->cpu_count_reps,
-				(sizeof(uint32_t) * job_ptr->num_cpu_groups));
-		alloc_msg.cpus_per_node  = xmalloc(sizeof(uint32_t) *
-				job_ptr->num_cpu_groups);
-		memcpy(alloc_msg.cpus_per_node, job_ptr->cpus_per_node,
-				(sizeof(uint32_t) * job_ptr->num_cpu_groups));
+		/* send job_ID and node_name_ptr */
+		if (job_ptr->select_job && job_ptr->select_job->cpu_array_cnt) {
+			alloc_msg.num_cpu_groups = job_ptr->select_job->
+						   cpu_array_cnt;
+			alloc_msg.cpu_count_reps = xmalloc(sizeof(uint32_t) * 
+							   job_ptr->select_job->
+							   cpu_array_cnt);
+			memcpy(alloc_msg.cpu_count_reps, 
+			       job_ptr->select_job->cpu_array_reps,
+			       (sizeof(uint32_t) * job_ptr->select_job->
+						   cpu_array_cnt));
+			alloc_msg.cpus_per_node  = xmalloc(sizeof(uint16_t) * 
+							   job_ptr->select_job->
+							   cpu_array_cnt);
+			memcpy(alloc_msg.cpus_per_node, 
+			       job_ptr->select_job->cpu_array_value,
+			       (sizeof(uint16_t) * job_ptr->select_job->
+						   cpu_array_cnt));
+		} else {
+			alloc_msg.num_cpu_groups = 0;
+			alloc_msg.cpu_count_reps = NULL;
+			alloc_msg.cpus_per_node  = NULL;
+		}
 		alloc_msg.error_code     = error_code;
 		alloc_msg.job_id         = job_ptr->job_id;
 		alloc_msg.node_cnt       = job_ptr->node_cnt;
 		alloc_msg.node_list      = xstrdup(job_ptr->nodes);
-		alloc_msg.num_cpu_groups = job_ptr->num_cpu_groups;
 		alloc_msg.select_jobinfo = 
 			select_g_copy_jobinfo(job_ptr->select_jobinfo);
 		unlock_slurmctld(job_write_lock);
@@ -796,6 +868,59 @@ static void _slurm_rpc_dump_job_single(slurm_msg_t * msg)
 	xfree(dump);
 }
 
+static void  _slurm_rpc_get_shares(slurm_msg_t *msg)
+{
+	DEF_TIMERS;
+	shares_request_msg_t *req_msg = (shares_request_msg_t *) msg->data;
+	shares_response_msg_t resp_msg;
+	slurm_msg_t response_msg;
+	
+	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
+
+	START_TIMER;
+	debug2("Processing RPC: REQUEST_SHARE_INFO from uid=%u",
+	       (unsigned int)uid);
+	resp_msg.assoc_shares_list = assoc_mgr_get_shares(acct_db_conn,
+							  uid,
+							  req_msg->acct_list, 
+							  req_msg->user_list);
+	slurm_msg_t_init(&response_msg);
+	response_msg.address  = msg->address;
+	response_msg.msg_type = RESPONSE_SHARE_INFO;
+	response_msg.data     = &resp_msg;
+	slurm_send_node_msg(msg->conn_fd, &response_msg);
+	if(resp_msg.assoc_shares_list)
+		list_destroy(resp_msg.assoc_shares_list);
+	END_TIMER2("_slurm_rpc_get_shares");
+	debug2("_slurm_rpc_get_shares %s", TIME_STR);
+}
+
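+/* _slurm_rpc_get_priority_factors - process RPC to report the priority
+ *	factors used by the priority plugin for each job */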
+static void  _slurm_rpc_get_priority_factors(slurm_msg_t *msg)
+{
+	DEF_TIMERS;
+	priority_factors_request_msg_t *req_msg =
+		(priority_factors_request_msg_t *) msg->data;
+	priority_factors_response_msg_t resp_msg;
+	slurm_msg_t response_msg;
+
+	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
+
+	START_TIMER;
+	debug2("Processing RPC: REQUEST_PRIORITY_FACTORS from uid=%u",
+	       (unsigned int)uid);
+	resp_msg.priority_factors_list = priority_g_get_priority_factors_list(
+					 req_msg);
+	slurm_msg_t_init(&response_msg);
+	response_msg.address  = msg->address;
+	response_msg.msg_type = RESPONSE_PRIORITY_FACTORS;
+	response_msg.data     = &resp_msg;
+	slurm_send_node_msg(msg->conn_fd, &response_msg);
+	if(resp_msg.priority_factors_list)
+		list_destroy(resp_msg.priority_factors_list);
+	END_TIMER2("_slurm_rpc_get_priority_factors");
+	debug2("_slurm_rpc_get_priority_factors %s", TIME_STR);
+}
+
 /* _slurm_rpc_end_time - Process RPC for job end time */
 static void _slurm_rpc_end_time(slurm_msg_t * msg)
 {
@@ -811,7 +936,7 @@ static void _slurm_rpc_end_time(slurm_msg_t * msg)
 	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
 
 	START_TIMER;
-	debug2("Processing RPC: REQUEST JOB_END_TIME from uid=%u",
+	debug2("Processing RPC: REQUEST_JOB_END_TIME from uid=%u",
 		(unsigned int) uid);
 	lock_slurmctld(job_read_lock);
 	rc = job_end_time(time_req_msg, &timeout_msg);
@@ -1067,8 +1192,8 @@ static void _slurm_rpc_complete_job_allocation(slurm_msg_t * msg)
 	/* init */
 	START_TIMER;
 	debug2("Processing RPC: REQUEST_COMPLETE_JOB_ALLOCATION from "
-		"uid=%u, JobId=%u rc=%d",
-		uid, comp_msg->job_id, comp_msg->job_rc);
+	       "uid=%u, JobId=%u rc=%d",
+	       uid, comp_msg->job_id, comp_msg->job_rc);
 
 	lock_slurmctld(job_write_lock);
 
@@ -1112,8 +1237,8 @@ static void _slurm_rpc_complete_batch_script(slurm_msg_t * msg)
 	/* init */
 	START_TIMER;
 	debug2("Processing RPC: REQUEST_COMPLETE_BATCH_SCRIPT from "
-		"uid=%u JobId=%u",
-		uid, comp_msg->job_id);
+	       "uid=%u JobId=%u",
+	       uid, comp_msg->job_id);
 
 	if (!validate_super_user(uid)) {
 		/* Only the slurmstepd can complete a batch script */
@@ -1141,7 +1266,7 @@ static void _slurm_rpc_complete_batch_script(slurm_msg_t * msg)
 		      slurm_strerror(comp_msg->slurm_rc));
 		if (error_code == SLURM_SUCCESS) {
 			update_node_msg_t update_node_msg;
-			bzero(&update_node_msg, sizeof(update_node_msg_t));
+			memset(&update_node_msg, 0, sizeof(update_node_msg_t));
 			update_node_msg.node_names =
 				comp_msg->node_name;
 			update_node_msg.node_state = NODE_STATE_DRAIN;
@@ -1222,8 +1347,8 @@ static void _slurm_rpc_job_step_create(slurm_msg_t * msg)
 	if (error_code == SLURM_SUCCESS) {
 		/* issue the RPC */
 		lock_slurmctld(job_write_lock);
-		error_code =
-			step_create(req_step_msg, &step_rec, false, false);
+		error_code = step_create(req_step_msg, &step_rec, 
+					 false, false);
 	}
 	if (error_code == SLURM_SUCCESS)
 		error_code = _make_step_cred(step_rec, &slurm_cred);
@@ -1232,8 +1357,8 @@ static void _slurm_rpc_job_step_create(slurm_msg_t * msg)
 	/* return result */
 	if (error_code) {
 		unlock_slurmctld(job_write_lock);
-		error("_slurm_rpc_job_step_create: %s", 
-			slurm_strerror(error_code));
+		info("_slurm_rpc_job_step_create: %s", 
+		     slurm_strerror(error_code));
 		slurm_send_rc_msg(msg, error_code);
 	} else {
 		slurm_step_layout_t *layout = step_rec->step_layout;
@@ -1243,6 +1368,7 @@ static void _slurm_rpc_job_step_create(slurm_msg_t * msg)
 			req_step_msg->node_list, TIME_STR);
 
 		job_step_resp.job_step_id = step_rec->step_id;
+		job_step_resp.resv_ports  = xstrdup(step_rec->resv_ports);
 		job_step_resp.step_layout = slurm_step_layout_copy(layout);
 		
 		job_step_resp.cred        = slurm_cred;
@@ -1256,6 +1382,7 @@ static void _slurm_rpc_job_step_create(slurm_msg_t * msg)
 		resp.data = &job_step_resp;
 		
 		slurm_send_node_msg(msg->conn_fd, &resp);
+		xfree(job_step_resp.resv_ports);
 		slurm_step_layout_destroy(job_step_resp.step_layout);
 		slurm_cred_destroy(slurm_cred);
 		switch_free_jobinfo(job_step_resp.switch_job);
@@ -1470,7 +1597,7 @@ static void _slurm_rpc_job_alloc_info(slurm_msg_t * msg)
 	END_TIMER2("_slurm_rpc_job_alloc_info");
 
 	/* return result */
-	if (error_code || (job_ptr == NULL)) {
+	if (error_code || (job_ptr == NULL) || (job_ptr->select_job == NULL)) {
 		if (do_unlock)
 			unlock_slurmctld(job_read_lock);
 		debug2("_slurm_rpc_job_alloc_info: JobId=%u, uid=%u: %s",
@@ -1482,15 +1609,20 @@ static void _slurm_rpc_job_alloc_info(slurm_msg_t * msg)
 			job_info_msg->job_id, job_ptr->nodes, TIME_STR);
 
 		/* send job_ID  and node_name_ptr */
+		job_info_resp_msg.num_cpu_groups = job_ptr->select_job->
+						   cpu_array_cnt;
 		job_info_resp_msg.cpu_count_reps = 
-			xmalloc(sizeof(uint32_t) * job_ptr->num_cpu_groups);
+				xmalloc(sizeof(uint32_t) * 
+					job_ptr->select_job->cpu_array_cnt);
 		memcpy(job_info_resp_msg.cpu_count_reps, 
-		       job_ptr->cpu_count_reps,
-		       (sizeof(uint32_t) * job_ptr->num_cpu_groups));
+		       job_ptr->select_job->cpu_array_reps,
+		       (sizeof(uint32_t) * job_ptr->select_job->cpu_array_cnt));
 		job_info_resp_msg.cpus_per_node  = 
-			xmalloc(sizeof(uint32_t) * job_ptr->num_cpu_groups);
-		memcpy(job_info_resp_msg.cpus_per_node, job_ptr->cpus_per_node,
-		       (sizeof(uint32_t) * job_ptr->num_cpu_groups));
+				xmalloc(sizeof(uint16_t) * 
+					job_ptr->select_job->cpu_array_cnt);
+		memcpy(job_info_resp_msg.cpus_per_node, 
+		       job_ptr->select_job->cpu_array_value,
+		       (sizeof(uint16_t) * job_ptr->select_job->cpu_array_cnt));
 		job_info_resp_msg.error_code     = error_code;
 		job_info_resp_msg.job_id         = job_info_msg->job_id;
 		job_info_resp_msg.node_addr      = xmalloc(sizeof(slurm_addr) *
@@ -1499,7 +1631,6 @@ static void _slurm_rpc_job_alloc_info(slurm_msg_t * msg)
 		       (sizeof(slurm_addr) * job_ptr->node_cnt));
 		job_info_resp_msg.node_cnt       = job_ptr->node_cnt;
 		job_info_resp_msg.node_list      = xstrdup(job_ptr->nodes);
-		job_info_resp_msg.num_cpu_groups = job_ptr->num_cpu_groups;
 		job_info_resp_msg.select_jobinfo = 
 			select_g_copy_jobinfo(job_ptr->select_jobinfo);
 		unlock_slurmctld(job_read_lock);
@@ -1545,32 +1676,35 @@ static void _slurm_rpc_job_alloc_info_lite(slurm_msg_t * msg)
 	END_TIMER2("_slurm_rpc_job_alloc_info_lite");
 
 	/* return result */
-	if (error_code || (job_ptr == NULL)) {
+	if (error_code || (job_ptr == NULL) || (job_ptr->select_job == NULL)) {
 		if (do_unlock)
 			unlock_slurmctld(job_read_lock);
-		debug2("_slurm_rpc_job_alloc_info_lite: JobId=%u, uid=%u: %s",
-			job_info_msg->job_id, uid, 
-			slurm_strerror(error_code));
+		info("_slurm_rpc_job_alloc_info_lite: JobId=%u, uid=%u: %s",
+		     job_info_msg->job_id, uid, slurm_strerror(error_code));
 		slurm_send_rc_msg(msg, error_code);
 	} else {
 		info("_slurm_rpc_job_alloc_info_lite JobId=%u NodeList=%s %s",
 			job_info_msg->job_id, job_ptr->nodes, TIME_STR);
 
 		/* send job_ID  and node_name_ptr */
+		job_info_resp_msg.num_cpu_groups = job_ptr->select_job->
+						   cpu_array_cnt;
 		job_info_resp_msg.cpu_count_reps = 
-			xmalloc(sizeof(uint32_t) * job_ptr->num_cpu_groups);
+				xmalloc(sizeof(uint32_t) * 
+					job_ptr->select_job->cpu_array_cnt);
 		memcpy(job_info_resp_msg.cpu_count_reps, 
-		       job_ptr->cpu_count_reps,
-		       (sizeof(uint32_t) * job_ptr->num_cpu_groups));
+		       job_ptr->select_job->cpu_array_reps,
+		       (sizeof(uint32_t) * job_ptr->select_job->cpu_array_cnt));
 		job_info_resp_msg.cpus_per_node  = 
-			xmalloc(sizeof(uint32_t) * job_ptr->num_cpu_groups);
-		memcpy(job_info_resp_msg.cpus_per_node, job_ptr->cpus_per_node,
-		       (sizeof(uint32_t) * job_ptr->num_cpu_groups));
+				xmalloc(sizeof(uint16_t) * 
+					job_ptr->select_job->cpu_array_cnt);
+		memcpy(job_info_resp_msg.cpus_per_node, 
+		       job_ptr->select_job->cpu_array_value,
+		       (sizeof(uint16_t) * job_ptr->select_job->cpu_array_cnt));
 		job_info_resp_msg.error_code     = error_code;
 		job_info_resp_msg.job_id         = job_info_msg->job_id;
 		job_info_resp_msg.node_cnt       = job_ptr->node_cnt;
 		job_info_resp_msg.node_list      = xstrdup(job_ptr->nodes);
-		job_info_resp_msg.num_cpu_groups = job_ptr->num_cpu_groups;
 		job_info_resp_msg.select_jobinfo = 
 			select_g_copy_jobinfo(job_ptr->select_jobinfo);
 		unlock_slurmctld(job_read_lock);
@@ -1598,7 +1732,11 @@ static void _slurm_rpc_ping(slurm_msg_t * msg)
 
 
 /* _slurm_rpc_reconfigure_controller - process RPC to re-initialize 
- *	slurmctld from configuration file */
+ *	slurmctld from configuration file 
+ * Anything added to this function must also be added to the
+ * slurm_reconfigure() function in controller.c; try to keep
+ * the two in sync.
+ */
 static void _slurm_rpc_reconfigure_controller(slurm_msg_t * msg)
 {
 	int error_code = SLURM_SUCCESS;
@@ -1647,11 +1785,34 @@ static void _slurm_rpc_reconfigure_controller(slurm_msg_t * msg)
 		slurm_send_rc_msg(msg, SLURM_SUCCESS);
 		slurm_sched_partition_change();	/* notify sched plugin */
 		select_g_reconfigure();		/* notify select plugin too */
+		priority_g_reconfig();          /* notify priority plugin too */
 		schedule();			/* has its own locks */
 		save_all_state();
 	}
 }
 
+/* _slurm_rpc_takeover - process takeover RPC */
+static void _slurm_rpc_takeover(slurm_msg_t * msg)
+{
+	int error_code = SLURM_SUCCESS;
+	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
+
+	/* Only a super user may request a takeover */
+	if (!validate_super_user(uid)) {
+		error("Security violation, TAKEOVER RPC from uid=%u",
+		      (unsigned int) uid);
+		error_code = ESLURM_USER_ID_MISSING;
+	} else {
+		/* A takeover is meaningless when this daemon is already
+		 * the active controller, so just report success */
+		info("Performing RPC: REQUEST_TAKEOVER: "
+		     "already in controller mode - skipping");
+	}
+
+	slurm_send_rc_msg(msg, error_code);
+}
+
 /* _slurm_rpc_shutdown_controller - process RPC to shutdown slurmctld */
 static void _slurm_rpc_shutdown_controller(slurm_msg_t * msg)
 {
@@ -1768,21 +1929,12 @@ static void _slurm_rpc_step_complete(slurm_msg_t *msg)
 		req->job_id, req->job_step_id,
 		req->range_first, req->range_last, 
 		req->step_rc, (unsigned int) uid);
-	if (!validate_super_user(uid)) {
-		/* Don't trust RPC, it is not from slurmstepd */
-		error("Invalid user %d attempted REQUEST_STEP_COMPLETE",
-		      uid);
-		return;
-	}
 
 	lock_slurmctld(job_write_lock);
-	rc = step_partial_comp(req, &rem, &step_rc);
+	rc = step_partial_comp(req, uid, &rem, &step_rc);
 
 	if (rc || rem) {	/* some error or not totally done */
-		if (rc) {
-			info("step_partial_comp: %s",
-				slurm_strerror(rc));
-		}
+		/* Note: Error printed within step_partial_comp */
 		unlock_slurmctld(job_write_lock);
 		slurm_send_rc_msg(msg, rc);
 		if (!rc)	/* partition completion */
@@ -1931,8 +2083,17 @@ static void _slurm_rpc_submit_batch_job(slurm_msg_t * msg)
 		lock_slurmctld(job_write_lock);
 		if (job_desc_msg->job_id != SLURM_BATCH_SCRIPT) {
 			job_ptr = find_job_record(job_desc_msg->job_id);
-			if (job_ptr && IS_JOB_FINISHED(job_ptr))
-				job_ptr = NULL;
+			if (job_ptr && IS_JOB_FINISHED(job_ptr)) {
+				if (job_ptr->job_state & JOB_COMPLETING) {
+					info("Attempt to re-use active "
+					     "job id %u", job_ptr->job_id);
+					slurm_send_rc_msg(msg, 
+							  ESLURM_DUPLICATE_JOB_ID);
+					unlock_slurmctld(job_write_lock);
+					return;
+				}
+				job_ptr = NULL;	/* OK to re-use job id */
+			}
 		} else
 			job_ptr = NULL;
 
@@ -1960,6 +2121,13 @@ static void _slurm_rpc_submit_batch_job(slurm_msg_t * msg)
 				unlock_slurmctld(job_write_lock);
 				return;
 			}
+			if (job_ptr->details && 
+			    job_ptr->details->prolog_running) {
+				slurm_send_rc_msg(msg, EAGAIN);
+				unlock_slurmctld(job_write_lock);
+				return;
+			}
+
 			error_code = _launch_batch_step(job_desc_msg, uid,
 							&step_id);
 			unlock_slurmctld(job_write_lock);
@@ -1999,6 +2167,7 @@ static void _slurm_rpc_submit_batch_job(slurm_msg_t * msg)
 	/* return result */
 	if ((error_code != SLURM_SUCCESS)
 	&&  (error_code != ESLURM_JOB_HELD)
+	&&  (error_code != ESLURM_RESERVATION_NOT_USABLE)
 	&&  (error_code != ESLURM_REQUESTED_PART_CONFIG_UNAVAILABLE)) {
 		info("_slurm_rpc_submit_batch_job: %s",
 			slurm_strerror(error_code));
@@ -2175,9 +2344,8 @@ static void _slurm_rpc_update_partition(slurm_msg_t * msg)
 		(unsigned int) uid);
 	if (!validate_super_user(uid)) {
 		error_code = ESLURM_USER_ID_MISSING;
-		error
-		    ("Security violation, UPDATE_PARTITION RPC from uid=%u",
-		     (unsigned int) uid);
+		error("Security violation, UPDATE_PARTITION RPC from uid=%u",
+		      (unsigned int) uid);
 	}
 
 	if (error_code == SLURM_SUCCESS) {
@@ -2186,9 +2354,13 @@ static void _slurm_rpc_update_partition(slurm_msg_t * msg)
 			error_code = select_g_update_block(part_desc_ptr);
 		else if(part_desc_ptr->root_only == (uint16_t)INFINITE) 
 			error_code = select_g_update_sub_node(part_desc_ptr);
-		else {
+		else if (msg->msg_type == REQUEST_CREATE_PARTITION) {
+			lock_slurmctld(part_write_lock);
+			error_code = update_part(part_desc_ptr, true);
+			unlock_slurmctld(part_write_lock);
+		} else {
 			lock_slurmctld(part_write_lock);
-			error_code = update_part(part_desc_ptr);
+			error_code = update_part(part_desc_ptr, false);
 			unlock_slurmctld(part_write_lock);
 		}
 		END_TIMER2("_slurm_rpc_update_partition");
@@ -2260,6 +2432,204 @@ static void _slurm_rpc_delete_partition(slurm_msg_t * msg)
 	}
 }
 
+/* _slurm_rpc_resv_create - process RPC to create a reservation */
+static void _slurm_rpc_resv_create(slurm_msg_t * msg)
+{
+	int error_code = SLURM_SUCCESS;
+	DEF_TIMERS;
+	resv_desc_msg_t *resv_desc_ptr = (resv_desc_msg_t *) 
+						msg->data;
+	/* Locks: write node, read partition */
+	slurmctld_lock_t node_write_lock = { 
+		NO_LOCK, NO_LOCK, WRITE_LOCK, READ_LOCK };
+	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
+
+	START_TIMER;
+	debug2("Processing RPC: REQUEST_CREATE_RESERVATION from uid=%u",
+		(unsigned int) uid);
+	if (!validate_super_user(uid)) {
+		error_code = ESLURM_USER_ID_MISSING;
+		error("Security violation, CREATE_RESERVATION RPC from uid=%u",
+		      (unsigned int) uid);
+	}
+
+	if (error_code == SLURM_SUCCESS) {
+		/* do RPC call */
+		lock_slurmctld(node_write_lock);
+		error_code = create_resv(resv_desc_ptr);
+		unlock_slurmctld(node_write_lock);
+		END_TIMER2("_slurm_rpc_resv_create");
+	}
+
+	/* return result */
+	if (error_code) {
+		info("_slurm_rpc_resv_create reservation=%s: %s",
+			resv_desc_ptr->name, slurm_strerror(error_code));
+		slurm_send_rc_msg(msg, error_code);
+	} else {
+		slurm_msg_t response_msg;
+		reservation_name_msg_t resv_resp_msg;
+
+		debug2("_slurm_rpc_resv_create complete for %s %s",
+			resv_desc_ptr->name, TIME_STR);
+		/* send reservation name */
+		slurm_msg_t_init(&response_msg);
+		resv_resp_msg.name    = resv_desc_ptr->name;
+		response_msg.msg_type = RESPONSE_CREATE_RESERVATION;
+		response_msg.data     = &resv_resp_msg;
+		slurm_send_node_msg(msg->conn_fd, &response_msg);
+
+		/* NOTE: These functions provide their own locks */
+		if (schedule()) {
+			schedule_job_save();
+			schedule_node_save();
+		}
+	}
+}
+
+/* _slurm_rpc_resv_update - process RPC to update a reservation */
+static void _slurm_rpc_resv_update(slurm_msg_t * msg)
+{
+	int error_code = SLURM_SUCCESS;
+	DEF_TIMERS;
+	resv_desc_msg_t *resv_desc_ptr = (resv_desc_msg_t *) 
+						msg->data;
+	/* Locks: write node, read partition */
+	slurmctld_lock_t node_write_lock = { 
+		NO_LOCK, NO_LOCK, WRITE_LOCK, READ_LOCK };
+	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
+
+	START_TIMER;
+	debug2("Processing RPC: REQUEST_UPDATE_RESERVATION from uid=%u",
+		(unsigned int) uid);
+	if (!validate_super_user(uid)) {
+		error_code = ESLURM_USER_ID_MISSING;
+		error("Security violation, UPDATE_RESERVATION RPC from uid=%u",
+		      (unsigned int) uid);
+	}
+
+	if (error_code == SLURM_SUCCESS) {
+		/* do RPC call */
+		lock_slurmctld(node_write_lock);
+		error_code = update_resv(resv_desc_ptr);
+		unlock_slurmctld(node_write_lock);
+		END_TIMER2("_slurm_rpc_resv_update");
+	}
+
+	/* return result */
+	if (error_code) {
+		info("_slurm_rpc_resv_update reservation=%s: %s",
+			resv_desc_ptr->name, slurm_strerror(error_code));
+		slurm_send_rc_msg(msg, error_code);
+	} else {
+		debug2("_slurm_rpc_resv_update complete for %s %s",
+			resv_desc_ptr->name, TIME_STR);
+		slurm_send_rc_msg(msg, SLURM_SUCCESS);
+
+		/* NOTE: These functions provide their own locks */
+		if (schedule()) {
+			schedule_job_save();
+			schedule_node_save();
+		}
+	}
+}
+
+/* _slurm_rpc_resv_delete - process RPC to delete a reservation */
+static void _slurm_rpc_resv_delete(slurm_msg_t * msg)
+{
+	/* init */
+	int error_code = SLURM_SUCCESS;
+	DEF_TIMERS;
+	reservation_name_msg_t *resv_desc_ptr = (reservation_name_msg_t *)
+					      msg->data;
+	/* Locks: read job, write node */
+	slurmctld_lock_t node_write_lock = { 
+		NO_LOCK, READ_LOCK, WRITE_LOCK, NO_LOCK };
+	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
+
+	START_TIMER;
+	debug2("Processing RPC: REQUEST_DELETE_RESERVTION from uid=%u",
+		(unsigned int) uid);
+	if (!validate_super_user(uid)) {
+		error_code = ESLURM_USER_ID_MISSING;
+		error("Security violation, DELETE_RESERVATION RPC from uid=%u",
+		      (unsigned int) uid);
+	}
+
+	if (error_code == SLURM_SUCCESS) {
+		/* do RPC call */
+		lock_slurmctld(node_write_lock);
+		error_code = delete_resv(resv_desc_ptr);
+		unlock_slurmctld(node_write_lock);
+		END_TIMER2("_slurm_rpc_resv_delete");
+	}
+
+	/* return result */
+	if (error_code) {
+		info("_slurm_rpc_delete_reservation partition=%s: %s",
+			resv_desc_ptr->name, slurm_strerror(error_code));
+		slurm_send_rc_msg(msg, error_code);
+	} else {
+		info("_slurm_rpc_delete_reservation complete for %s %s",
+			resv_desc_ptr->name, TIME_STR);
+		slurm_send_rc_msg(msg, SLURM_SUCCESS);
+
+		/* NOTE: These functions provide their own locks */
+		if (schedule()) {
+			schedule_job_save();
+			schedule_node_save();
+		}
+
+	}
+}
+
+/* _slurm_rpc_resv_show - process RPC to dump reservation info */
+static void _slurm_rpc_resv_show(slurm_msg_t * msg)
+{
+	resv_info_request_msg_t *resv_req_msg = (resv_info_request_msg_t *)
+						msg->data;
+	DEF_TIMERS;
+	/* Locks: read node */
+	slurmctld_lock_t node_read_lock = { 
+		NO_LOCK, NO_LOCK, READ_LOCK, NO_LOCK };
+	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
+	slurm_msg_t response_msg;
+	char *dump;
+	int dump_size;
+
+	START_TIMER;
+	debug2("Processing RPC: REQUEST_RESERVATION_INFO from uid=%u",
+		(unsigned int) uid);
+	if ((slurmctld_conf.private_data & PRIVATE_DATA_PARTITIONS) &&
+	    (!validate_super_user(uid))) {
+		debug2("Security violation, REQUEST_RESERVATION_INFO "
+		       "RPC from uid=%d", uid);
+		slurm_send_rc_msg(msg, ESLURM_ACCESS_DENIED);
+	} else if ((resv_req_msg->last_update - 1) >= last_resv_update) {
+		debug2("_slurm_rpc_resv_show, no change");
+		slurm_send_rc_msg(msg, SLURM_NO_CHANGE_IN_DATA);
+	} else {
+		lock_slurmctld(node_read_lock);
+		show_resv(&dump, &dump_size, uid);
+		unlock_slurmctld(node_read_lock);
+		END_TIMER2("_slurm_rpc_resv_show");
+
+		/* init response_msg structure */
+		slurm_msg_t_init(&response_msg);
+		response_msg.address = msg->address;
+		response_msg.msg_type = RESPONSE_RESERVATION_INFO;
+		response_msg.data = dump;
+		response_msg.data_size = dump_size;
+
+		/* send message */
+		slurm_send_node_msg(msg->conn_fd, &response_msg);
+		xfree(dump);
+	}
+}
+
 /* determine if nodes are ready for the job */
 static void _slurm_rpc_job_ready(slurm_msg_t * msg)
 {
@@ -2465,28 +2835,35 @@ inline static void  _slurm_rpc_checkpoint(slurm_msg_t * msg)
 
 	/* do RPC call and send reply */
 	lock_slurmctld(job_write_lock);
-	error_code = job_step_checkpoint(ckpt_ptr, uid, msg->conn_fd);
+	if (ckpt_ptr->op == CHECK_RESTART) {
+		error_code = job_restart(ckpt_ptr, uid, msg->conn_fd);
+	} else if (ckpt_ptr->step_id == SLURM_BATCH_SCRIPT) {
+		error_code = job_checkpoint(ckpt_ptr, uid, msg->conn_fd);
+	} else {
+		error_code = job_step_checkpoint(ckpt_ptr, uid, msg->conn_fd);
+	}
 	unlock_slurmctld(job_write_lock);
 	END_TIMER2("_slurm_rpc_checkpoint");
 
 	if (error_code) {
-		if (ckpt_ptr->step_id == SLURM_BATCH_SCRIPT)
+		if (ckpt_ptr->step_id == SLURM_BATCH_SCRIPT) {
 			info("_slurm_rpc_checkpoint %s %u: %s", op, 
 				ckpt_ptr->job_id, slurm_strerror(error_code));
-		else
+		} else {
 			info("_slurm_rpc_checkpoint %s %u.%u: %s", op, 
 				ckpt_ptr->job_id, ckpt_ptr->step_id, 
 				slurm_strerror(error_code));
+		}
 	} else {
-		if (ckpt_ptr->step_id == SLURM_BATCH_SCRIPT)
+		if (ckpt_ptr->step_id == SLURM_BATCH_SCRIPT) {
 			info("_slurm_rpc_checkpoint %s for %u %s", op,
 				ckpt_ptr->job_id, TIME_STR);
-		else
+		} else {
 			info("_slurm_rpc_checkpoint %s for %u.%u %s", op,
 				ckpt_ptr->job_id, ckpt_ptr->step_id, TIME_STR);
-
-		if ((ckpt_ptr->op != CHECK_ABLE) 
-		&&  (ckpt_ptr->op != CHECK_ERROR)) {
+		}
+		if ((ckpt_ptr->op != CHECK_ABLE) &&
+		    (ckpt_ptr->op != CHECK_ERROR)) {
 			/* job state changed, save it */
 			/* NOTE: This function provides its own locks */
 			schedule_job_save();
@@ -2597,19 +2974,6 @@ _xduparray2(uint16_t size, char ** array)
 	return result;
 }
 
-
-
-int _max_nprocs(struct job_record  *job_ptr)
-{
-       int i, num, nprocs = 0;
-       if (!job_ptr) return 0;
-       num = job_ptr->num_cpu_groups;
-       for (i = 0; i < num; i++) {
-	       nprocs += job_ptr->cpu_count_reps[i]*job_ptr->cpus_per_node[i];
-       }
-       return nprocs;
-}
-
 /* _launch_batch_step
  * IN: job_desc_msg from _slurm_rpc_submit_batch_job() but with jobid set
  *     which means it's trying to launch within a pre-existing allocation.
@@ -2701,6 +3065,7 @@ int _launch_batch_step(job_desc_msg_t *job_desc_msg, uid_t uid,
 	launch_msg_ptr->gid = job_ptr->group_id;
 	launch_msg_ptr->uid = uid;
 	launch_msg_ptr->nodes = xstrdup(job_ptr->nodes);
+	launch_msg_ptr->restart_cnt = job_ptr->restart_cnt;
 
 	if (make_batch_job_cred(launch_msg_ptr, job_ptr)) {
 		error("aborting batch step %u.%u", job_ptr->job_id,
@@ -2725,28 +3090,33 @@ int _launch_batch_step(job_desc_msg_t *job_desc_msg, uid_t uid,
 						 job_desc_msg->environment);
 	launch_msg_ptr->envc = job_desc_msg->env_size;
 	launch_msg_ptr->job_mem = job_desc_msg->job_min_memory;
+	launch_msg_ptr->cpus_per_task = job_desc_msg->cpus_per_task;
 
-	/* _max_nprocs() represents the total number of CPUs available
+	/* job_ptr->total_procs represents the total number of CPUs available
 	 * for this step (overcommit not supported yet). If job_desc_msg
 	 * contains a reasonable num_procs request, use that value;
 	 * otherwise default to the allocation processor request.
 	 */
-	launch_msg_ptr->nprocs = _max_nprocs(job_ptr);
+	launch_msg_ptr->nprocs = job_ptr->total_procs;
 	if (job_desc_msg->num_procs > 0 &&
 		job_desc_msg->num_procs < launch_msg_ptr->nprocs)
 		launch_msg_ptr->nprocs = job_desc_msg->num_procs;
 	if (launch_msg_ptr->nprocs < 0)
 		launch_msg_ptr->nprocs = job_ptr->num_procs;
 
-	launch_msg_ptr->num_cpu_groups = job_ptr->num_cpu_groups;
-	launch_msg_ptr->cpus_per_node  = xmalloc(sizeof(uint32_t) *
-			job_ptr->num_cpu_groups);
-	memcpy(launch_msg_ptr->cpus_per_node, job_ptr->cpus_per_node,
-			(sizeof(uint32_t) * job_ptr->num_cpu_groups));
+	launch_msg_ptr->num_cpu_groups = job_ptr->select_job->cpu_array_cnt;
+	launch_msg_ptr->cpus_per_node  = xmalloc(sizeof(uint16_t) *
+			job_ptr->select_job->cpu_array_cnt);
+	memcpy(launch_msg_ptr->cpus_per_node, 
+	       job_ptr->select_job->cpu_array_value,
+	       (sizeof(uint16_t) * job_ptr->select_job->cpu_array_cnt));
 	launch_msg_ptr->cpu_count_reps  = xmalloc(sizeof(uint32_t) *
-			job_ptr->num_cpu_groups);
-	memcpy(launch_msg_ptr->cpu_count_reps, job_ptr->cpu_count_reps,
-			(sizeof(uint32_t) * job_ptr->num_cpu_groups));
+			job_ptr->select_job->cpu_array_cnt);
+	memcpy(launch_msg_ptr->cpu_count_reps, 
+	       job_ptr->select_job->cpu_array_reps,
+	       (sizeof(uint32_t) * job_ptr->select_job->cpu_array_cnt));
+	launch_msg_ptr->select_jobinfo = select_g_copy_jobinfo(
+			job_ptr->select_jobinfo);
 
 	/* FIXME: for some reason these CPU arrays total all the CPUs
 	 * actually allocated, rather than totaling up to the requested
@@ -2827,6 +3197,45 @@ inline static void  _slurm_rpc_trigger_set(slurm_msg_t * msg)
 	slurm_send_rc_msg(msg, rc);
 }
 
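+/* _slurm_rpc_get_topo - process RPC to report network topology
+ *	(switch table) information */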
+inline static void  _slurm_rpc_get_topo(slurm_msg_t * msg)
+{
+	topo_info_response_msg_t *topo_resp_msg;
+	slurm_msg_t response_msg;
+	int i;
+	/* Locks: read node lock */
+	slurmctld_lock_t node_read_lock = { 
+		NO_LOCK, NO_LOCK, READ_LOCK, NO_LOCK };
+	DEF_TIMERS;
+
+	START_TIMER;
+	lock_slurmctld(node_read_lock);
+	topo_resp_msg = xmalloc(sizeof(topo_info_response_msg_t));
+	topo_resp_msg->record_count = switch_record_cnt;
+	topo_resp_msg->topo_array = xmalloc(sizeof(topo_info_t) * 
+					    topo_resp_msg->record_count);
+	for (i=0; i<topo_resp_msg->record_count; i++) {
+		topo_resp_msg->topo_array[i].level      =
+				switch_record_table[i].level;
+		topo_resp_msg->topo_array[i].link_speed =
+				switch_record_table[i].link_speed;
+		topo_resp_msg->topo_array[i].name       =
+				xstrdup(switch_record_table[i].name);
+		topo_resp_msg->topo_array[i].nodes      =
+				xstrdup(switch_record_table[i].nodes);
+		topo_resp_msg->topo_array[i].switches   =
+				xstrdup(switch_record_table[i].switches);
+	}
+	unlock_slurmctld(node_read_lock);
+	END_TIMER2("_slurm_rpc_get_topo");
+
+	slurm_msg_t_init(&response_msg);
+	response_msg.address  = msg->address;
+	response_msg.msg_type = RESPONSE_TOPO_INFO;
+	response_msg.data     = topo_resp_msg;
+	slurm_send_node_msg(msg->conn_fd, &response_msg);
+	slurm_free_topo_info_msg(topo_resp_msg);
+}
+
 inline static void  _slurm_rpc_job_notify(slurm_msg_t * msg)
 {
 	int error_code = SLURM_SUCCESS;
@@ -2869,7 +3278,8 @@ inline static void  _slurm_rpc_set_debug_level(slurm_msg_t *msg)
 	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
 	slurmctld_lock_t config_read_lock =
 		{ READ_LOCK, NO_LOCK, NO_LOCK, NO_LOCK };
-	set_debug_level_msg_t *request_msg = (set_debug_level_msg_t *) msg->data;
+	set_debug_level_msg_t *request_msg = 
+		(set_debug_level_msg_t *) msg->data;
 	log_options_t log_opts = LOG_OPTS_INITIALIZER;
 	slurm_ctl_conf_t *conf;
 
@@ -2955,22 +3365,22 @@ inline static void  _slurm_rpc_accounting_update_msg(slurm_msg_t *msg)
 			case ACCT_REMOVE_USER:
 			case ACCT_ADD_COORD:
 			case ACCT_REMOVE_COORD:
-				rc = assoc_mgr_update_local_users(object);
+				rc = assoc_mgr_update_users(object);
 				break;
 			case ACCT_ADD_ASSOC:
 			case ACCT_MODIFY_ASSOC:
 			case ACCT_REMOVE_ASSOC:
-				rc = assoc_mgr_update_local_assocs(object);
+				rc = assoc_mgr_update_assocs(object);
 				break;
 			case ACCT_ADD_QOS:
 			case ACCT_MODIFY_QOS:
 			case ACCT_REMOVE_QOS:
-				rc = assoc_mgr_update_local_qos(object);
+				rc = assoc_mgr_update_qos(object);
 				break;
 			case ACCT_ADD_WCKEY:
 			case ACCT_MODIFY_WCKEY:
 			case ACCT_REMOVE_WCKEY:
-				rc = assoc_mgr_update_local_wckeys(object);
+				rc = assoc_mgr_update_wckeys(object);
 				break;
 			case ACCT_UPDATE_NOTSET:
 			default:
diff --git a/src/slurmctld/proc_req.h b/src/slurmctld/proc_req.h
index fad93e53a22de8cc0ccd050a9a9af17bc192b902..258b294464a5ef05488dee2c16b291f1a667238f 100644
--- a/src/slurmctld/proc_req.h
+++ b/src/slurmctld/proc_req.h
@@ -4,10 +4,11 @@
  *  Copyright (C) 2002 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov> and Kevin Tew <tew1@llnl.gov> 
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/slurmctld/read_config.c b/src/slurmctld/read_config.c
index 195136685cb6932685761a6bcb05f7f90e9acd91..2785fc22f37cc031ea91e67cf9820154344dc5d7 100644
--- a/src/slurmctld/read_config.c
+++ b/src/slurmctld/read_config.c
@@ -1,13 +1,15 @@
 /*****************************************************************************\
  *  read_config.c - read the overall slurm configuration file
  *****************************************************************************
- *  Copyright (C) 2002 The Regents of the University of California.
+ *  Copyright (C) 2002-2007 The Regents of the University of California.
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -15,7 +17,7 @@
  *  any later version.
  *
  *  In addition, as a special exception, the copyright holders give permission 
- *  to link the code of portions of this program with the OpenSSL library under 
+ *  to link the code of portions of this program with the OpenSSL library under
  *  certain conditions as described in each individual source file, and 
  *  distribute linked combinations including the two. You must obey the GNU 
  *  General Public License in all respects for all of the code used other than 
@@ -68,31 +70,37 @@
 #include "src/common/xstring.h"
 
 #include "src/slurmctld/acct_policy.h"
+#include "src/slurmctld/basil_interface.h"
 #include "src/slurmctld/job_scheduler.h"
 #include "src/slurmctld/licenses.h"
 #include "src/slurmctld/locks.h"
 #include "src/slurmctld/node_scheduler.h"
+#include "src/slurmctld/port_mgr.h"
 #include "src/slurmctld/proc_req.h"
 #include "src/slurmctld/read_config.h"
+#include "src/slurmctld/reservation.h"
 #include "src/slurmctld/sched_plugin.h"
 #include "src/slurmctld/slurmctld.h"
+#include "src/slurmctld/srun_comm.h"
 #include "src/slurmctld/trigger_mgr.h"
+#include "src/slurmctld/topo_plugin.h"
 
 static void _acct_restore_active_jobs(void);
 static int  _build_bitmaps(void);
 static void _build_bitmaps_pre_select(void);
 static int  _init_all_slurm_conf(void);
-static void _purge_old_node_state(struct node_record *old_node_table_ptr, 
-				int old_node_record_count);
-static int  _restore_job_dependencies(void);
-static int  _restore_node_state(struct node_record *old_node_table_ptr, 
-				int old_node_record_count);
 static int  _preserve_select_type_param(slurm_ctl_conf_t * ctl_conf_ptr, 
-					select_type_plugin_info_t old_select_type_p);
+					select_type_plugin_info_t 
+					old_select_type_p);
 static int  _preserve_plugins(slurm_ctl_conf_t * ctl_conf_ptr, 
 				char *old_auth_type, char *old_checkpoint_type,
 				char *old_crypto_type, char *old_sched_type, 
 				char *old_select_type, char *old_switch_type);
+static void _purge_old_node_state(struct node_record *old_node_table_ptr, 
+				int old_node_record_count);
+static int  _restore_job_dependencies(void);
+static int  _restore_node_state(struct node_record *old_node_table_ptr, 
+				int old_node_record_count);
 static int  _sync_nodes_to_comp_job(void);
 static int  _sync_nodes_to_jobs(void);
 static int  _sync_nodes_to_active_job(struct job_record *job_ptr);
@@ -103,9 +111,6 @@ static void _validate_node_proc_count(void);
 static char *highest_node_name = NULL;
 int node_record_count = 0;
 
-/* FIXME - declarations for temporarily moved functions */
-#define MULTIPLE_VALUE_MSG "Multiple values for %s, latest one used"
-
 /*
  * _build_bitmaps_pre_select - recover some state for jobs and nodes prior to 
  *	calling the select_* functions
@@ -116,7 +121,6 @@ static void _build_bitmaps_pre_select(void)
 	struct node_record   *node_ptr;
 	ListIterator part_iterator;
 	int i;
-	
 
 	/* scan partition table and identify nodes in each */
 	part_iterator = list_iterator_create(part_list);
@@ -179,14 +183,17 @@ static int _build_bitmaps(void)
 	/* initialize the idle and up bitmaps */
 	FREE_NULL_BITMAP(idle_node_bitmap);
 	FREE_NULL_BITMAP(avail_node_bitmap);
+	FREE_NULL_BITMAP(power_node_bitmap);
 	FREE_NULL_BITMAP(share_node_bitmap);
 	FREE_NULL_BITMAP(up_node_bitmap);
 	idle_node_bitmap  = (bitstr_t *) bit_alloc(node_record_count);
 	avail_node_bitmap = (bitstr_t *) bit_alloc(node_record_count);
+	power_node_bitmap = (bitstr_t *) bit_alloc(node_record_count);
 	share_node_bitmap = (bitstr_t *) bit_alloc(node_record_count);
 	up_node_bitmap    = (bitstr_t *) bit_alloc(node_record_count);
 	if ((idle_node_bitmap     == NULL) ||
 	    (avail_node_bitmap    == NULL) ||
+	    (power_node_bitmap    == NULL) ||
 	    (share_node_bitmap    == NULL) ||
 	    (up_node_bitmap       == NULL)) 
 		fatal ("bit_alloc malloc failure");
@@ -231,17 +238,15 @@ static int _build_bitmaps(void)
 	 * their configuration, resync DRAINED vs. DRAINING state */
 	for (i = 0; i < node_record_count; i++) {
 		uint16_t base_state, drain_flag, no_resp_flag, job_cnt;
+		struct node_record *node_ptr = node_record_table_ptr + i;
 
-		if (node_record_table_ptr[i].name[0] == '\0')
+		if (node_ptr->name[0] == '\0')
 			continue;	/* defunct */
-		base_state = node_record_table_ptr[i].node_state & 
-				NODE_STATE_BASE;
-		drain_flag = node_record_table_ptr[i].node_state &
+		base_state = node_ptr->node_state & NODE_STATE_BASE;
+		drain_flag = node_ptr->node_state &
 				(NODE_STATE_DRAIN | NODE_STATE_FAIL);
-		no_resp_flag = node_record_table_ptr[i].node_state & 
-				NODE_STATE_NO_RESPOND;
-		job_cnt = node_record_table_ptr[i].run_job_cnt +
-		          node_record_table_ptr[i].comp_job_cnt;
+		no_resp_flag = node_ptr->node_state & NODE_STATE_NO_RESPOND;
+		job_cnt = node_ptr->run_job_cnt + node_ptr->comp_job_cnt;
 
 		if (((base_state == NODE_STATE_IDLE) && (job_cnt == 0))
 		||  (base_state == NODE_STATE_DOWN))
@@ -252,9 +257,10 @@ static int _build_bitmaps(void)
 				bit_set(avail_node_bitmap, i);
 			bit_set(up_node_bitmap, i);
 		}
-		if (node_record_table_ptr[i].config_ptr)
-			bit_set(node_record_table_ptr[i].config_ptr->
-				node_bitmap, i);
+		if (node_ptr->node_state & NODE_STATE_POWER_SAVE)
+			bit_set(power_node_bitmap, i);
+		if (node_ptr->config_ptr)
+			bit_set(node_ptr->config_ptr->node_bitmap, i);
 	}
 	return error_code;
 }
@@ -314,38 +320,6 @@ static int _state_str2int(const char *state_str)
 	return state_val;
 }
 
-#ifdef HAVE_3D
-/* Used to get the general name of the machine, used primarily 
- * for bluegene systems.  Not in general use because some systems 
- * have multiple prefix's such as foo[1-1000],bar[1-1000].
- */
-/* Caller must be holding slurm_conf_lock() */
-static void _set_node_prefix(const char *nodenames, slurm_ctl_conf_t *conf)
-{
-	int i;
-	char *tmp;
-
-	xassert(nodenames != NULL);
-	for (i = 1; nodenames[i] != '\0'; i++) {
-		if((nodenames[i-1] == '[') 
-		   || (nodenames[i-1] <= '9'
-		       && nodenames[i-1] >= '0'))
-			break;
-	}
-	xfree(conf->node_prefix);
-	if(nodenames[i] == '\0')
-		conf->node_prefix = xstrdup(nodenames);
-	else {
-		tmp = xmalloc(sizeof(char)*i+1);
-		memset(tmp, 0, i+1);
-		snprintf(tmp, i, "%s", nodenames);
-		conf->node_prefix = tmp;
-		tmp = NULL;
-	}
-	debug3("Prefix is %s %s %d", conf->node_prefix, nodenames, i);
-}
-#endif /* HAVE_BG */
-
 /* 
  * _build_single_nodeline_info - From the slurm.conf reader, build table,
  * 	and set values
@@ -392,11 +366,6 @@ static int _build_single_nodeline_info(slurm_conf_node_t *node_ptr,
 		goto cleanup;
 	}
 
-#ifdef HAVE_3D
-	if (conf->node_prefix == NULL)
-		_set_node_prefix(node_ptr->nodenames, conf);
-#endif
-
 	/* some sanity checks */
 #ifdef HAVE_FRONT_END
 	if ((hostlist_count(hostname_list) != 1) ||
@@ -518,7 +487,7 @@ cleanup:
 	return error_code;
 }
 
-static void _handle_all_downnodes()
+static void _handle_all_downnodes(void)
 {
 	slurm_conf_downnodes_t *ptr, **ptr_array;
 	int count;
@@ -544,10 +513,11 @@ static void _handle_all_downnodes()
  * Note: Operates on common variables
  *	default_node_record - default node configuration values
  */
-static int _build_all_nodeline_info(slurm_ctl_conf_t *conf)
+static int _build_all_nodeline_info(void)
 {
 	slurm_conf_node_t *node, **ptr_array;
 	struct config_record *config_ptr = NULL;
+	slurm_ctl_conf_t *conf;
 	int count;
 	int i;
 
@@ -555,6 +525,7 @@ static int _build_all_nodeline_info(slurm_ctl_conf_t *conf)
 	if (count == 0)
 		fatal("No NodeName information available!");
 
+	conf = slurm_conf_lock();
 	for (i = 0; i < count; i++) {
 		node = ptr_array[i];
 
@@ -574,6 +545,10 @@ static int _build_all_nodeline_info(slurm_ctl_conf_t *conf)
 		_build_single_nodeline_info(node, config_ptr, conf);
 	}
 	xfree(highest_node_name);
+
+	/* Unlock config here so that we can call
+	 * find_node_record() below and in the topology plugins */
+	slurm_conf_unlock();
 #ifdef HAVE_3D
 {
 	char *node_000 = NULL;
@@ -581,18 +556,19 @@ static int _build_all_nodeline_info(slurm_ctl_conf_t *conf)
 	if (conf->node_prefix)
 		node_000 = xstrdup(conf->node_prefix);
 	xstrcat(node_000, "000");
-	slurm_conf_unlock();
 	node_rec = find_node_record(node_000);
-	slurm_conf_lock();
 	if (node_rec == NULL)
 		fatal("No node %s configured", node_000);
 	xfree(node_000);
 #ifndef HAVE_BG
 	if (count == 1)
-		nodes_to_hilbert_curve();
+		slurm_topo_build_config();
 #endif	/* ! HAVE_BG */
 }
+#else
+	slurm_topo_build_config();
 #endif	/* HAVE_3D */
+
 	return SLURM_SUCCESS;
 }
 
@@ -635,9 +611,17 @@ static int _build_single_partitionline_info(slurm_conf_partition_t *part)
 	
 	if(part_ptr->disable_root_jobs) 
 		debug2("partition %s does not allow root jobs", part_ptr->name);
-	
+
+	if ((part->default_time != NO_VAL) &&
+	    (part->default_time > part->max_time)) {
+		info("partition %s DefaultTime exceeds MaxTime (%u > %u)",
+		     part->default_time, part->max_time);
+		part->default_time = NO_VAL;
+	}
+
 	part_ptr->hidden         = part->hidden_flag ? 1 : 0;
 	part_ptr->max_time       = part->max_time;
+	part_ptr->default_time   = part->default_time;
 	part_ptr->max_share      = part->max_share;
 	part_ptr->max_nodes      = part->max_nodes;
 	part_ptr->max_nodes_orig = part->max_nodes;
@@ -650,6 +634,32 @@ static int _build_single_partitionline_info(slurm_conf_partition_t *part)
 		xfree(part_ptr->allow_groups);
 		part_ptr->allow_groups = xstrdup(part->allow_groups);
 	}
+	if (part->allow_alloc_nodes) {
+		if (part_ptr->allow_alloc_nodes) {
+			int cnt_tot, cnt_uniq, buf_size;
+			hostlist_t hl = hostlist_create(part_ptr->
+							allow_alloc_nodes);
+
+			hostlist_push(hl, part->allow_alloc_nodes);
+			cnt_tot = hostlist_count(hl);
+			hostlist_uniq(hl);
+			cnt_uniq = hostlist_count(hl);
+			if (cnt_tot != cnt_uniq) {
+				fatal("Duplicate Allowed Allocating Nodes for "
+				      "Partition %s", part->name);
+			}
+			buf_size = strlen(part_ptr->allow_alloc_nodes) + 1 +
+				   strlen(part->allow_alloc_nodes) + 1;
+			xfree(part_ptr->allow_alloc_nodes);
+			part_ptr->allow_alloc_nodes = xmalloc(buf_size);
+			hostlist_ranged_string(hl, buf_size,
+					       part_ptr->allow_alloc_nodes);
+			hostlist_destroy(hl);
+		} else {
+			part_ptr->allow_alloc_nodes =
+					xstrdup(part->allow_alloc_nodes);
+		}
+	}
 	if (part->nodes) {
 		if (part_ptr->nodes) {
 			int cnt_tot, cnt_uniq, buf_size;
@@ -690,7 +700,8 @@ static int _build_all_partitionline_info()
 	slurm_conf_partition_t *part, **ptr_array;
 	int count;
 	int i;
-
+	ListIterator itr = NULL;
+
 	count = slurm_conf_partition_array(&ptr_array);
 	if (count == 0)
 		fatal("No PartitionName information available!");
@@ -699,7 +710,22 @@ static int _build_all_partitionline_info()
 		part = ptr_array[i];
 
 		_build_single_partitionline_info(part);
+		if(part->priority > part_max_priority) 
+			part_max_priority = part->priority;
 	}
+
+	/* set up the normalized priority of the partitions */
+	if(part_max_priority) {
+		struct part_record *part_ptr = NULL;
+
+		itr = list_iterator_create(part_list);
+		while((part_ptr = list_next(itr))) {
+			part_ptr->norm_priority = (double)part_ptr->priority 
+				/ (double)part_max_priority;
+		}
+		list_iterator_destroy(itr);
+	}
+
 	return SLURM_SUCCESS;
 }
 
@@ -728,7 +754,7 @@ int read_slurm_conf(int recover)
 	char *old_select_type     = xstrdup(slurmctld_conf.select_type);
 	char *old_switch_type     = xstrdup(slurmctld_conf.switch_type);
 	char *state_save_dir      = xstrdup(slurmctld_conf.state_save_location);
-	slurm_ctl_conf_t *conf;
+	char *mpi_params;
 	select_type_plugin_info_t old_select_type_p = 
 		(select_type_plugin_info_t) slurmctld_conf.select_type_param;
 
@@ -763,17 +789,20 @@ int read_slurm_conf(int recover)
 		node_record_table_ptr = old_node_table_ptr;
 		return error_code;
 	}
-	conf = slurm_conf_lock();
-	_build_all_nodeline_info(conf);
-	slurm_conf_unlock();
+
+	if (slurm_topo_init() != SLURM_SUCCESS)
+		fatal("Failed to initialize topology plugin");
+
+	_build_all_nodeline_info();
 	_handle_all_downnodes();
 	_build_all_partitionline_info();
 
 	update_logging();
 	g_slurm_jobcomp_init(slurmctld_conf.job_comp_loc);
-	slurm_sched_init();
-	if (switch_init() < 0)
-		error("Failed to initialize switch plugin");
+	if (slurm_sched_init() != SLURM_SUCCESS)
+		fatal("Failed to initialize sched plugin");
+	if (switch_init() != SLURM_SUCCESS)
+		fatal("Failed to initialize switch plugin");
 
 	if (default_part_loc == NULL)
 		error("read_slurm_conf: default partition not set.");
@@ -803,6 +832,7 @@ int read_slurm_conf(int recover)
 						 old_node_record_count);
 			error_code = MAX(error_code, rc);  /* not fatal */
 		}
+		load_last_job_id();
 		reset_first_job_id();
 		(void) slurm_sched_reconfig();
 		xfree(state_save_dir);
@@ -810,10 +840,10 @@ int read_slurm_conf(int recover)
 
 	_build_bitmaps_pre_select();
 	if ((select_g_node_init(node_record_table_ptr, node_record_count)
-	     != SLURM_SUCCESS) 
-	    || (select_g_block_init(part_list) != SLURM_SUCCESS) 
-	    || (select_g_state_restore(state_save_dir) != SLURM_SUCCESS) 
-	    || (select_g_job_init(job_list) != SLURM_SUCCESS)) {
+	     != SLURM_SUCCESS)						||
+	    (select_g_block_init(part_list) != SLURM_SUCCESS)		||
+	    (select_g_state_restore(state_save_dir) != SLURM_SUCCESS)	||
+	    (select_g_job_init(job_list) != SLURM_SUCCESS)) {
 		fatal("failed to initialize node selection plugin state, "
 		      "Clean start required.");
 	}
@@ -826,6 +856,9 @@ int read_slurm_conf(int recover)
 
 	if ((rc = _build_bitmaps()))
 		fatal("_build_bitmaps failure");
+	mpi_params = slurm_get_mpi_params();
+	reserve_port_config(mpi_params);
+	xfree(mpi_params);
 
 	license_free();
 	if (license_init(slurmctld_conf.licenses) != SLURM_SUCCESS)
@@ -839,6 +872,7 @@ int read_slurm_conf(int recover)
 	(void) _sync_nodes_to_comp_job();/* must follow select_g_node_init() */
 	load_part_uid_allow_list(1);
 
+	load_all_resv_state(recover);
 	if (recover >= 1)
 		(void) trigger_state_restore();
 
@@ -862,6 +896,10 @@ int read_slurm_conf(int recover)
 	if (load_job_ret)
 		_acct_restore_active_jobs();
 
+#ifdef HAVE_CRAY_XT
+	basil_query();
+#endif
+
 	slurmctld_conf.last_update = time(NULL);
 	END_TIMER2("read_slurm_conf");
 	return error_code;
@@ -871,10 +909,17 @@ int read_slurm_conf(int recover)
 /* Restore node state and size information from saved records.
  * If a node was re-configured to be down or drained, we set those states */
 static int _restore_node_state(struct node_record *old_node_table_ptr, 
-				int old_node_record_count)
+			       int old_node_record_count)
 {
 	struct node_record *node_ptr;
 	int i, rc = SLURM_SUCCESS;
+	hostset_t hs = NULL;
+	slurm_ctl_conf_t *conf = slurm_conf_lock();
+	bool power_save_mode = false;
+
+	if (conf->suspend_program && conf->resume_program)
+		power_save_mode = true;
+	slurm_conf_unlock();
 
 	for (i = 0; i < old_node_record_count; i++) {
 		uint16_t drain_flag = false, down_flag = false;
@@ -882,7 +927,8 @@ static int _restore_node_state(struct node_record *old_node_table_ptr,
 		if (node_ptr == NULL)
 			continue;
 
-		if ((node_ptr->node_state & NODE_STATE_BASE) == NODE_STATE_DOWN)
+		if ((node_ptr->node_state & NODE_STATE_BASE) == 
+		    NODE_STATE_DOWN)
 			down_flag = true;
 		if (node_ptr->node_state & NODE_STATE_DRAIN)
 			drain_flag = true;
@@ -893,7 +939,15 @@ static int _restore_node_state(struct node_record *old_node_table_ptr,
 		}
 		if (drain_flag)
 			node_ptr->node_state |= NODE_STATE_DRAIN; 
-			
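+		/* If the configuration no longer supports power save
+		 * mode, clear any stale POWER_SAVE flags and log the
+		 * affected nodes below */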
+		if ((node_ptr->node_state & NODE_STATE_POWER_SAVE) &&
+		    (!power_save_mode)) {
+			node_ptr->node_state &= (~NODE_STATE_POWER_SAVE);
+			if (hs)
+				hostset_insert(hs, node_ptr->name);
+			else
+				hs = hostset_create(node_ptr->name);
+		}
+
 		node_ptr->last_response = old_node_table_ptr[i].last_response;
 		if (old_node_table_ptr[i].port != node_ptr->config_ptr->cpus) {
 			rc = ESLURM_NEED_RESTART;
@@ -928,6 +982,13 @@ static int _restore_node_state(struct node_record *old_node_table_ptr,
 			old_node_table_ptr[i].os = NULL;
 		}
 	}
+
+	if (hs) {
+		char node_names[128];
+		hostset_ranged_string(hs, sizeof(node_names), node_names);
+		info("Cleared POWER_SAVE flag from nodes %s", node_names);
+		hostset_destroy(hs);
+	}
 	return rc;
 }
 
@@ -1094,6 +1155,7 @@ static int _sync_nodes_to_comp_job(void)
 			update_cnt++;
 			info("Killing job_id %u", job_ptr->job_id);
 			deallocate_nodes(job_ptr, false, false);
+			job_completion_logger(job_ptr);
 		}
 	}
 	list_iterator_destroy(job_iterator);
@@ -1111,11 +1173,10 @@ static int _sync_nodes_to_active_job(struct job_record *job_ptr)
 	uint16_t base_state, node_flags;
 	struct node_record *node_ptr = node_record_table_ptr;
 
-	job_ptr->node_cnt = 0;
+	job_ptr->node_cnt = bit_set_count(job_ptr->node_bitmap);
 	for (i = 0; i < node_record_count; i++, node_ptr++) {
 		if (bit_test(job_ptr->node_bitmap, i) == 0)
 			continue;
-		job_ptr->node_cnt++;
 
 		base_state = node_ptr->node_state & NODE_STATE_BASE;
 		node_flags = node_ptr->node_state & NODE_STATE_FLAGS;
@@ -1128,12 +1189,26 @@ static int _sync_nodes_to_active_job(struct job_record *job_ptr)
 		    (job_ptr->details) && (job_ptr->details->shared == 0))
 			node_ptr->no_share_job_cnt++;
 
-		if (base_state == NODE_STATE_DOWN) {
+		if ((base_state == NODE_STATE_DOWN)     &&
+		    (job_ptr->job_state == JOB_RUNNING) &&
+		    (job_ptr->kill_on_node_fail == 0)   &&
+		    (job_ptr->node_cnt > 1)) {
+			/* This should only happen if a job was running 
+			 * on a node that was newly configured DOWN */
+			info("Removing failed node %s from job_id %u",
+			     node_ptr->name, job_ptr->job_id);
+			srun_node_fail(job_ptr->job_id, node_ptr->name);
+			kill_step_on_node(job_ptr, node_ptr);
+			excise_node_from_job(job_ptr, node_ptr);
+		} else if (base_state == NODE_STATE_DOWN) {
 			time_t now = time(NULL);
+			info("Killing job %u on DOWN node %s",
+			     job_ptr->job_id, node_ptr->name);
 			job_ptr->job_state = JOB_NODE_FAIL | JOB_COMPLETING;
 			job_ptr->end_time = MIN(job_ptr->end_time, now);
 			job_ptr->exit_code = MAX(job_ptr->exit_code, 1);
 			job_ptr->state_reason = FAIL_DOWN_NODE;
+			xfree(job_ptr->state_desc);
 			job_completion_logger(job_ptr);
 			cnt++;
 		} else if ((base_state == NODE_STATE_UNKNOWN) || 
diff --git a/src/slurmctld/read_config.h b/src/slurmctld/read_config.h
index 2305565ae553b02c2d5084430a4de621264a19e2..8e96a6d5050c1ffa3d5c69f4ab0ec49632302ae7 100644
--- a/src/slurmctld/read_config.h
+++ b/src/slurmctld/read_config.h
@@ -4,10 +4,11 @@
  *  Copyright (C) 2003 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov> et. al.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/slurmctld/reservation.c b/src/slurmctld/reservation.c
new file mode 100644
index 0000000000000000000000000000000000000000..50ddc40b9aca8c05309c86337d6f7892c9bed89b
--- /dev/null
+++ b/src/slurmctld/reservation.c
@@ -0,0 +1,2560 @@
+/*****************************************************************************\
+ *  reservation.c - resource reservation management
+ *****************************************************************************
+ *  Copyright (C) 2009 Lawrence Livermore National Security.
+ *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+ *  Written by Morris Jette <jette1@llnl.gov> et. al.
+ *  CODE-OCEC-09-009. All rights reserved.
+ *  
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
+ *  
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission 
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and 
+ *  distribute linked combinations including the two. You must obey the GNU 
+ *  General Public License in all respects for all of the code used other than 
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this 
+ *  exception to your version of the file(s), but you are not obligated to do 
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in 
+ *  the program, then also delete it here.
+ *  
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *  
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#ifdef HAVE_CONFIG_H
+#  include "config.h"
+#endif
+
+#ifdef WITH_PTHREADS
+#  include <pthread.h>
+#endif				/* WITH_PTHREADS */
+
+#include <fcntl.h>
+#include <string.h>
+#include <stdlib.h>
+#include <time.h>
+#include <unistd.h>
+#include <slurm/slurm.h>
+#include <slurm/slurm_errno.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+
+#include "src/common/assoc_mgr.h"
+#include "src/common/bitstring.h"
+#include "src/common/hostlist.h"
+#include "src/common/list.h"
+#include "src/common/log.h"
+#include "src/common/macros.h"
+#include "src/common/pack.h"
+#include "src/common/parse_time.h"
+#include "src/common/uid.h"
+#include "src/common/xassert.h"
+#include "src/common/xmalloc.h"
+#include "src/common/xstring.h"
+#include "src/common/slurm_accounting_storage.h"
+
+#include "src/slurmctld/locks.h"
+#include "src/slurmctld/slurmctld.h"
+#include "src/slurmctld/state_save.h"
+
+#define _RESV_DEBUG	0
+#define RESV_MAGIC	0x3b82
+
+/* Change RESV_STATE_VERSION value when changing the state save format.
+ * Add logic to permit reading of the previous version's state in order
+ * to avoid losing reservations across major SLURM updates. */
+#define RESV_STATE_VERSION      "VER002"
+
+time_t    last_resv_update = (time_t) 0;
+List      resv_list = (List) NULL;
+uint32_t  resv_over_run;
+uint32_t  top_suffix = 0;
+
+static int  _build_account_list(char *accounts, int *account_cnt, 
+			        char ***account_list);
+static int  _build_uid_list(char *users, int *user_cnt, uid_t **user_list);
+static void _clear_job_resv(slurmctld_resv_t *resv_ptr);
+static slurmctld_resv_t *_copy_resv(slurmctld_resv_t *resv_orig_ptr);
+static void _del_resv_rec(void *x);
+static void _dump_resv_req(resv_desc_msg_t *resv_ptr, char *mode);
+static int  _find_resv_id(void *x, void *key);
+static int  _find_resv_name(void *x, void *key);
+static void _generate_resv_id(void);
+static void _generate_resv_name(resv_desc_msg_t *resv_ptr);
+static bool _is_account_valid(char *account);
+static bool _is_resv_used(slurmctld_resv_t *resv_ptr);
+static void _pack_resv(slurmctld_resv_t *resv_ptr, Buf buffer,
+		       bool internal);
+static int  _post_resv_create(slurmctld_resv_t *resv_ptr);
+static int  _post_resv_delete(slurmctld_resv_t *resv_ptr);
+static int  _post_resv_update(slurmctld_resv_t *resv_ptr,
+			      slurmctld_resv_t *old_resv_ptr);
+static bitstr_t *_pick_idle_nodes(bitstr_t *avail_nodes, 
+				  resv_desc_msg_t *resv_desc_ptr);
+static int  _resize_resv(slurmctld_resv_t *resv_ptr, uint32_t node_cnt);
+static bool _resv_overlap(time_t start_time, time_t end_time, 
+			  uint16_t flags, bitstr_t *node_bitmap,
+			  slurmctld_resv_t *this_resv_ptr);
+static int  _select_nodes(resv_desc_msg_t *resv_desc_ptr, 
+			  struct part_record **part_ptr,
+			  bitstr_t **resv_bitmap);
+static int  _set_assoc_list(slurmctld_resv_t *resv_ptr);
+static void _set_cpu_cnt(slurmctld_resv_t *resv_ptr);
+static void _set_nodes_maint(slurmctld_resv_t *resv_ptr, time_t now);
+static void _swap_resv(slurmctld_resv_t *resv_backup, 
+		       slurmctld_resv_t *resv_ptr);
+static int  _update_account_list(slurmctld_resv_t *resv_ptr, 
+				 char *accounts);
+static int  _update_uid_list(slurmctld_resv_t *resv_ptr, char *users);
+static void _validate_all_reservations(void);
+static int  _valid_job_access_resv(struct job_record *job_ptr,
+				   slurmctld_resv_t *resv_ptr);
+static bool _validate_one_reservation(slurmctld_resv_t *resv_ptr);
+static void _validate_node_choice(slurmctld_resv_t *resv_ptr);
+
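+/* Make a deep copy of a reservation record, duplicating all strings,
+ * bitmaps, and account/user arrays */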
+static slurmctld_resv_t *_copy_resv(slurmctld_resv_t *resv_orig_ptr)
+{
+	slurmctld_resv_t *resv_copy_ptr;
+	int i;
+
+	xassert(resv_orig_ptr->magic == RESV_MAGIC);
+	resv_copy_ptr = xmalloc(sizeof(slurmctld_resv_t));
+	resv_copy_ptr->accounts = xstrdup(resv_orig_ptr->accounts);
+	resv_copy_ptr->account_cnt = resv_orig_ptr->account_cnt;
+	resv_copy_ptr->account_list = xmalloc(sizeof(char *) * 
+					      resv_orig_ptr->account_cnt);
+	for (i=0; i<resv_copy_ptr->account_cnt; i++) {
+		resv_copy_ptr->account_list[i] = 
+				xstrdup(resv_orig_ptr->account_list[i]);
+	}
+	resv_copy_ptr->assoc_list = xstrdup(resv_orig_ptr->assoc_list);
+	resv_copy_ptr->cpu_cnt = resv_orig_ptr->cpu_cnt;
+	resv_copy_ptr->end_time = resv_orig_ptr->end_time;
+	resv_copy_ptr->features = xstrdup(resv_orig_ptr->features);
+	resv_copy_ptr->flags = resv_orig_ptr->flags;
+	resv_copy_ptr->job_pend_cnt = resv_orig_ptr->job_pend_cnt;
+	resv_copy_ptr->job_run_cnt = resv_orig_ptr->job_run_cnt;
+	resv_copy_ptr->magic = resv_orig_ptr->magic;
+	resv_copy_ptr->name = xstrdup(resv_orig_ptr->name);
+	resv_copy_ptr->node_bitmap = bit_copy(resv_orig_ptr->node_bitmap);
+	resv_copy_ptr->node_cnt = resv_orig_ptr->node_cnt;
+	resv_copy_ptr->node_list = xstrdup(resv_orig_ptr->node_list);
+	resv_copy_ptr->partition = xstrdup(resv_orig_ptr->partition);
+	resv_copy_ptr->part_ptr = resv_orig_ptr->part_ptr;
+	resv_copy_ptr->resv_id = resv_orig_ptr->resv_id;
+	resv_copy_ptr->start_time = resv_orig_ptr->start_time;
+	resv_copy_ptr->start_time_first = resv_orig_ptr->start_time_first;
+	resv_copy_ptr->start_time_prev = resv_orig_ptr->start_time_prev;
+	resv_copy_ptr->users = xstrdup(resv_orig_ptr->users);
+	resv_copy_ptr->user_cnt = resv_orig_ptr->user_cnt;
+	resv_copy_ptr->user_list = xmalloc(sizeof(uid_t) * 
+					   resv_orig_ptr->user_cnt);
+	for (i=0; i<resv_copy_ptr->user_cnt; i++)
+		resv_copy_ptr->user_list[i] = resv_orig_ptr->user_list[i];
+
+	return resv_copy_ptr;
+}
+
+/* Swap the contents of two reservation records */
+static void _swap_resv(slurmctld_resv_t *resv_backup, 
+		       slurmctld_resv_t *resv_ptr)
+{
+	slurmctld_resv_t *resv_copy_ptr;
+
+	xassert(resv_backup->magic == RESV_MAGIC);
+	xassert(resv_ptr->magic    == RESV_MAGIC);
+	resv_copy_ptr = xmalloc(sizeof(slurmctld_resv_t));
+	memcpy(resv_copy_ptr, resv_backup, sizeof(slurmctld_resv_t));
+	memcpy(resv_backup, resv_ptr, sizeof(slurmctld_resv_t));
+	memcpy(resv_ptr, resv_copy_ptr, sizeof(slurmctld_resv_t));
+	xfree(resv_copy_ptr);
+}
+
+static void _del_resv_rec(void *x)
+{
+	int i;
+	slurmctld_resv_t *resv_ptr = (slurmctld_resv_t *) x;
+
+	if (resv_ptr) {
+		xassert(resv_ptr->magic == RESV_MAGIC);
+		resv_ptr->magic = 0;
+		xfree(resv_ptr->accounts);
+		for (i=0; i<resv_ptr->account_cnt; i++)
+			xfree(resv_ptr->account_list[i]);
+		xfree(resv_ptr->account_list);
+		xfree(resv_ptr->assoc_list);
+		xfree(resv_ptr->features);
+		xfree(resv_ptr->name);
+		if (resv_ptr->node_bitmap)
+			bit_free(resv_ptr->node_bitmap);
+		xfree(resv_ptr->node_list);
+		xfree(resv_ptr->partition);
+		xfree(resv_ptr->users);
+		xfree(resv_ptr->user_list);
+		xfree(resv_ptr);
+	}
+}
+
+static int _find_resv_id(void *x, void *key)
+{
+	slurmctld_resv_t *resv_ptr = (slurmctld_resv_t *) x;
+	uint32_t *resv_id = (uint32_t *) key;
+
+	xassert(resv_ptr->magic == RESV_MAGIC);
+
+	if (resv_ptr->resv_id != *resv_id)
+		return 0;
+	else
+		return 1;	/* match */
+}
+
+static int _find_resv_name(void *x, void *key)
+{
+	slurmctld_resv_t *resv_ptr = (slurmctld_resv_t *) x;
+
+	xassert(resv_ptr->magic == RESV_MAGIC);
+
+	if (strcmp(resv_ptr->name, (char *) key))
+		return 0;
+	else
+		return 1;	/* match */
+}
+
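+/* Log a reservation create/update request. This is a no-op unless
+ * _RESV_DEBUG is defined non-zero at compile time */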
+static void _dump_resv_req(resv_desc_msg_t *resv_ptr, char *mode)
+{
+#if _RESV_DEBUG
+	char start_str[32] = "-1", end_str[32] = "-1", *flag_str = NULL;
+	int duration;
+
+	if (resv_ptr->start_time != (time_t) NO_VAL) {
+		slurm_make_time_str(&resv_ptr->start_time,
+				    start_str, sizeof(start_str));
+	}
+	if (resv_ptr->end_time != (time_t) NO_VAL) {
+		slurm_make_time_str(&resv_ptr->end_time, 
+				    end_str,  sizeof(end_str));
+	}
+	if (resv_ptr->flags != (uint16_t) NO_VAL)
+		flag_str = reservation_flags_string(resv_ptr->flags);
+
+	if (resv_ptr->duration == NO_VAL)
+		duration = -1;
+	else
+		duration = resv_ptr->duration;
+
+	info("%s: Name=%s StartTime=%s EndTime=%s Duration=%d "
+	     "Flags=%s NodeCnt=%d NodeList=%s Features=%s "
+	     "PartitionName=%s Users=%s Accounts=%s",
+	     mode, resv_ptr->name, start_str, end_str, duration,
+	     flag_str, resv_ptr->node_cnt, resv_ptr->node_list, 
+	     resv_ptr->features, resv_ptr->partition, 
+	     resv_ptr->users, resv_ptr->accounts);
+
+	xfree(flag_str);
+#endif
+}
+
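+/* Advance top_suffix to the next reservation id not already in use,
+ * wrapping around after 9999 */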
+static void _generate_resv_id(void)
+{
+	while (1) {
+		if (top_suffix >= 9999)
+			top_suffix = 1;		/* wrap around */
+		else
+			top_suffix++;
+		if (!list_find_first(resv_list, _find_resv_id, &top_suffix))
+			break;
+	}
+}
+
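+/* Generate a reservation name from the first account (or, failing that,
+ * user) name plus the current id suffix. For example (illustrative),
+ * accounts="physics,chemistry" with top_suffix 17 yield "physics_17" */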
+static void _generate_resv_name(resv_desc_msg_t *resv_ptr)
+{
+	char *key, *name, *sep;
+	int len;
+
+	/* Generate a name prefix based upon the first account
+	 * name if provided, otherwise the first user name */
+	if (resv_ptr->accounts && resv_ptr->accounts[0])
+		key = resv_ptr->accounts;
+	else
+		key = resv_ptr->users;
+	sep = strchr(key, ',');
+	if (sep)
+		len = sep - key;
+	else
+		len = strlen(key);
+	name = xmalloc(len + 16);
+	strncpy(name, key, len);
+
+	xstrfmtcat(name, "_%d", top_suffix);
+
+	resv_ptr->name = name;
+}
+
+/* Validate an account name */
+static bool _is_account_valid(char *account)
+{
+	acct_association_rec_t assoc_rec, *assoc_ptr;
+
+	if (!(accounting_enforce & ACCOUNTING_ENFORCE_ASSOCS))
+		return true;	/* don't worry about account validity */
+
+	memset(&assoc_rec, 0, sizeof(acct_association_rec_t));
+	assoc_rec.uid       = NO_VAL;
+	assoc_rec.acct      = account;
+
+	if (assoc_mgr_fill_in_assoc(acct_db_conn, &assoc_rec,
+				    accounting_enforce, &assoc_ptr)) {
+		return false;
+	}
+	return true;
+}
+
+static int _append_assoc_list(List assoc_list, acct_association_rec_t *assoc)
+{
+	int rc = ESLURM_INVALID_BANK_ACCOUNT;
+	acct_association_rec_t *assoc_ptr = NULL;
+	if (assoc_mgr_fill_in_assoc(
+		    acct_db_conn, assoc,
+		    accounting_enforce, 
+		    &assoc_ptr)) {
+		if(accounting_enforce & ACCOUNTING_ENFORCE_ASSOCS) {
+			error("No association for user %u and account %s",
+			      assoc->uid, assoc->acct);
+		} else {
+			verbose("No association for user %u and account %s",
+				assoc->uid, assoc->acct);
+			rc = SLURM_SUCCESS;
+		}
+		
+	} 
+	if(assoc_ptr) {
+		list_append(assoc_list, assoc_ptr);
+		rc = SLURM_SUCCESS;
+	}
+
+	return rc;
+}
+
+/* Set an association list based upon accounts and users */
+static int _set_assoc_list(slurmctld_resv_t *resv_ptr)
+{
+	int rc = SLURM_SUCCESS, i = 0, j = 0;
+	List assoc_list = NULL;
+	acct_association_rec_t assoc, *assoc_ptr = NULL;
+
+	/* no need to do this if we can't ;) */
+	if(!association_based_accounting)
+		return rc;
+
+	assoc_list = list_create(NULL);
+
+	memset(&assoc, 0, sizeof(acct_association_rec_t));
+
+	if(resv_ptr->user_cnt) {
+		for(i=0; i < resv_ptr->user_cnt; i++) {
+			if(resv_ptr->account_cnt) {
+				for(j=0; j < resv_ptr->account_cnt; j++) {
+					memset(&assoc, 0, 
+					       sizeof(acct_association_rec_t));
+					assoc.uid = resv_ptr->user_list[i];
+					assoc.acct = resv_ptr->account_list[j];
+					if((rc = _append_assoc_list(
+						    assoc_list, &assoc))
+					   != SLURM_SUCCESS) {
+						goto end_it;
+					}
+				}	
+			} else {
+				memset(&assoc, 0, 
+				       sizeof(acct_association_rec_t));
+				assoc.uid = resv_ptr->user_list[i];
+				if((rc = assoc_mgr_get_user_assocs(
+					    acct_db_conn, &assoc,
+					    accounting_enforce, assoc_list))
+				   != SLURM_SUCCESS) {
+					rc = ESLURM_INVALID_BANK_ACCOUNT;
+					goto end_it;
+				}
+			}
+		}
+	} else if(resv_ptr->account_cnt) {
+		for(i=0; i < resv_ptr->account_cnt; i++) {
+			memset(&assoc, 0, 
+			       sizeof(acct_association_rec_t));
+			assoc.uid = (uint32_t)NO_VAL;
+			assoc.acct = resv_ptr->account_list[i];
+			if((rc = _append_assoc_list(assoc_list, &assoc))
+			   != SLURM_SUCCESS) {
+				goto end_it;
+			}
+		}	
+	} else if(accounting_enforce & ACCOUNTING_ENFORCE_ASSOCS) {
+		error("We need at least 1 user or 1 account to "
+		      "create a reservation.");
+		rc = SLURM_ERROR;
+	}
+	
+	if(list_count(assoc_list)) {
+		ListIterator itr = list_iterator_create(assoc_list);
+		xfree(resv_ptr->assoc_list);	/* clear for modify */
+		while((assoc_ptr = list_next(itr))) {
+			if(resv_ptr->assoc_list)
+				xstrfmtcat(resv_ptr->assoc_list, ",%u", 
+					   assoc_ptr->id);
+			else
+				xstrfmtcat(resv_ptr->assoc_list, "%u",
+					   assoc_ptr->id);
+		}
+		list_iterator_destroy(itr);
+	}
+
+end_it:
+	list_destroy(assoc_list);
+	return rc;
+}
+
+/* Post reservation create */
+static int _post_resv_create(slurmctld_resv_t *resv_ptr)
+{
+	int rc = SLURM_SUCCESS;
+	acct_reservation_rec_t resv;
+	char temp_bit[BUF_SIZE];
+
+	memset(&resv, 0, sizeof(acct_reservation_rec_t));
+	
+	resv.assocs = resv_ptr->assoc_list;
+	resv.cluster = slurmctld_cluster_name;
+	resv.cpus = resv_ptr->cpu_cnt;
+	resv.flags = resv_ptr->flags;
+	resv.id = resv_ptr->resv_id;
+	resv.name = resv_ptr->name;
+	resv.nodes = resv_ptr->node_list;
+	if(resv_ptr->node_bitmap) 
+		resv.node_inx = bit_fmt(temp_bit, sizeof(temp_bit), 
+					resv_ptr->node_bitmap);
+
+	resv.time_end = resv_ptr->end_time;
+	resv.time_start = resv_ptr->start_time;
+
+	rc = acct_storage_g_add_reservation(acct_db_conn, &resv);
+
+	return rc;
+}
+
+/* Note that a reservation has been deleted */
+static int _post_resv_delete(slurmctld_resv_t *resv_ptr)
+{
+	int rc = SLURM_SUCCESS;
+	acct_reservation_rec_t resv;
+	memset(&resv, 0, sizeof(acct_reservation_rec_t));
+
+	resv.cluster = slurmctld_cluster_name;
+	resv.id = resv_ptr->resv_id;
+	resv.name = resv_ptr->name;
+	resv.time_start = resv_ptr->start_time;
+	/* This is just a time stamp here to delete if the reservation
+	 * hasn't started yet so we don't get trash records in the
+	 * database if said database isn't up right now */
+	resv.time_start_prev = time(NULL);
+	rc = acct_storage_g_remove_reservation(acct_db_conn, &resv);
+	
+	return rc;
+}
+
+/* Note that a reservation has been updated */
+static int _post_resv_update(slurmctld_resv_t *resv_ptr,
+			     slurmctld_resv_t *old_resv_ptr)
+{
+	int rc = SLURM_SUCCESS;
+	acct_reservation_rec_t resv;
+	char temp_bit[BUF_SIZE];
+
+	memset(&resv, 0, sizeof(acct_reservation_rec_t));
+	
+	resv.cluster = slurmctld_cluster_name;
+	resv.id = resv_ptr->resv_id;
+	resv.time_end = resv_ptr->end_time;
+	
+	if(!old_resv_ptr) {
+		resv.assocs = resv_ptr->assoc_list;
+		resv.cpus = resv_ptr->cpu_cnt;
+		resv.flags = resv_ptr->flags;
+		resv.nodes = resv_ptr->node_list;
+	} else {
+		time_t now = time(NULL);
+		
+		if(old_resv_ptr->assoc_list && resv_ptr->assoc_list) {
+			if(strcmp(old_resv_ptr->assoc_list,
+				  resv_ptr->assoc_list)) 
+				resv.assocs = resv_ptr->assoc_list;
+		} else if(resv_ptr->assoc_list) 
+			resv.assocs = resv_ptr->assoc_list;
+
+		if(old_resv_ptr->cpu_cnt != resv_ptr->cpu_cnt) 
+			resv.cpus = resv_ptr->cpu_cnt;
+		else 
+			resv.cpus = (uint32_t)NO_VAL;
+
+		if(old_resv_ptr->flags != resv_ptr->flags) 
+			resv.flags = resv_ptr->flags;
+		else 
+			resv.flags = (uint16_t)NO_VAL;
+
+		if(old_resv_ptr->node_list && resv_ptr->node_list) {
+			if(strcmp(old_resv_ptr->node_list,
+				  resv_ptr->node_list)) 
+				resv.nodes = resv_ptr->node_list;
+		} else if(resv_ptr->node_list) 
+			resv.nodes = resv_ptr->node_list;
+		
+		/* Here if the reservation has started already we need
+		   to mark a new start time for it if certain
+		   variables are needed in accounting.  Right now if
+		   the assocs, nodes, flags or cpu count changes we need a
+		   new start time of now. */
+		if((resv_ptr->start_time < now)
+		   && (resv.assocs
+		       || resv.nodes 
+		       || (resv.flags != (uint16_t)NO_VAL)
+		       || (resv.cpus != (uint32_t)NO_VAL))) {
+			resv_ptr->start_time_prev = resv_ptr->start_time;
+			resv_ptr->start_time = now;
+		}
+	}
+	/* now set the (maybe new) start_times */
+	resv.time_start = resv_ptr->start_time;
+	resv.time_start_prev = resv_ptr->start_time_prev;
+
+	if(resv.nodes && resv_ptr->node_bitmap) 
+		resv.node_inx = bit_fmt(temp_bit, sizeof(temp_bit),
+					resv_ptr->node_bitmap);
+
+	rc = acct_storage_g_modify_reservation(acct_db_conn, &resv);
+
+	return rc;
+}
+
+/*
+ * Validate a comma delimited list of account names and build an array of
+ *	them
+ * IN account       - a list of account names
+ * OUT account_cnt  - number of accounts in the list
+ * OUT account_list - list of the account names, 
+ *		      CALLER MUST XFREE this plus each individual record
+ * RETURN 0 on success
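+ *
+ * For example (illustrative): accounts "physics,chemistry" yields
+ *	account_cnt=2 and account_list={"physics", "chemistry"}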
+ */
+static int _build_account_list(char *accounts, int *account_cnt, 
+			       char ***account_list)
+{
+	char *last = NULL, *tmp, *tok;
+	int ac_cnt = 0, i;
+	char **ac_list;
+
+	*account_cnt = 0;
+	*account_list = (char **) NULL;
+
+	if (!accounts)
+		return ESLURM_INVALID_BANK_ACCOUNT;
+
+	i = strlen(accounts);
+	ac_list = xmalloc(sizeof(char *) * (i + 2));
+	tmp = xstrdup(accounts);
+	tok = strtok_r(tmp, ",", &last);
+	while (tok) {
+		if (!_is_account_valid(tok)) {
+			info("Reservation request has invalid account %s", 
+			     tok);
+			goto inval;
+		}
+		ac_list[ac_cnt++] = xstrdup(tok);
+		tok = strtok_r(NULL, ",", &last);
+	}
+	*account_cnt  = ac_cnt;
+	*account_list = ac_list;
+	xfree(tmp);
+	return SLURM_SUCCESS;
+
+ inval:	for (i=0; i<ac_cnt; i++)
+		xfree(ac_list[i]);
+	xfree(ac_list);
+	xfree(tmp);
+	return ESLURM_INVALID_BANK_ACCOUNT;
+}
+
+/*
+ * Update an account list for an existing reservation based upon an
+ *	update comma delimited specification of accounts to add (+name),
+ *	remove (-name), or set the list to (no leading sign)
+ * IN/OUT resv_ptr - pointer to reservation structure being updated
+ * IN accounts     - a list of account names, to set, add, or remove
+ * RETURN 0 on success
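+ *
+ * For example (illustrative): with current accounts "physics,chemistry",
+ *	the specification "+biology,-physics" yields "chemistry,biology",
+ *	while a plain "biology" resets the list to just "biology"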
+ */
+static int  _update_account_list(slurmctld_resv_t *resv_ptr, 
+				 char *accounts)
+{
+	char *last = NULL, *ac_cpy, *tok;
+	int ac_cnt = 0, i, j, k;
+	int *ac_type, minus_account = 0, plus_account = 0;
+	char **ac_list;
+	bool found_it;
+
+	if (!accounts)
+		return ESLURM_INVALID_BANK_ACCOUNT;
+
+	i = strlen(accounts);
+	ac_list = xmalloc(sizeof(char *) * (i + 2));
+	ac_type = xmalloc(sizeof(int)    * (i + 2));
+	ac_cpy = xstrdup(accounts);
+	tok = strtok_r(ac_cpy, ",", &last);
+	while (tok) {
+		if (tok[0] == '-') {
+			ac_type[ac_cnt] = 1;	/* minus */
+			minus_account = 1;
+			tok++;
+		} else if (tok[0] == '+') {
+			ac_type[ac_cnt] = 2;	/* plus */
+			plus_account = 1;
+			tok++;
+		} else if (tok[0] == '\0') {
+			/* skip empty token, advancing to avoid an
+			 * infinite loop */
+			tok = strtok_r(NULL, ",", &last);
+			continue;
+		} else if (plus_account || minus_account) {
+			info("Reservation account expression invalid %s", 
+			     accounts);
+			goto inval;
+		} else
+			ac_type[ac_cnt] = 3;	/* set */
+		if (!_is_account_valid(tok)) {
+			info("Reservation request has invalid account %s", 
+			     tok);
+			goto inval;
+		}
+		ac_list[ac_cnt++] = xstrdup(tok);
+		tok = strtok_r(NULL, ",", &last);
+	}
+
+	if ((plus_account == 0) && (minus_account == 0)) {
+		/* Just a reset of account list */
+		xfree(resv_ptr->accounts);
+		if (accounts[0] != '\0')
+			resv_ptr->accounts = xstrdup(accounts);
+		xfree(resv_ptr->account_list);
+		resv_ptr->account_list = ac_list;
+		resv_ptr->account_cnt = ac_cnt;
+		xfree(ac_cpy);
+		xfree(ac_type);
+		return SLURM_SUCCESS;
+	}
+
+	/* Modification of existing account list */
+	if (minus_account) {
+		if (resv_ptr->account_cnt == 0)
+			goto inval;
+		for (i=0; i<ac_cnt; i++) {
+			if (ac_type[i] != 1)
+				continue;
+			found_it = false;
+			for (j=0; j<resv_ptr->account_cnt; j++) {
+				if (strcmp(resv_ptr->account_list[j], 
+					   ac_list[i])) {
+					continue;
+				}
+				found_it = true;
+				xfree(resv_ptr->account_list[j]);
+				resv_ptr->account_cnt--;
+				for (k=j; k<resv_ptr->account_cnt; k++) {
+					resv_ptr->account_list[k] =
+						resv_ptr->account_list[k+1];
+				}
+				break;
+			}
+			if (!found_it)
+				goto inval;
+		}
+		xfree(resv_ptr->accounts);
+		for (i=0; i<resv_ptr->account_cnt; i++) {
+			if (i == 0) {
+				resv_ptr->accounts = xstrdup(resv_ptr->
+							     account_list[i]);
+			} else {
+				xstrcat(resv_ptr->accounts, ",");
+				xstrcat(resv_ptr->accounts,
+					resv_ptr->account_list[i]);
+			}
+		}
+	}
+
+	if (plus_account) {
+		for (i=0; i<ac_cnt; i++) {
+			if (ac_type[i] != 2)
+				continue;
+			found_it = false;
+			for (j=0; j<resv_ptr->account_cnt; j++) {
+				if (strcmp(resv_ptr->account_list[j], 
+					   ac_list[i])) {
+					continue;
+				}
+				found_it = true;
+				break;
+			}
+			if (found_it)
+				continue;	/* duplicate entry */
+			xrealloc(resv_ptr->account_list, 
+				 sizeof(char *) * (resv_ptr->account_cnt + 1));
+			resv_ptr->account_list[resv_ptr->account_cnt++] =
+					xstrdup(ac_list[i]);
+		}
+		xfree(resv_ptr->accounts);
+		for (i=0; i<resv_ptr->account_cnt; i++) {
+			if (i == 0) {
+				resv_ptr->accounts = xstrdup(resv_ptr->
+							     account_list[i]);
+			} else {
+				xstrcat(resv_ptr->accounts, ",");
+				xstrcat(resv_ptr->accounts,
+					resv_ptr->account_list[i]);
+			}
+		}
+	}
+	for (i=0; i<ac_cnt; i++)
+		xfree(ac_list[i]);
+	xfree(ac_list);
+	xfree(ac_type);
+	xfree(ac_cpy);
+	return SLURM_SUCCESS;
+
+ inval:	for (i=0; i<ac_cnt; i++)
+		xfree(ac_list[i]);
+	xfree(ac_list);
+	xfree(ac_type);
+	xfree(ac_cpy);
+	return ESLURM_INVALID_BANK_ACCOUNT;
+}
+
+/*
+ * Validate a comma delimited list of user names and build an array of
+ *	their UIDs
+ * IN users      - a list of user names
+ * OUT user_cnt  - number of UIDs in the list
+ * OUT user_list - list of the users' UIDs, CALLER MUST XFREE
+ * RETURN 0 on success
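+ *
+ * For example (illustrative): users "alice,bob" yields user_cnt=2 and
+ *	user_list holding the two corresponding numeric UIDs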
+ */
+static int _build_uid_list(char *users, int *user_cnt, uid_t **user_list)
+{
+	char *last = NULL, *tmp = NULL, *tok;
+	int u_cnt = 0, i;
+	uid_t *u_list, u_tmp;
+
+	*user_cnt = 0;
+	*user_list = (uid_t *) NULL;
+
+	if (!users)
+		return ESLURM_USER_ID_MISSING;
+
+	i = strlen(users);
+	u_list = xmalloc(sizeof(uid_t) * (i + 2));
+	tmp = xstrdup(users);
+	tok = strtok_r(tmp, ",", &last);
+	while (tok) {
+		u_tmp = uid_from_string(tok);
+		if (u_tmp == (uid_t) -1) {
+			info("Reservation request has invalid user %s", tok);
+			goto inval;
+		}
+		u_list[u_cnt++] = u_tmp;
+		tok = strtok_r(NULL, ",", &last);
+	}
+	*user_cnt  = u_cnt;
+	*user_list = u_list;
+	xfree(tmp);
+	return SLURM_SUCCESS;
+
+ inval:	xfree(tmp);
+	xfree(u_list);
+	return ESLURM_USER_ID_MISSING;
+}
+
+/*
+ * Update a user/uid list for an existing reservation based upon an
+ *	update comma delimited specification of users to add (+name),
+ *	remove (-name), or set the list to (no leading sign)
+ * IN/OUT resv_ptr - pointer to reservation structure being updated
+ * IN users        - a list of user names, to set, add, or remove
+ * RETURN 0 on success
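+ *
+ * For example (illustrative): with current users "alice,bob", the
+ *	specification "+carol,-alice" yields "bob,carol", while a plain
+ *	"carol" resets the list to just "carol"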
+ */
+static int _update_uid_list(slurmctld_resv_t *resv_ptr, char *users)
+{
+	char *last = NULL, *u_cpy = NULL, *tmp = NULL, *tok;
+	int u_cnt = 0, i, j, k;
+	uid_t *u_list, u_tmp;
+	int *u_type, minus_user = 0, plus_user = 0;
+	char **u_name;
+	bool found_it;
+
+	if (!users)
+		return ESLURM_USER_ID_MISSING;
+
+	/* Parse the incoming user expression */
+	i = strlen(users);
+	u_list = xmalloc(sizeof(uid_t)  * (i + 2));
+	u_name = xmalloc(sizeof(char *) * (i + 2));
+	u_type = xmalloc(sizeof(int)    * (i + 2));
+	u_cpy = xstrdup(users);
+	tok = strtok_r(u_cpy, ",", &last);
+	while (tok) {
+		if (tok[0] == '-') {
+			u_type[u_cnt] = 1;	/* minus */
+			minus_user = 1;
+			tok++;
+		} else if (tok[0] == '+') {
+			u_type[u_cnt] = 2;	/* plus */
+			plus_user = 1;
+			tok++;
+		} else if (tok[0] == '\0') {
+			/* skip empty token, advancing to avoid an
+			 * infinite loop */
+			tok = strtok_r(NULL, ",", &last);
+			continue;
+		} else if (plus_user || minus_user) {
+			info("Reservation user expression invalid %s", users);
+			goto inval;
+		} else
+			u_type[u_cnt] = 3;	/* set */
+		u_tmp = uid_from_string(tok);
+		if (u_tmp == (uid_t) -1) {
+			info("Reservation request has invalid user %s", tok);
+			goto inval;
+		}
+		u_name[u_cnt] = tok;
+		u_list[u_cnt++] = u_tmp;
+		tok = strtok_r(NULL, ",", &last);
+	}
+
+	if ((plus_user == 0) && (minus_user == 0)) {
+		/* Just a reset of user list */
+		xfree(resv_ptr->users);
+		xfree(resv_ptr->user_list);
+		if (users[0] != '\0')
+			resv_ptr->users = xstrdup(users);
+		resv_ptr->user_cnt  = u_cnt;
+		resv_ptr->user_list = u_list;
+		xfree(u_cpy);
+		xfree(u_name);
+		xfree(u_type);
+		return SLURM_SUCCESS;
+	}
+	
+	/* Modification of existing user list */
+	if (minus_user) {
+		for (i=0; i<u_cnt; i++) {
+			if (u_type[i] != 1)
+				continue;
+			found_it = false;
+			for (j=0; j<resv_ptr->user_cnt; j++) {
+				if (resv_ptr->user_list[j] != u_list[i])
+					continue;
+				found_it = true;
+				resv_ptr->user_cnt--;
+				for (k=j; k<resv_ptr->user_cnt; k++) {
+					resv_ptr->user_list[k] =
+						resv_ptr->user_list[k+1];
+				}
+				break;
+			}
+			if (!found_it)
+				goto inval;
+			/* Now we need to remove from users string */
+			k = strlen(u_name[i]);
+			tmp = resv_ptr->users;
+			while ((tok = strstr(tmp, u_name[i]))) {
+				if (((tok != resv_ptr->users) &&
+				     (tok[-1] != ',')) ||
+				    ((tok[k] != '\0') && (tok[k] != ','))) {
+					tmp = tok + 1;
+					continue;
+				}
+				if (tok[-1] == ',') {
+					tok--;
+					k++;
+				} else if (tok[k] == ',')
+					k++;
+				for (j=0; ; j++) {
+					tok[j] = tok[j+k];
+					if (tok[j] == '\0')
+						break;
+				}
+			}
+		}
+	}
+
+	if (plus_user) {
+		for (i=0; i<u_cnt; i++) {
+			if (u_type[i] != 2)
+				continue;
+			found_it = false;
+			for (j=0; j<resv_ptr->user_cnt; j++) {
+				if (resv_ptr->user_list[j] != u_list[i])
+					continue;
+				found_it = true;
+				break;
+			}
+			if (found_it)
+				continue;	/* duplicate entry */
+			if (resv_ptr->users && resv_ptr->users[0])
+				xstrcat(resv_ptr->users, ",");
+			xstrcat(resv_ptr->users, u_name[i]);
+			xrealloc(resv_ptr->user_list, 
+				 sizeof(uid_t) * (resv_ptr->user_cnt + 1));
+			resv_ptr->user_list[resv_ptr->user_cnt++] =
+				u_list[i];
+		}
+	}
+	xfree(u_cpy);
+	xfree(u_list);
+	xfree(u_name);
+	xfree(u_type);
+	return SLURM_SUCCESS;
+
+ inval:	xfree(u_cpy);
+	xfree(u_list);
+	xfree(u_name);
+	xfree(u_type);
+	return ESLURM_USER_ID_MISSING;
+}
+
+/* 
+ * _pack_resv - dump configuration information about a specific reservation
+ *	in machine independent form (for network transmission or state save)
+ * IN resv_ptr - pointer to reservation for which information is requested
+ * IN/OUT buffer - buffer in which data is placed, pointers automatically 
+ *	updated
+ * IN internal   - true if for internal save state, false for xmit to users
+ * NOTE: if you make any changes here be sure to make the corresponding
+ *	changes to _unpack_reserve_info_members() in common/slurm_protocol_pack.c
+ *	plus load_all_resv_state() below.
+ */
+static void _pack_resv(slurmctld_resv_t *resv_ptr, Buf buffer, 
+		       bool internal)
+{
+	packstr(resv_ptr->accounts,	buffer);
+	pack_time(resv_ptr->end_time,	buffer);
+	packstr(resv_ptr->features,	buffer);
+	packstr(resv_ptr->name,		buffer);
+	pack32(resv_ptr->node_cnt,	buffer);
+	packstr(resv_ptr->node_list,	buffer);
+	packstr(resv_ptr->partition,	buffer);
+	pack_time(resv_ptr->start_time_first,	buffer);
+	pack16(resv_ptr->flags,		buffer);
+	packstr(resv_ptr->users,	buffer);
+
+	if (internal) {
+		packstr(resv_ptr->assoc_list,	buffer);
+		pack32(resv_ptr->cpu_cnt,	buffer);
+		pack32(resv_ptr->resv_id,	buffer);
+		pack_time(resv_ptr->start_time_prev,	buffer);
+		pack_time(resv_ptr->start_time,	buffer);
+		pack32(resv_ptr->duration,	buffer);
+	} else {
+		pack_bit_fmt(resv_ptr->node_bitmap, buffer);
+	}
+}
+
+/*
+ * Test if a new/updated reservation request overlaps an existing
+ *	reservation
+ * RET true if overlap
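+ * NOTE: for reservations with the DAILY flag, each of the next seven
+ *	daily occurrences is also tested for overlap (the i/j loops below)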
+ */
+static bool _resv_overlap(time_t start_time, time_t end_time, 
+			  uint16_t flags, bitstr_t *node_bitmap,
+			  slurmctld_resv_t *this_resv_ptr)
+{
+	ListIterator iter;
+	slurmctld_resv_t *resv_ptr;
+	bool rc = false;
+	uint32_t delta_t, i, j;
+	time_t s_time1, s_time2, e_time1, e_time2;
+
+	if ((!node_bitmap) || (flags & RESERVE_FLAG_MAINT))
+		return rc;
+
+	iter = list_iterator_create(resv_list);
+	if (!iter)
+		fatal("malloc: list_iterator_create");
+
+	while ((resv_ptr = (slurmctld_resv_t *) list_next(iter))) {
+		if (resv_ptr == this_resv_ptr)
+			continue;	/* skip self */
+		if (resv_ptr->node_bitmap == NULL)
+			continue;	/* no specific nodes in reservation */
+		if (!bit_overlap(resv_ptr->node_bitmap, node_bitmap))
+			continue;	/* no overlap */
+
+		for (i=0; ((i<7) && (!rc)); i++) {  /* look forward one week */
+			delta_t = i * (24 * 60 * 60);
+			s_time1 = start_time + delta_t;
+			e_time1 = end_time   + delta_t;
+			for (j=0; ((j<7) && (!rc)); j++) {
+				delta_t = j * (24 * 60 * 60);
+				s_time2 = resv_ptr->start_time + delta_t;
+				e_time2 = resv_ptr->end_time   + delta_t;
+				if ((s_time1 < e_time2) &&
+				    (e_time1 > s_time2)) {
+					verbose("Reservation overlap with %s",
+						resv_ptr->name);
+					rc = true;
+					break;
+				}
+				if (!(resv_ptr->flags & RESERVE_FLAG_DAILY))
+					break;
+			}
+			if ((flags & RESERVE_FLAG_DAILY) == 0)
+				break;
+		}
+	}
+	list_iterator_destroy(iter);
+
+	return rc;
+}
+
+/* Set a reservation's CPU count. Requires that the reservation's
+ *	node_bitmap be set. */
+static void _set_cpu_cnt(slurmctld_resv_t *resv_ptr)
+{
+	int i;
+	uint32_t cpu_cnt = 0;
+	struct node_record *node_ptr = node_record_table_ptr;
+
+	if (!resv_ptr->node_bitmap)
+		return;
+
+	for (i=0; i<node_record_count; i++, node_ptr++) {
+		if (!bit_test(resv_ptr->node_bitmap, i))
+			continue;
+		if (slurmctld_conf.fast_schedule)
+			cpu_cnt += node_ptr->config_ptr->cpus;
+		else
+			cpu_cnt += node_ptr->cpus;
+	}
+	resv_ptr->cpu_cnt = cpu_cnt;
+}
+
+/* Create a resource reservation */
+extern int create_resv(resv_desc_msg_t *resv_desc_ptr)
+{
+	int i, rc = SLURM_SUCCESS;
+	time_t now = time(NULL);
+	struct part_record *part_ptr = NULL;
+	bitstr_t *node_bitmap = NULL;
+	slurmctld_resv_t *resv_ptr;
+	int account_cnt = 0, user_cnt = 0;
+	char **account_list = NULL;
+	uid_t *user_list = NULL;
+	char start_time[32], end_time[32];
+
+	if (!resv_list)
+		resv_list = list_create(_del_resv_rec);
+	_dump_resv_req(resv_desc_ptr, "create_resv");
+
+	/* Validate the request */
+	if (resv_desc_ptr->start_time != (time_t) NO_VAL) {
+		if (resv_desc_ptr->start_time < (now - 60)) {
+			info("Reservation request has invalid start time");
+			rc = ESLURM_INVALID_TIME_VALUE;
+			goto bad_parse;
+		}
+	} else 
+		resv_desc_ptr->start_time = now;
+
+	if (resv_desc_ptr->end_time != (time_t) NO_VAL) {
+		if (resv_desc_ptr->end_time < (now - 60)) {
+			info("Reservation request has invalid end time");
+			rc = ESLURM_INVALID_TIME_VALUE;
+			goto bad_parse;
+		}
+	} else if (resv_desc_ptr->duration) {
+		resv_desc_ptr->end_time = resv_desc_ptr->start_time +
+					  (resv_desc_ptr->duration * 60);
+	} else
+		resv_desc_ptr->end_time = INFINITE;
+	if (resv_desc_ptr->flags == (uint16_t) NO_VAL)
+		resv_desc_ptr->flags = 0;
+	else {
+		resv_desc_ptr->flags &= RESERVE_FLAG_MAINT | 
+					RESERVE_FLAG_DAILY | 
+					RESERVE_FLAG_WEEKLY;
+	}
+	if (resv_desc_ptr->partition) {
+		part_ptr = find_part_record(resv_desc_ptr->partition);
+		if (!part_ptr) {
+			info("Reservation request has invalid partition %s",
+			     resv_desc_ptr->partition);
+			rc = ESLURM_INVALID_PARTITION_NAME;
+			goto bad_parse;
+		}
+	}
+	if ((resv_desc_ptr->accounts == NULL) &&
+	    (resv_desc_ptr->users == NULL)) {
+		info("Reservation request lacks users or accounts");
+		rc = ESLURM_INVALID_BANK_ACCOUNT;
+		goto bad_parse;
+	}
+	if (resv_desc_ptr->accounts) {
+		rc = _build_account_list(resv_desc_ptr->accounts, 
+					 &account_cnt, &account_list);
+		if (rc)
+			goto bad_parse;
+	}
+	if (resv_desc_ptr->users) {
+		rc = _build_uid_list(resv_desc_ptr->users, 
+				     &user_cnt, &user_list);
+		if (rc)
+			goto bad_parse;
+	}
+	if (resv_desc_ptr->node_list) {
+		resv_desc_ptr->flags |= RESERVE_FLAG_SPEC_NODES;
+		if (strcasecmp(resv_desc_ptr->node_list, "ALL") == 0) {
+			node_bitmap = bit_alloc(node_record_count);
+			bit_nset(node_bitmap, 0, (node_record_count - 1));
+		} else if (node_name2bitmap(resv_desc_ptr->node_list, 
+					    false, &node_bitmap)) {
+			rc = ESLURM_INVALID_NODE_NAME;
+			goto bad_parse;
+		}
+		if (resv_desc_ptr->node_cnt == NO_VAL)
+			resv_desc_ptr->node_cnt = 0;
+		if (_resv_overlap(resv_desc_ptr->start_time, 
+				  resv_desc_ptr->end_time, 
+				  resv_desc_ptr->flags, node_bitmap,
+				  NULL)) {
+			info("Reservation request overlaps another");
+			rc = ESLURM_RESERVATION_OVERLAP;
+			goto bad_parse;
+		}
+		resv_desc_ptr->node_cnt = bit_set_count(node_bitmap);
+	} else if (resv_desc_ptr->node_cnt == NO_VAL) {
+		info("Reservation request lacks node specification");
+		rc = ESLURM_INVALID_NODE_NAME;
+		goto bad_parse;
+	} else if ((rc = _select_nodes(resv_desc_ptr, &part_ptr, &node_bitmap))
+		   != SLURM_SUCCESS) {
+		goto bad_parse;
+	}
+
+	_generate_resv_id();
+	if (resv_desc_ptr->name) {
+		resv_ptr = (slurmctld_resv_t *) list_find_first (resv_list, 
+				_find_resv_name, resv_desc_ptr->name);
+		if (resv_ptr) {
+			info("Reservation request name duplication (%s)",
+			     resv_desc_ptr->name);
+			rc = ESLURM_RESERVATION_INVALID;
+			goto bad_parse;
+		}
+	} else {
+		while (1) {
+			_generate_resv_name(resv_desc_ptr);
+			resv_ptr = (slurmctld_resv_t *) 
+					list_find_first (resv_list, 
+					_find_resv_name, resv_desc_ptr->name);
+			if (!resv_ptr)
+				break;
+			_generate_resv_id();	/* makes new suffix */
+			/* Same as previously created name, retry */
+		}
+	}
+
+	/* Create a new reservation record */
+	resv_ptr = xmalloc(sizeof(slurmctld_resv_t));
+	resv_ptr->accounts	= resv_desc_ptr->accounts;
+	resv_desc_ptr->accounts = NULL;		/* Nothing left to free */
+	resv_ptr->account_cnt	= account_cnt;
+	resv_ptr->account_list	= account_list;
+	resv_ptr->duration      = resv_desc_ptr->duration;
+	resv_ptr->end_time	= resv_desc_ptr->end_time;
+	resv_ptr->features	= resv_desc_ptr->features;
+	resv_desc_ptr->features = NULL;		/* Nothing left to free */
+	resv_ptr->resv_id       = top_suffix;
+	xassert(resv_ptr->magic = RESV_MAGIC);	/* Sets value */
+	resv_ptr->name		= xstrdup(resv_desc_ptr->name);
+	resv_ptr->node_cnt	= resv_desc_ptr->node_cnt;
+	resv_ptr->node_list	= resv_desc_ptr->node_list;
+	resv_desc_ptr->node_list = NULL;	/* Nothing left to free */
+	resv_ptr->node_bitmap	= node_bitmap;	/* May be unset */
+	resv_ptr->partition	= resv_desc_ptr->partition;
+	resv_desc_ptr->partition = NULL;	/* Nothing left to free */
+	resv_ptr->part_ptr	= part_ptr;
+	resv_ptr->start_time	= resv_desc_ptr->start_time;
+	resv_ptr->start_time_first = resv_ptr->start_time;
+	resv_ptr->start_time_prev = resv_ptr->start_time;
+	resv_ptr->flags		= resv_desc_ptr->flags;
+	resv_ptr->users		= resv_desc_ptr->users;
+	resv_ptr->user_cnt	= user_cnt;
+	resv_ptr->user_list	= user_list;
+	resv_desc_ptr->users 	= NULL;		/* Nothing left to free */
+	_set_cpu_cnt(resv_ptr);
+	if((rc = _set_assoc_list(resv_ptr)) != SLURM_SUCCESS)
+		goto bad_parse;
+
+	/* This needs to be done after all other setup is done. */
+	_post_resv_create(resv_ptr);
+
+	slurm_make_time_str(&resv_ptr->start_time, start_time, 
+			    sizeof(start_time));
+	slurm_make_time_str(&resv_ptr->end_time, end_time, sizeof(end_time));
+	info("Created reservation %s accounts=%s users=%s "
+	     "nodes=%s start=%s end=%s",
+	     resv_ptr->name, resv_ptr->accounts, resv_ptr->users, 
+	     resv_ptr->node_list, start_time, end_time);
+	list_append(resv_list, resv_ptr);
+	last_resv_update = now;
+	schedule_resv_save();
+
+	return SLURM_SUCCESS;
+
+ bad_parse:
+	for (i=0; i<account_cnt; i++)
+		xfree(account_list[i]);
+	xfree(account_list);
+	if (node_bitmap)
+		bit_free(node_bitmap);
+	xfree(user_list);
+	return rc;
+}
+
+/* Purge all reservation data structures */
+extern void resv_fini(void)
+{
+	if (resv_list) {
+		list_destroy(resv_list);
+		resv_list = (List) NULL;
+	}
+}
+
+/* Update an existing resource reservation */
+extern int update_resv(resv_desc_msg_t *resv_desc_ptr)
+{
+	time_t now = time(NULL);
+	slurmctld_resv_t *resv_backup, *resv_ptr;
+	int error_code = SLURM_SUCCESS, rc;
+	char start_time[32], end_time[32];
+
+	if (!resv_list)
+		resv_list = list_create(_del_resv_rec);
+	_dump_resv_req(resv_desc_ptr, "update_resv");
+
+	/* Find the specified reservation */
+	if (resv_desc_ptr->name == NULL)
+		return ESLURM_RESERVATION_INVALID;
+	resv_ptr = (slurmctld_resv_t *) list_find_first (resv_list, 
+			_find_resv_name, resv_desc_ptr->name);
+	if (!resv_ptr)
+		return ESLURM_RESERVATION_INVALID;
+
+	/* Make backup to restore state in case of failure */
+	resv_backup = _copy_resv(resv_ptr);
+
+	/* Process the request */
+	if (resv_desc_ptr->flags != (uint16_t) NO_VAL) {
+		if (resv_desc_ptr->flags & RESERVE_FLAG_MAINT)
+			resv_ptr->flags |= RESERVE_FLAG_MAINT;
+		if (resv_desc_ptr->flags & RESERVE_FLAG_NO_MAINT)
+			resv_ptr->flags &= (~RESERVE_FLAG_MAINT);
+		if (resv_desc_ptr->flags & RESERVE_FLAG_DAILY)
+			resv_ptr->flags |= RESERVE_FLAG_DAILY;
+		if (resv_desc_ptr->flags & RESERVE_FLAG_NO_DAILY)
+			resv_ptr->flags &= (~RESERVE_FLAG_DAILY);
+		if (resv_desc_ptr->flags & RESERVE_FLAG_WEEKLY)
+			resv_ptr->flags |= RESERVE_FLAG_WEEKLY;
+		if (resv_desc_ptr->flags & RESERVE_FLAG_NO_WEEKLY)
+			resv_ptr->flags &= (~RESERVE_FLAG_WEEKLY);
+	}
+	if (resv_desc_ptr->partition && (resv_desc_ptr->partition[0] == '\0')) {
+		/* Clear the partition */
+		xfree(resv_desc_ptr->partition);
+		xfree(resv_ptr->partition);
+		resv_ptr->part_ptr = NULL;
+	}
+	if (resv_desc_ptr->partition) {
+		struct part_record *part_ptr = NULL;
+		part_ptr = find_part_record(resv_desc_ptr->partition);
+		if (!part_ptr) {
+			info("Reservation request has invalid partition (%s)",
+			     resv_desc_ptr->partition);
+			error_code = ESLURM_INVALID_PARTITION_NAME;
+			goto update_failure;
+		}
+		xfree(resv_ptr->partition);
+		resv_ptr->partition	= resv_desc_ptr->partition;
+		resv_desc_ptr->partition = NULL; /* Nothing left to free */
+		resv_ptr->part_ptr	= part_ptr;
+	}
+	if (resv_desc_ptr->accounts) {
+		rc = _update_account_list(resv_ptr, resv_desc_ptr->accounts);
+		if (rc) {
+			error_code = rc;
+			goto update_failure;
+		}
+	}
+	if (resv_desc_ptr->features && (resv_desc_ptr->features[0] == '\0')) {
+		xfree(resv_desc_ptr->features);
+		xfree(resv_ptr->features);
+	}
+	if (resv_desc_ptr->features) {
+		xfree(resv_ptr->features);
+		resv_ptr->features = resv_desc_ptr->features;
+		resv_desc_ptr->features = NULL;	/* Nothing left to free */
+	}
+	if (resv_desc_ptr->users) {
+		rc = _update_uid_list(resv_ptr, resv_desc_ptr->users);
+		if (rc) {
+			error_code = rc;
+			goto update_failure;
+		}
+	}
+	if ((resv_ptr->users == NULL) && (resv_ptr->accounts == NULL)) {
+		info("Reservation request lacks users or accounts");
+		error_code = ESLURM_INVALID_BANK_ACCOUNT;
+		goto update_failure;
+	}
+
+	if (resv_desc_ptr->start_time != (time_t) NO_VAL) {
+		if (resv_desc_ptr->start_time < (now - 60)) {
+			info("Reservation request has invalid start time");
+			error_code = ESLURM_INVALID_TIME_VALUE;
+			goto update_failure;
+		}
+		resv_ptr->start_time_prev = resv_ptr->start_time;
+		resv_ptr->start_time = resv_desc_ptr->start_time;
+		resv_ptr->start_time_first = resv_desc_ptr->start_time;
+		if(resv_ptr->duration) {
+			resv_ptr->end_time = resv_ptr->start_time_first + 
+				(resv_ptr->duration * 60);
+		}
+	}
+	if (resv_desc_ptr->end_time != (time_t) NO_VAL) {
+		if (resv_desc_ptr->end_time < (now - 60)) {
+			info("Reservation request has invalid end time");
+			error_code = ESLURM_INVALID_TIME_VALUE;
+			goto update_failure;
+		}
+		resv_ptr->end_time = resv_desc_ptr->end_time;
+		resv_ptr->duration = 0;
+	}
+	if (resv_desc_ptr->duration != NO_VAL) {
+		resv_ptr->duration = resv_desc_ptr->duration;
+		resv_ptr->end_time = resv_ptr->start_time_first + 
+				     (resv_desc_ptr->duration * 60);
+	}
+
+	if (resv_ptr->start_time >= resv_ptr->end_time) {
+		error_code = ESLURM_INVALID_TIME_VALUE;
+		goto update_failure;
+	}
+	if (resv_desc_ptr->node_list && 
+	    (resv_desc_ptr->node_list[0] == '\0')) {	/* Clear bitmap */
+		resv_ptr->flags &= (~RESERVE_FLAG_SPEC_NODES);
+		xfree(resv_desc_ptr->node_list);
+		xfree(resv_ptr->node_list);
+		FREE_NULL_BITMAP(resv_ptr->node_bitmap);
+		resv_ptr->node_bitmap = bit_alloc(node_record_count);
+		if (resv_desc_ptr->node_cnt == NO_VAL)
+			resv_desc_ptr->node_cnt = resv_ptr->node_cnt;
+		resv_ptr->node_cnt = 0;
+	}
+	if (resv_desc_ptr->node_list) {		/* Change bitmap last */
+		bitstr_t *node_bitmap;
+		resv_ptr->flags |= RESERVE_FLAG_SPEC_NODES;
+		if (strcasecmp(resv_desc_ptr->node_list, "ALL") == 0) {
+			node_bitmap = bit_alloc(node_record_count);
+			bit_nset(node_bitmap, 0, (node_record_count - 1));
+		} else if (node_name2bitmap(resv_desc_ptr->node_list, 
+					    false, &node_bitmap)) {
+			error_code = ESLURM_INVALID_NODE_NAME;
+			goto update_failure;
+		}
+		xfree(resv_ptr->node_list);
+		resv_ptr->node_list = resv_desc_ptr->node_list;
+		resv_desc_ptr->node_list = NULL;  /* Nothing left to free */
+		FREE_NULL_BITMAP(resv_ptr->node_bitmap);
+		resv_ptr->node_bitmap = node_bitmap;
+		resv_ptr->node_cnt = bit_set_count(resv_ptr->node_bitmap);
+	}
+	if (resv_desc_ptr->node_cnt != NO_VAL) {
+		rc = _resize_resv(resv_ptr, resv_desc_ptr->node_cnt);
+		if (rc) {
+			error_code = rc;
+			goto update_failure;
+		}
+		resv_ptr->node_cnt = bit_set_count(resv_ptr->node_bitmap);
+	}
+	if (_resv_overlap(resv_ptr->start_time, resv_ptr->end_time, 
+			  resv_ptr->flags, resv_ptr->node_bitmap, resv_ptr)) {
+		info("Reservation request overlaps another");
+		error_code = ESLURM_RESERVATION_OVERLAP;
+		goto update_failure;
+	}
+	_set_cpu_cnt(resv_ptr);
+	if((error_code = _set_assoc_list(resv_ptr)) != SLURM_SUCCESS)
+		goto update_failure;
+
+	slurm_make_time_str(&resv_ptr->start_time, start_time, 
+			    sizeof(start_time));
+	slurm_make_time_str(&resv_ptr->end_time, end_time, sizeof(end_time));
+	info("Update reservation %s accounts=%s users=%s "
+	     "nodes=%s start=%s end=%s",
+	     resv_ptr->name, resv_ptr->accounts, resv_ptr->users, 
+	     resv_ptr->node_list, start_time, end_time);
+
+	_post_resv_update(resv_ptr, resv_backup);
+	_del_resv_rec(resv_backup);
+	last_resv_update = now;
+	schedule_resv_save();
+	return error_code;
+
+update_failure:
+	_swap_resv(resv_backup, resv_ptr);
+	_del_resv_rec(resv_backup);
+	return error_code;
+}
+
+/* Determine if a running or pending job is using a reservation */
+static bool _is_resv_used(slurmctld_resv_t *resv_ptr)
+{
+	ListIterator job_iterator;
+	struct job_record *job_ptr;
+	bool match = false;
+
+	job_iterator = list_iterator_create(job_list);
+	while ((job_ptr = (struct job_record *) list_next(job_iterator))) {
+		if ((!IS_JOB_FINISHED(job_ptr)) &&
+		    (job_ptr->resv_id == resv_ptr->resv_id)) {
+			match = true;
+			break;
+		}
+	}
+	list_iterator_destroy(job_iterator);
+
+	return match;
+}
+
+/* Clear the reservation pointers for jobs referencing a defunct reservation */
+static void _clear_job_resv(slurmctld_resv_t *resv_ptr)
+{
+	ListIterator job_iterator;
+	struct job_record *job_ptr;
+
+	job_iterator = list_iterator_create(job_list);
+	while ((job_ptr = (struct job_record *) list_next(job_iterator))) {
+		if (job_ptr->resv_ptr != resv_ptr)
+			continue;
+		if (!IS_JOB_FINISHED(job_ptr)) {
+			info("Job %u linked to defunct reservation %s, "
+			     "clearing that reservation",
+			     job_ptr->job_id, job_ptr->resv_name);
+		}
+		job_ptr->resv_id = 0;
+		job_ptr->resv_ptr = NULL;
+		xfree(job_ptr->resv_name);
+	}
+	list_iterator_destroy(job_iterator);
+}
+
+/* Delete an existing resource reservation */
+extern int delete_resv(reservation_name_msg_t *resv_desc_ptr)
+{
+	ListIterator iter;
+	slurmctld_resv_t *resv_ptr;
+	int rc = SLURM_SUCCESS;
+	time_t now = time(NULL);
+
+#ifdef _RESV_DEBUG
+	info("delete_resv: Name=%s", resv_desc_ptr->name);
+#endif
+
+	iter = list_iterator_create(resv_list);
+	if (!iter)
+		fatal("malloc: list_iterator_create");
+	while ((resv_ptr = (slurmctld_resv_t *) list_next(iter))) {
+		if (strcmp(resv_ptr->name, resv_desc_ptr->name))
+			continue;
+		if (_is_resv_used(resv_ptr)) {
+			rc = ESLURM_RESERVATION_BUSY;
+			break;
+		}
+
+		if (resv_ptr->maint_set_node) {
+			resv_ptr->maint_set_node = false;
+			_set_nodes_maint(resv_ptr, now);
+			last_node_update = now;
+		}
+
+		rc = _post_resv_delete(resv_ptr);
+		_clear_job_resv(resv_ptr);
+		list_delete_item(iter);
+		break;
+	}
+	list_iterator_destroy(iter);
+
+	if (!resv_ptr) {
+		info("Reservation %s not found for deletion",
+		     resv_desc_ptr->name);
+		return ESLURM_RESERVATION_INVALID;
+	}
+
+	last_resv_update = time(NULL);
+	schedule_resv_save();
+	return rc;
+}
+
+/* Dump the reservation records to a buffer */
+extern void show_resv(char **buffer_ptr, int *buffer_size, uid_t uid)
+{
+	ListIterator iter;
+	slurmctld_resv_t *resv_ptr;
+	uint32_t resv_packed;
+	int tmp_offset;
+	Buf buffer;
+	time_t now = time(NULL);
+	DEF_TIMERS;
+
+	START_TIMER;
+	if (!resv_list)
+		resv_list = list_create(_del_resv_rec);
+
+	buffer_ptr[0] = NULL;
+	*buffer_size = 0;
+
+	buffer = init_buf(BUF_SIZE);
+
+	/* write header: version and time */
+	resv_packed = 0;
+	pack32(resv_packed, buffer);
+	pack_time(now, buffer);
+
+	/* write individual reservation records */
+	iter = list_iterator_create(resv_list);
+	if (!iter)
+		fatal("malloc: list_iterator_create");
+	while ((resv_ptr = (slurmctld_resv_t *) list_next(iter))) {
+		if ((slurmctld_conf.private_data & PRIVATE_DATA_RESERVATIONS)
+		    && !validate_super_user(uid)) {
+			int i = 0;
+			for(i=0; i<resv_ptr->user_cnt; i++) {
+				if(resv_ptr->user_list[i] == uid)
+					break;
+			}
+
+			if(i >= resv_ptr->user_cnt)
+				continue;
+		}
+
+		_pack_resv(resv_ptr, buffer, false);
+		resv_packed++;
+	}
+	list_iterator_destroy(iter);
+
+	/* put the real record count in the message body header */
+	tmp_offset = get_buf_offset(buffer);
+	set_buf_offset(buffer, 0);
+	pack32(resv_packed, buffer);
+	set_buf_offset(buffer, tmp_offset);
+
+	*buffer_size = get_buf_offset(buffer);
+	buffer_ptr[0] = xfer_buf_data(buffer);
+	END_TIMER2("show_resv");
+}
+
+/* Save the state of all reservations to file */
+extern int dump_all_resv_state(void)
+{
+	ListIterator iter;
+	slurmctld_resv_t *resv_ptr;
+	int error_code = 0, log_fd;
+	char *old_file, *new_file, *reg_file;
+	/* Locks: Read config and node */
+	slurmctld_lock_t resv_read_lock =
+	    { READ_LOCK, NO_LOCK, READ_LOCK, NO_LOCK };
+	Buf buffer = init_buf(BUF_SIZE);
+	DEF_TIMERS;
+
+	START_TIMER;
+	if (!resv_list)
+		resv_list = list_create(_del_resv_rec);
+
+	/* write header: time */
+	packstr(RESV_STATE_VERSION, buffer);
+	pack_time(time(NULL), buffer);
+	pack32(top_suffix, buffer);
+
+	/* write reservation records to buffer */
+	lock_slurmctld(resv_read_lock);
+	iter = list_iterator_create(resv_list);
+	if (!iter)
+		fatal("malloc: list_iterator_create");
+	while ((resv_ptr = (slurmctld_resv_t *) list_next(iter)))
+		_pack_resv(resv_ptr, buffer, true);
+	list_iterator_destroy(iter);
+	/* Maintain config read lock until we copy state_save_location *\
+	\* unlock_slurmctld(resv_read_lock);          - see below      */
+
+	/* write the buffer to file */
+	old_file = xstrdup(slurmctld_conf.state_save_location);
+	xstrcat(old_file, "/resv_state.old");
+	reg_file = xstrdup(slurmctld_conf.state_save_location);
+	xstrcat(reg_file, "/resv_state");
+	new_file = xstrdup(slurmctld_conf.state_save_location);
+	xstrcat(new_file, "/resv_state.new");
+	unlock_slurmctld(resv_read_lock);
+	lock_state_files();
+	log_fd = creat(new_file, 0600);
+	if (log_fd < 0) {
+		error("Can't save state, error creating file %s, %m",
+		      new_file);
+		error_code = errno;
+	} else {
+		int pos = 0, nwrite = get_buf_offset(buffer), amount;
+		char *data = (char *)get_buf_data(buffer);
+
+		while (nwrite > 0) {
+			amount = write(log_fd, &data[pos], nwrite);
+			if (amount < 0) {
+				if (errno == EINTR)
+					continue;
+				error("Error writing file %s, %m", new_file);
+				error_code = errno;
+				break;
+			}
+			nwrite -= amount;
+			pos    += amount;
+		}
+		fsync(log_fd);
+		close(log_fd);
+	}
+	if (error_code)
+		(void) unlink(new_file);
+	else {			/* file shuffle */
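+		/* Keep the previous state file as resv_state.old and
+		 * promote the newly written file via link(2)/unlink(2) */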
+		(void) unlink(old_file);
+		(void) link(reg_file, old_file);
+		(void) unlink(reg_file);
+		(void) link(new_file, reg_file);
+		(void) unlink(new_file);
+	}
+	xfree(old_file);
+	xfree(reg_file);
+	xfree(new_file);
+	unlock_state_files();
+
+	free_buf(buffer);
+	END_TIMER2("dump_all_resv_state");
+	return error_code;
+}
+
+/* Validate one reservation record, return true if good */
+static bool _validate_one_reservation(slurmctld_resv_t *resv_ptr)
+{
+	if ((resv_ptr->name == NULL) || (resv_ptr->name[0] == '\0')) {
+		error("Read reservation without name");
+		return false;
+	}
+	if (resv_ptr->partition) {
+		struct part_record *part_ptr = NULL;
+		part_ptr = find_part_record(resv_ptr->partition);
+		if (!part_ptr) {
+			error("Reservation %s has invalid partition (%s)",
+			      resv_ptr->name, resv_ptr->partition);
+			return false;
+		}
+		resv_ptr->part_ptr	= part_ptr;
+	}
+	if (resv_ptr->accounts) {
+		int account_cnt = 0, i, rc;
+		char **account_list;
+		rc = _build_account_list(resv_ptr->accounts, 
+					 &account_cnt, &account_list);
+		if (rc) {
+			error("Reservation %s has invalid accounts (%s)",
+			      resv_ptr->name, resv_ptr->accounts);
+			return false;
+		}
+		for (i=0; i<resv_ptr->account_cnt; i++)
+			xfree(resv_ptr->account_list[i]);
+		xfree(resv_ptr->account_list);
+		resv_ptr->account_cnt  = account_cnt;
+		resv_ptr->account_list = account_list;
+	}
+	if (resv_ptr->users) {
+		int rc, user_cnt = 0;
+		uid_t *user_list = NULL;
+		rc = _build_uid_list(resv_ptr->users, 
+				     &user_cnt, &user_list);
+		if (rc) {
+			error("Reservation %s has invalid users (%s)",
+			      resv_ptr->name, resv_ptr->users);
+			return false;
+		}
+		xfree(resv_ptr->user_list);
+		resv_ptr->user_cnt  = user_cnt;
+		resv_ptr->user_list = user_list;
+	}
+	if (resv_ptr->node_list) {		/* Change bitmap last */
+		bitstr_t *node_bitmap;
+		if (strcasecmp(resv_ptr->node_list, "ALL") == 0) {
+			node_bitmap = bit_alloc(node_record_count);
+			bit_nset(node_bitmap, 0, (node_record_count - 1));
+		} else if (node_name2bitmap(resv_ptr->node_list,
+					    false, &node_bitmap)) {
+			error("Reservation %s has invalid nodes (%s)",
+			      resv_ptr->name, resv_ptr->node_list);
+			return false;
+		}
+		FREE_NULL_BITMAP(resv_ptr->node_bitmap);
+		resv_ptr->node_bitmap = node_bitmap;
+	}
+	return true;
+}
+
+/*
+ * Validate all reservation records, reset bitmaps, etc.
+ * Purge any invalid reservation.
+ */
+static void _validate_all_reservations(void)
+{
+	ListIterator iter;
+	slurmctld_resv_t *resv_ptr;
+	struct job_record *job_ptr;
+	char *tmp;
+	uint32_t res_num;
+
+	iter = list_iterator_create(resv_list);
+	if (!iter)
+		fatal("malloc: list_iterator_create");
+	while ((resv_ptr = (slurmctld_resv_t *) list_next(iter))) {
+		if (!_validate_one_reservation(resv_ptr)) {
+			error("Purging invalid reservation record %s",
+			      resv_ptr->name);
+			_post_resv_delete(resv_ptr);
+			_clear_job_resv(resv_ptr);
+			list_delete_item(iter);
+		} else {
+			_set_assoc_list(resv_ptr);
+			tmp = strrchr(resv_ptr->name, '_');
+			if (tmp) {
+				res_num = atoi(tmp + 1);
+				top_suffix = MAX(top_suffix, res_num);
+			}
+		}
+	}
+	list_iterator_destroy(iter);
+
+	/* Validate all job reservation pointers */
+	iter = list_iterator_create(job_list);
+	while ((job_ptr = (struct job_record *) list_next(iter))) {
+		if (job_ptr->resv_name == NULL)
+			continue;
+
+		if ((job_ptr->resv_ptr == NULL) ||
+		    (job_ptr->resv_ptr->magic != RESV_MAGIC)) {
+			job_ptr->resv_ptr = (slurmctld_resv_t *) 
+					list_find_first(resv_list,
+							_find_resv_name,
+							job_ptr->resv_name);
+		}
+		if (!job_ptr->resv_ptr) {
+			error("JobId %u linked to defunct reservation %s",
+			       job_ptr->job_id, job_ptr->resv_name);
+			job_ptr->resv_id = 0;
+			xfree(job_ptr->resv_name);
+		}
+	}
+	list_iterator_destroy(iter);
+}
+
+/*
+ * Validate that the reserved nodes are not DOWN or DRAINED and
+ *	select different nodes as needed.
+ */
+static void _validate_node_choice(slurmctld_resv_t *resv_ptr)
+{
+	bitstr_t *tmp_bitmap = NULL;
+	int i;
+	resv_desc_msg_t resv_desc;
+
+	if (resv_ptr->flags & RESERVE_FLAG_SPEC_NODES)
+		return;
+
+	i = bit_overlap(resv_ptr->node_bitmap, avail_node_bitmap);
+	if (i == resv_ptr->node_cnt)
+		return;
+
+	/* Reservation includes DOWN, DRAINED/DRAINING, FAILING or 
+	 * NO_RESPOND nodes. Generate new request using _select_nodes()
+	 * in an attempt to replace those nodes */
+	memset(&resv_desc, 0, sizeof(resv_desc_msg_t));
+	resv_desc.start_time = resv_ptr->start_time;
+	resv_desc.end_time   = resv_ptr->end_time;
+	resv_desc.features   = resv_ptr->features;
+	resv_desc.node_cnt   = resv_ptr->node_cnt - i;
+	i = _select_nodes(&resv_desc, &resv_ptr->part_ptr, &tmp_bitmap);
+	xfree(resv_desc.node_list);
+	xfree(resv_desc.partition);
+	if (i == SLURM_SUCCESS) {
+		bit_and(resv_ptr->node_bitmap, avail_node_bitmap);
+		bit_or(resv_ptr->node_bitmap, tmp_bitmap);
+		bit_free(tmp_bitmap);
+		xfree(resv_ptr->node_list);
+		resv_ptr->node_list = bitmap2node_name(resv_ptr->node_bitmap);
+		info("modified reservation %s due to unusable nodes, "
+		     "new nodes: %s", resv_ptr->name, resv_ptr->node_list);
+	} else if (difftime(resv_ptr->start_time, time(NULL)) < 600) {
+		info("reservation %s contains unusable nodes, "
+		     "can't reallocate now", resv_ptr->name);
+	} else {
+		debug("reservation %s contains unusable nodes, "
+		      "can't reallocate now", resv_ptr->name);
+	}
+}
+
+/*
+ * Load the reservation state from file, recover on slurmctld restart. 
+ *	Reset reservation pointers for all jobs.
+ *	Execute this after loading the configuration file data.
+ * IN recover - 0 = validate current reservations ONLY if already recovered, 
+ *                  otherwise recover from disk
+ *              1+ = recover all reservation state from disk
+ * RET SLURM_SUCCESS or error code
+ * NOTE: READ lock_slurmctld config before entry
+ */
+extern int load_all_resv_state(int recover)
+{
+	char *state_file, *data = NULL, *ver_str = NULL;
+	time_t now;
+	uint32_t data_size = 0, uint32_tmp;
+	int data_allocated, data_read = 0, error_code = 0, state_fd;
+	Buf buffer;
+	slurmctld_resv_t *resv_ptr = NULL;
+
+	last_resv_update = time(NULL);
+	if ((recover == 0) && resv_list) {
+		_validate_all_reservations();
+		return SLURM_SUCCESS;
+	}
+
+	/* Read state file and validate */
+	if (resv_list)
+		list_flush(resv_list);
+	else
+		resv_list = list_create(_del_resv_rec);
+
+	/* read the file */
+	state_file = xstrdup(slurmctld_conf.state_save_location);
+	xstrcat(state_file, "/resv_state");
+	lock_state_files();
+	state_fd = open(state_file, O_RDONLY);
+	if (state_fd < 0) {
+		info("No reservation state file (%s) to recover",
+		     state_file);
+		error_code = ENOENT;
+	} else {
+		data_allocated = BUF_SIZE;
+		data = xmalloc(data_allocated);
+		while (1) {
+			data_read = read(state_fd, &data[data_size], 
+					BUF_SIZE);
+			if (data_read < 0) {
+				if  (errno == EINTR)
+					continue;
+				else {
+					error("Read error on %s: %m", 
+						state_file);
+					break;
+				}
+			} else if (data_read == 0)     /* eof */
+				break;
+			data_size      += data_read;
+			data_allocated += data_read;
+			xrealloc(data, data_allocated);
+		}
+		close(state_fd);
+	}
+	xfree(state_file);
+	unlock_state_files();
+
+	buffer = create_buf(data, data_size);
+
+	safe_unpackstr_xmalloc(&ver_str, &uint32_tmp, buffer);
+	debug3("Version string in resv_state header is %s", ver_str);
+	if ((!ver_str) || (strcmp(ver_str, RESV_STATE_VERSION) != 0)) {
+		error("************************************************************");
+		error("Cannot recover reservation state, data version incompatible");
+		error("************************************************************");
+		xfree(ver_str);
+		free_buf(buffer);
+		schedule_resv_save();	/* Schedule save with new format */
+		return EFAULT;
+	}
+	xfree(ver_str);
+	safe_unpack_time(&now, buffer);
+	safe_unpack32(&top_suffix, buffer);
+
+	while (remaining_buf(buffer) > 0) {
+		resv_ptr = xmalloc(sizeof(slurmctld_resv_t));
+		xassert(resv_ptr->magic = RESV_MAGIC);	/* Sets value */
+		safe_unpackstr_xmalloc(&resv_ptr->accounts,	
+				       &uint32_tmp,	buffer);
+		safe_unpack_time(&resv_ptr->end_time,	buffer);
+		safe_unpackstr_xmalloc(&resv_ptr->features,
+				       &uint32_tmp, 	buffer);
+		safe_unpackstr_xmalloc(&resv_ptr->name,	&uint32_tmp, buffer);
+		safe_unpack32(&resv_ptr->node_cnt,	buffer);
+		safe_unpackstr_xmalloc(&resv_ptr->node_list,
+				       &uint32_tmp,	buffer);
+		safe_unpackstr_xmalloc(&resv_ptr->partition,
+				       &uint32_tmp, 	buffer);
+		safe_unpack_time(&resv_ptr->start_time_first,	buffer);
+		safe_unpack16(&resv_ptr->flags,		buffer);
+		safe_unpackstr_xmalloc(&resv_ptr->users,&uint32_tmp, buffer);
+
+		/* Fields saved for internal use only (save state) */
+		safe_unpackstr_xmalloc(&resv_ptr->assoc_list,	
+				       &uint32_tmp,	buffer);
+		safe_unpack32(&resv_ptr->cpu_cnt,	buffer);
+		safe_unpack32(&resv_ptr->resv_id,	buffer);
+		safe_unpack_time(&resv_ptr->start_time_prev, buffer);
+		safe_unpack_time(&resv_ptr->start_time, buffer);
+		safe_unpack32(&resv_ptr->duration,	buffer);
+
+		list_append(resv_list, resv_ptr);
+		info("Recovered state of reservation %s", resv_ptr->name);
+	}
+
+	_validate_all_reservations();
+	info("Recovered state of %d reservations", list_count(resv_list));
+	free_buf(buffer);
+	return error_code;
+
+      unpack_error:
+	_validate_all_reservations();
+	if (state_fd >= 0)
+		error("Incomplete reservation data checkpoint file");
+	info("Recovered state of %d reservations", list_count(resv_list));
+	if (resv_ptr)
+		_del_resv_rec(resv_ptr);
+	free_buf(buffer);
+	return EFAULT;
+}
+
+/*
+ * Determine if a job request can use the specified reservation
+ * IN/OUT job_ptr - job to validate, set its resv_id and resv_flags
+ * RET SLURM_SUCCESS or error code (not found or access denied)
+ */
+extern int validate_job_resv(struct job_record *job_ptr)
+{
+	slurmctld_resv_t *resv_ptr = NULL;
+	int rc;
+
+	xassert(job_ptr);
+
+	if ((job_ptr->resv_name == NULL) || (job_ptr->resv_name[0] == '\0')) {
+		xfree(job_ptr->resv_name);
+		job_ptr->resv_id    = 0;
+		job_ptr->resv_flags = 0;
+		return SLURM_SUCCESS;
+	}
+
+	if (!resv_list)
+		return ESLURM_RESERVATION_INVALID;
+
+	/* Find the named reservation */
+	resv_ptr = (slurmctld_resv_t *) list_find_first (resv_list, 
+			_find_resv_name, job_ptr->resv_name);
+	if (!resv_ptr) {
+		info("Reservation name not found (%s)", job_ptr->resv_name);
+		return ESLURM_RESERVATION_INVALID;
+	}
+
+	rc = _valid_job_access_resv(job_ptr, resv_ptr);
+	if (rc == SLURM_SUCCESS) {
+		job_ptr->resv_id    = resv_ptr->resv_id;
+		job_ptr->resv_flags = resv_ptr->flags;
+	}
+	return rc;
+}
+
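+/* Resize an existing reservation to the given node count: when shrinking,
+ * release idle nodes first, then allocated ones; when growing, use
+ * _select_nodes() to pick the additional nodes */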
+static int  _resize_resv(slurmctld_resv_t *resv_ptr, uint32_t node_cnt)
+{
+	bitstr_t *tmp1_bitmap = NULL, *tmp2_bitmap = NULL;
+	int delta_node_cnt, i;
+	resv_desc_msg_t resv_desc;
+
+	delta_node_cnt = resv_ptr->node_cnt - node_cnt;
+	if (delta_node_cnt == 0)	/* Already correct node count */
+		return SLURM_SUCCESS;
+
+	if (delta_node_cnt > 0) {	/* Must decrease node count */
+		if (bit_overlap(resv_ptr->node_bitmap, idle_node_bitmap)) {
+			/* Start by eliminating idle nodes from reservation */
+			tmp1_bitmap = bit_copy(resv_ptr->node_bitmap);
+			bit_and(tmp1_bitmap, idle_node_bitmap);
+			i = bit_set_count(tmp1_bitmap);
+			if (i > delta_node_cnt) {
+				tmp2_bitmap = bit_pick_cnt(tmp1_bitmap, 
+							   delta_node_cnt);
+				bit_not(tmp2_bitmap);
+				bit_and(resv_ptr->node_bitmap, tmp2_bitmap);
+				FREE_NULL_BITMAP(tmp1_bitmap);
+				FREE_NULL_BITMAP(tmp2_bitmap);
+				delta_node_cnt = 0;	/* ALL DONE */
+			} else if (i) {
+				bit_not(idle_node_bitmap);
+				bit_and(resv_ptr->node_bitmap, 
+					idle_node_bitmap);
+				bit_not(idle_node_bitmap);
+				resv_ptr->node_cnt = bit_set_count(
+						resv_ptr->node_bitmap);
+				delta_node_cnt = resv_ptr->node_cnt - 
+						 node_cnt;
+			}
+			FREE_NULL_BITMAP(tmp1_bitmap);
+		}
+		if (delta_node_cnt > 0) {
+			/* Now eliminate allocated nodes from reservation */
+			tmp1_bitmap = bit_pick_cnt(resv_ptr->node_bitmap,
+						   node_cnt);
+			bit_free(resv_ptr->node_bitmap);
+			resv_ptr->node_bitmap = tmp1_bitmap;
+		}
+		xfree(resv_ptr->node_list);
+		resv_ptr->node_list = bitmap2node_name(resv_ptr->node_bitmap);
+		resv_ptr->node_cnt = node_cnt;
+		return SLURM_SUCCESS;
+	}
+
+	/* Must increase node count. Make this look like new request so 
+	 * we can use _select_nodes() for selecting the nodes */
+	memset(&resv_desc, 0, sizeof(resv_desc_msg_t));
+	resv_desc.start_time = resv_ptr->start_time;
+	resv_desc.end_time   = resv_ptr->end_time;
+	resv_desc.features   = resv_ptr->features;
+	resv_desc.node_cnt   = 0 - delta_node_cnt;
+	i = _select_nodes(&resv_desc, &resv_ptr->part_ptr, &tmp1_bitmap);
+	xfree(resv_desc.node_list);
+	xfree(resv_desc.partition);
+	if (i == SLURM_SUCCESS) {
+		bit_or(resv_ptr->node_bitmap, tmp1_bitmap);
+		bit_free(tmp1_bitmap);
+		xfree(resv_ptr->node_list);
+		resv_ptr->node_list = bitmap2node_name(resv_ptr->node_bitmap);
+		resv_ptr->node_cnt = node_cnt;
+	}
+	return i;
+}
+
+/* Given a reservation create request, select appropriate nodes for use */
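+/* Strategy (as implemented below): start from the partition's nodes,
+ * remove nodes in time-overlapping reservations (unless MAINT), filter
+ * by the requested feature and availability, then prefer nodes that are
+ * idle now, falling back to nodes expected to be idle at start time */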
+static int  _select_nodes(resv_desc_msg_t *resv_desc_ptr, 
+			  struct part_record **part_ptr,
+			  bitstr_t **resv_bitmap)
+{
+	slurmctld_resv_t *resv_ptr;
+	bitstr_t *node_bitmap;
+	struct node_record *node_ptr;
+	ListIterator iter;
+	int i, j;
+
+	if (*part_ptr == NULL) {
+		*part_ptr = default_part_loc;
+		if (*part_ptr == NULL)
+			return ESLURM_DEFAULT_PARTITION_NOT_SET;
+		xfree(resv_desc_ptr->partition);	/* should be no-op */
+		resv_desc_ptr->partition = xstrdup((*part_ptr)->name);
+	}
+
+	/* Start with all nodes in the partition */
+	node_bitmap = bit_copy((*part_ptr)->node_bitmap);
+
+	/* Don't use nodes already reserved */
+	if ((resv_desc_ptr->flags & RESERVE_FLAG_MAINT) == 0) {
+		iter = list_iterator_create(resv_list);
+		if (!iter)
+			fatal("malloc: list_iterator_create");
+		while ((resv_ptr = (slurmctld_resv_t *) list_next(iter))) {
+			if ((resv_ptr->node_bitmap == NULL) ||
+			    (resv_ptr->start_time >= resv_desc_ptr->end_time) ||
+			    (resv_ptr->end_time   <= resv_desc_ptr->start_time))
+				continue;
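+			/* Reservation overlaps the requested window:
+			 * exclude its nodes (same in-place
+			 * complement-AND idiom as above) */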
+			bit_not(resv_ptr->node_bitmap);
+			bit_and(node_bitmap, resv_ptr->node_bitmap);
+			bit_not(resv_ptr->node_bitmap);
+		}
+		list_iterator_destroy(iter);
+	}
+
+	/* Satisfy feature specification */
+	if (resv_desc_ptr->features) {
+		/* FIXME: Just support a single feature name for now */
+		node_ptr = node_record_table_ptr;
+		for (i=0; i<node_record_count; i++, node_ptr++) {
+			if (!bit_test(node_bitmap, i))
+				continue;
+			if (!node_ptr->config_ptr->feature_array) {
+				bit_clear(node_bitmap, i);
+				continue;
+			}
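+			/* Keep the node only if one of its features
+			 * matches the requested name (single feature
+			 * only, per the FIXME above) */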
+			for (j=0; node_ptr->config_ptr->feature_array[j]; j++){
+				if (!strcmp(resv_desc_ptr->features,
+					    node_ptr->config_ptr->
+					    feature_array[j]))
+					break;
+			}
+			if (!node_ptr->config_ptr->feature_array[j]) {
+				bit_clear(node_bitmap, i);
+				continue;
+			}
+		}
+	}
+
+	if ((resv_desc_ptr->flags & RESERVE_FLAG_MAINT) == 0) {
+		/* Nodes must be available */
+		bit_and(node_bitmap, avail_node_bitmap);
+	}
+	*resv_bitmap = NULL;
+	if (bit_set_count(node_bitmap) < resv_desc_ptr->node_cnt)
+		verbose("reservation requests more nodes than are available");
+	else if ((i = bit_overlap(node_bitmap, idle_node_bitmap)) >=
+		 resv_desc_ptr->node_cnt) {	/* Reserve idle nodes */
+		bit_and(node_bitmap, idle_node_bitmap);
+		*resv_bitmap = bit_pick_cnt(node_bitmap, 
+					    resv_desc_ptr->node_cnt);
+	} else {
+		/* Reserve nodes that are or will be idle.
+		 * This algorithm is slower than above logic that just 
+		 * selects from the idle nodes. */
+		*resv_bitmap = _pick_idle_nodes(node_bitmap, resv_desc_ptr);
+	}
+
+	bit_free(node_bitmap);
+	if (*resv_bitmap == NULL)
+		return ESLURM_TOO_MANY_REQUESTED_NODES;
+	resv_desc_ptr->node_list = bitmap2node_name(*resv_bitmap);
+	return SLURM_SUCCESS;
+}
+
+/*
+ * Select nodes for a reservation to use
+ * IN/OUT avail_nodes - nodes to choose from with proper features, partition
+ *                      destructively modified by this function
+ * IN resv_desc_ptr - reservation request
+ * RET bitmap of selected nodes or NULL if request can not be satisfied
+ */
+static bitstr_t *_pick_idle_nodes(bitstr_t *avail_nodes, 
+				  resv_desc_msg_t *resv_desc_ptr)
+{
+	ListIterator job_iterator;
+	struct job_record *job_ptr;
+
+	job_iterator = list_iterator_create(job_list);
+	while ((job_ptr = (struct job_record *) list_next(job_iterator))) {
+		if ((job_ptr->job_state != JOB_RUNNING) &&
+		    (job_ptr->job_state != JOB_SUSPENDED))
+			continue;
+		if (job_ptr->end_time < resv_desc_ptr->start_time)
+			continue;
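+		/* This job will still be running when the reservation
+		 * starts, so remove its nodes from the candidate set */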
+		bit_not(job_ptr->node_bitmap);
+		bit_and(avail_nodes, job_ptr->node_bitmap);
+		bit_not(job_ptr->node_bitmap);
+	}
+	list_iterator_destroy(job_iterator);
+
+	return bit_pick_cnt(avail_nodes, resv_desc_ptr->node_cnt);
+}
+
+/* Determine if a job has access to a reservation
+ * RET SLURM_SUCCESS if true, ESLURM_RESERVATION_ACCESS otherwise */
+static int _valid_job_access_resv(struct job_record *job_ptr,
+				  slurmctld_resv_t *resv_ptr)
+{
+	int i;
+
+	/* Determine if we have access */
+	if (/*association_enforced*/ 0) {
+		/* FIXME: add association checks
+		if (job_ptr->assoc_id in reservation association list)
+			return SLURM_SUCCESS;
+		*/
+	} else {
+		for (i=0; i<resv_ptr->user_cnt; i++) {
+			if (job_ptr->user_id == resv_ptr->user_list[i])
+				return SLURM_SUCCESS;
+		}
+		for (i=0; (i<resv_ptr->account_cnt) && job_ptr->account; i++) {
+			if (resv_ptr->account_list[i] &&
+			    (strcmp(job_ptr->account, 
+				    resv_ptr->account_list[i]) == 0)) {
+				return SLURM_SUCCESS;
+			}
+		}
+	}
+	info("Security violation, uid=%u attempt to use reservation %s",
+	     job_ptr->user_id, resv_ptr->name);
+	return ESLURM_RESERVATION_ACCESS;
+}
+
+/*
+ * Determine if a job can start now based only upon reservations
+ * IN job_ptr      - job to test
+ * RET	SLURM_SUCCESS if runnable now, otherwise an error code
+ */
+extern int job_test_resv_now(struct job_record *job_ptr)
+{
+	slurmctld_resv_t * resv_ptr;
+	time_t now;
+
+	if (job_ptr->resv_name == NULL)
+		return SLURM_SUCCESS;
+
+	resv_ptr = (slurmctld_resv_t *) list_find_first (resv_list, 
+			_find_resv_name, job_ptr->resv_name);
+	job_ptr->resv_ptr = resv_ptr;
+	if (!resv_ptr)
+		return ESLURM_RESERVATION_INVALID;
+
+	if (_valid_job_access_resv(job_ptr, resv_ptr) != SLURM_SUCCESS)
+		return ESLURM_RESERVATION_ACCESS;
+	now = time(NULL);
+	if (now < resv_ptr->start_time) {
+		/* reservation starts later */
+		return ESLURM_INVALID_TIME_VALUE;
+	}
+	if (now > resv_ptr->end_time) {
+		/* reservation ended earlier */
+		return ESLURM_RESERVATION_INVALID;
+	}
+
+	return SLURM_SUCCESS;
+}
+
+/*
+ * Determine which nodes a job can use based upon reservations
+ * IN job_ptr      - job to test
+ * IN/OUT when     - when we want the job to start (IN)
+ *                   when the reservation is available (OUT)
+ * IN move_time    - if true, then permit the start time to advance from
+ *                   "when" as needed IF job has no reservervation
+ * OUT node_bitmap - nodes which the job can use, caller must free unless error
+ * RET	SLURM_SUCCESS if runable now
+ *	ESLURM_RESERVATION_ACCESS access to reservation denied
+ *	ESLURM_RESERVATION_INVALID reservation invalid
+ *	ESLURM_INVALID_TIME_VALUE reservation invalid at time "when"
+ *	ESLURM_NODES_BUSY job has no reservation, but required nodes are
+ *			  reserved
+ */
+extern int job_test_resv(struct job_record *job_ptr, time_t *when,
+			 bool move_time, bitstr_t **node_bitmap)
+{
+	slurmctld_resv_t * resv_ptr;
+	time_t job_start_time, job_end_time;
+	uint32_t duration;
+	ListIterator iter;
+	int i, rc = SLURM_SUCCESS;
+
+	*node_bitmap = (bitstr_t *) NULL;
+
+	if (job_ptr->resv_name) {
+		resv_ptr = (slurmctld_resv_t *) list_find_first (resv_list, 
+				_find_resv_name, job_ptr->resv_name);
+		job_ptr->resv_ptr = resv_ptr;
+		if (!resv_ptr)
+			return ESLURM_RESERVATION_INVALID;
+		if (_valid_job_access_resv(job_ptr, resv_ptr) != SLURM_SUCCESS)
+			return ESLURM_RESERVATION_ACCESS;
+		if (*when < resv_ptr->start_time) {
+			/* reservation starts later */
+			*when = resv_ptr->start_time;
+			return ESLURM_INVALID_TIME_VALUE;
+		}
+		if (*when > resv_ptr->end_time) {
+			/* reservation ended earlier */
+			*when = resv_ptr->end_time;
+			job_ptr->priority = 0;	/* administrative hold */
+			return ESLURM_RESERVATION_INVALID;
+		}
+		*node_bitmap = bit_copy(resv_ptr->node_bitmap);
+		return SLURM_SUCCESS;
+	}
+
+	job_ptr->resv_ptr = NULL;	/* should be redundant */
+	*node_bitmap = bit_alloc(node_record_count);
+	bit_nset(*node_bitmap, 0, (node_record_count - 1));
+	if (list_count(resv_list) == 0)
+		return SLURM_SUCCESS;
+
+	/* Job has no reservation, try to find a time when this can
+	 * run and get its required nodes (if any) */
+	if (job_ptr->time_limit == INFINITE)
+		duration = 365 * 24 * 60 * 60;
+	else if (job_ptr->time_limit != NO_VAL)
+		duration = (job_ptr->time_limit * 60);
+	else {	/* partition time limit */
+		if (job_ptr->part_ptr->max_time == INFINITE)
+			duration = 365 * 24 * 60 * 60;
+		else
+			duration = (job_ptr->part_ptr->max_time * 60);
+	}
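+	/* Scan all reservations that overlap the candidate run window.
+	 * On a conflict with the job's required nodes, advance the start
+	 * time past the reservation and retry (up to 10 passes when
+	 * move_time is set). */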
+	for (i=0; ; i++) {
+		job_start_time = job_end_time = *when;
+		job_end_time += duration;
+
+		iter = list_iterator_create(resv_list);
+		if (!iter)
+			fatal("malloc: list_iterator_create");
+		while ((resv_ptr = (slurmctld_resv_t *) list_next(iter))) {
+			if ((resv_ptr->node_bitmap == NULL) ||
+			    (resv_ptr->start_time >= job_end_time) ||
+			    (resv_ptr->end_time   <= job_start_time))
+				continue;
+			if (job_ptr->details->req_node_bitmap &&
+			    bit_overlap(job_ptr->details->req_node_bitmap,
+					resv_ptr->node_bitmap)) {
+				*when = resv_ptr->end_time;
+				rc = ESLURM_NODES_BUSY;
+				break;
+			}
+			bit_not(resv_ptr->node_bitmap);
+			bit_and(*node_bitmap, resv_ptr->node_bitmap);
+			bit_not(resv_ptr->node_bitmap);
+		}
+		list_iterator_destroy(iter);
+
+		if (rc == SLURM_SUCCESS)
+			break;
+		/* rc == ESLURM_NODES_BUSY here from above break */
+		if (move_time && (i<10)) {  /* Retry for later start time */
+			bit_nset(*node_bitmap, 0, (node_record_count - 1));
+			rc = SLURM_SUCCESS;
+			continue;
+		}
+		FREE_NULL_BITMAP(*node_bitmap);
+		break;	/* Give up */
+	}
+
+	return rc;
+}
+
+/* Begin scan of all jobs for valid reservations */
+extern void begin_job_resv_check(void)
+{
+	ListIterator iter;
+	slurmctld_resv_t *resv_ptr;
+	slurm_ctl_conf_t *conf;
+
+	if (!resv_list)
+		return;
+
+	conf = slurm_conf_lock();
+	resv_over_run = conf->resv_over_run;
+	slurm_conf_unlock();
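+	/* resv_over_run is configured in minutes; convert it to seconds,
+	 * capping INFINITE at one year */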
+	if (resv_over_run == (uint16_t) INFINITE)
+		resv_over_run = 365 * 24 * 60 * 60;
+	else
+		resv_over_run *= 60;
+
+	iter = list_iterator_create(resv_list);
+	if (!iter)
+		fatal("malloc: list_iterator_create");
+	while ((resv_ptr = (slurmctld_resv_t *) list_next(iter))) {
+		resv_ptr->job_pend_cnt = 0;
+		resv_ptr->job_run_cnt  = 0;
+	}
+	list_iterator_destroy(iter);
+}
+
+/* Test a particular job for valid reservation
+ * RET ESLURM_INVALID_TIME_VALUE if reservation is terminated
+ *     SLURM_SUCCESS if reservation is still valid */
+extern int job_resv_check(struct job_record *job_ptr)
+{
+	bool run_flag = false;
+
+	if (!job_ptr->resv_name)
+		return SLURM_SUCCESS;
+
+	if ((job_ptr->job_state == JOB_RUNNING) ||
+	    (job_ptr->job_state == JOB_SUSPENDED))
+		run_flag = true;
+	else if (job_ptr->job_state == JOB_PENDING)
+		run_flag = false;
+	else
+		return SLURM_SUCCESS;
+
+	xassert(job_ptr->resv_ptr->magic == RESV_MAGIC);
+	if (run_flag)
+		job_ptr->resv_ptr->job_run_cnt++;
+	else
+		job_ptr->resv_ptr->job_pend_cnt++;
+
+	if (job_ptr->resv_ptr->end_time < (time(NULL) + resv_over_run))
+		return ESLURM_INVALID_TIME_VALUE;
+	return SLURM_SUCCESS;
+}
+
+/* Finish scan of all jobs for valid reservations */
+extern void fini_job_resv_check(void)
+{
+	ListIterator iter;
+	slurmctld_resv_t *resv_ptr;
+	time_t now = time(NULL);
+
+	if (!resv_list)
+		return;
+
+	iter = list_iterator_create(resv_list);
+	if (!iter)
+		fatal("malloc: list_iterator_create");
+	while ((resv_ptr = (slurmctld_resv_t *) list_next(iter))) {
+		if (resv_ptr->end_time > now) { /* reservation not over */
+			_validate_node_choice(resv_ptr);
+			continue;
+		}
+
+		if ((resv_ptr->job_run_cnt  == 0) &&
+		    (resv_ptr->flags & RESERVE_FLAG_DAILY)) {
+			verbose("Advance reservation %s one day",
+				resv_ptr->name);
+			resv_ptr->start_time += 24 * 60 * 60;
+			resv_ptr->start_time_prev = resv_ptr->start_time;
+			resv_ptr->start_time_first = resv_ptr->start_time;
+			resv_ptr->end_time   += 24 * 60 * 60;
+			_post_resv_create(resv_ptr);
+			last_resv_update = now;
+			schedule_resv_save();
+			continue;
+		}
+		if ((resv_ptr->job_run_cnt  == 0) &&
+		    (resv_ptr->flags & RESERVE_FLAG_WEEKLY)) {
+			verbose("Advance reservation %s one week",
+				resv_ptr->name);
+			resv_ptr->start_time += 7 * 24 * 60 * 60;
+			resv_ptr->start_time_prev = resv_ptr->start_time;
+			resv_ptr->start_time_first = resv_ptr->start_time;
+			resv_ptr->end_time   += 7 * 24 * 60 * 60;
+			_post_resv_create(resv_ptr);
+			last_resv_update = now;
+			schedule_resv_save();
+			continue;
+		}
+		if ((resv_ptr->job_pend_cnt   == 0) &&
+		    (resv_ptr->job_run_cnt    == 0) &&
+		    (resv_ptr->maint_set_node == 0) &&
+		    ((resv_ptr->flags & RESERVE_FLAG_DAILY ) == 0) &&
+		    ((resv_ptr->flags & RESERVE_FLAG_WEEKLY) == 0)) {
+			debug("Purging vestigial reservation record %s",
+			      resv_ptr->name);
+			_clear_job_resv(resv_ptr);
+			list_delete_item(iter);
+			last_resv_update = now;
+			schedule_resv_save();
+		}
+
+	}
+	list_iterator_destroy(iter);
+}
+
+/* Send all reservations to accounting.  Only needed at
+ * first registration
+ */
+extern int send_resvs_to_accounting(void)
+{
+	ListIterator itr = NULL;
+	slurmctld_resv_t *resv_ptr;
+
+	if (!resv_list)
+		return SLURM_SUCCESS;
+
+	itr = list_iterator_create(resv_list);
+	while ((resv_ptr = list_next(itr))) {
+		_post_resv_create(resv_ptr);
+	}
+	list_iterator_destroy(itr);
+
+	return SLURM_SUCCESS;
+}
+
+
+/* Set or clear NODE_STATE_MAINT for node_state as needed */
+extern void set_node_maint_mode(void)
+{
+	ListIterator iter;
+	slurmctld_resv_t *resv_ptr;
+	time_t now = time(NULL);
+
+	if (!resv_list)
+		return;
+
+	iter = list_iterator_create(resv_list);
+	if (!iter)
+		fatal("malloc: list_iterator_create");
+	while ((resv_ptr = (slurmctld_resv_t *) list_next(iter))) {
+		if ((resv_ptr->flags & RESERVE_FLAG_MAINT) == 0)
+			continue;
+		if ((now >= resv_ptr->start_time) &&
+		    (now <  resv_ptr->end_time  )) {
+			if (!resv_ptr->maint_set_node) {
+				resv_ptr->maint_set_node = true;
+				_set_nodes_maint(resv_ptr, now);
+				last_node_update = now;
+			}
+		} else if (resv_ptr->maint_set_node) {
+			resv_ptr->maint_set_node = false;
+			_set_nodes_maint(resv_ptr, now);
+			last_node_update = now;
+		}
+	}
+	list_iterator_destroy(iter);
+}
+
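+/* Set or clear NODE_STATE_MAINT on every node in the reservation and
+ * notify accounting of nodes that are down, draining or failing */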
+static void _set_nodes_maint(slurmctld_resv_t *resv_ptr, time_t now)
+{
+	int i, i_first, i_last;
+	struct node_record *node_ptr;
+
+	if (!resv_ptr->node_bitmap) {
+		error("reservation %s lacks a bitmap", resv_ptr->name);
+		return;
+	}
+
+	i_first = bit_ffs(resv_ptr->node_bitmap);
+	i_last  = bit_fls(resv_ptr->node_bitmap);
+	for (i=i_first; i<=i_last; i++) {
+		if (!bit_test(resv_ptr->node_bitmap, i))
+			continue;
+
+		node_ptr = node_record_table_ptr + i;
+		if (resv_ptr->maint_set_node)
+			node_ptr->node_state |= NODE_STATE_MAINT;
+		else
+			node_ptr->node_state &= (~NODE_STATE_MAINT);
+		/* Mark that this node is now down and in maint mode
+		 * or was removed from maint mode */
+		if (((node_ptr->node_state & NODE_STATE_BASE)
+		     == NODE_STATE_DOWN) ||
+		   (node_ptr->node_state & NODE_STATE_DRAIN) ||
+		   (node_ptr->node_state & NODE_STATE_FAIL)) {
+			clusteracct_storage_g_node_down(
+				acct_db_conn, 
+				slurmctld_cluster_name,
+				node_ptr, now, NULL);
+		}
+	}
+}
diff --git a/src/slurmctld/reservation.h b/src/slurmctld/reservation.h
new file mode 100644
index 0000000000000000000000000000000000000000..d05ee8ee08079aa034776f862feb804753b2bbc3
--- /dev/null
+++ b/src/slurmctld/reservation.h
@@ -0,0 +1,131 @@
+/*****************************************************************************\
+ *  reservation.h - resource reservation management
+ *****************************************************************************
+ *  Copyright (C) 2009 Lawrence Livermore National Security.
+ *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+ *  Written by Morris Jette <jette1@llnl.gov> et. al.
+ *  CODE-OCEC-09-009. All rights reserved.
+ *  
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
+ *  
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission 
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and 
+ *  distribute linked combinations including the two. You must obey the GNU 
+ *  General Public License in all respects for all of the code used other than 
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this 
+ *  exception to your version of the file(s), but you are not obligated to do 
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in 
+ *  the program, then also delete it here.
+ *  
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *  
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#ifndef _RESERVATION_H
+#define _RESERVATION_H
+
+#include <time.h>
+#include <unistd.h>
+#include <slurm/slurm.h>
+#include "src/common/bitstring.h"
+#include "src/slurmctld/slurmctld.h"
+
+extern time_t last_resv_update;
+
+/* Create a resource reservation */
+extern int create_resv(resv_desc_msg_t *resv_desc_ptr);
+
+/* Update an existing resource reservation */
+extern int update_resv(resv_desc_msg_t *resv_desc_ptr);
+
+/* Delete an existing resource reservation */
+extern int delete_resv(reservation_name_msg_t *resv_desc_ptr);
+
+/* Dump the reservation records to a buffer */
+extern void show_resv(char **buffer_ptr, int *buffer_size, uid_t uid);
+
+/* Save the state of all reservations to file */
+extern int dump_all_resv_state(void);
+
+/* Purge all reservation data structures */
+extern void resv_fini(void);
+
+/* Send all reservations to accounting.  Only needed at
+ * first registration
+ */
+extern int send_resvs_to_accounting(void);
+
+/* Set or clear NODE_STATE_MAINT for node_state as needed */
+extern void set_node_maint_mode(void);
+
+/*
+ * Load the reservation state from file, recover on slurmctld restart. 
+ *	Reset reservation pointers for all jobs.
+ *	Execute this after loading the configuration file data.
+ * IN recover - 0 = validate current reservations ONLY if already recovered, 
+ *                  otherwise recover from disk
+ *              1+ = recover all reservation state from disk
+ * RET SLURM_SUCCESS or error code
+ * NOTE: READ lock_slurmctld config before entry
+ */
+extern int load_all_resv_state(int recover);
+
+/*
+ * Determine if a job request can use the specified reservations
+ * IN/OUT job_ptr - job to validate, set its resv_id and resv_flags
+ * RET SLURM_SUCCESS or error code (not found or access denied)
+ */
+extern int validate_job_resv(struct job_record *job_ptr);
+
+/*
+ * Determine which nodes a job can use based upon reservations
+ * IN job_ptr      - job to test
+ * IN/OUT when     - when we want the job to start (IN)
+ *                   when the reservation is available (OUT)
+ * IN move_time    - if true, then permit the start time to advance from
+ *                   "when" as needed IF job has no reservervation
+ * OUT node_bitmap - nodes which the job can use, caller must free
+ * RET	SLURM_SUCCESS if runable now
+ *	ESLURM_RESERVATION_ACCESS access to reservation denied
+ *	ESLURM_RESERVATION_INVALID reservation invalid
+ *	ESLURM_INVALID_TIME_VALUE reservation invalid at time "when"
+ *	ESLURM_NODES_BUSY job has no reservation, but required nodes are
+ *			  reserved
+ */
+extern int job_test_resv(struct job_record *job_ptr, time_t *when,
+			 bool move_time, bitstr_t **node_bitmap);
+
+/*
+ * Determine if a job can start now based only upon reservations
+ * IN job_ptr      - job to test
+ * RET	SLURM_SUCCESS if runnable now, otherwise an error code
+ */
+extern int job_test_resv_now(struct job_record *job_ptr);
+
+/* Begin scan of all jobs for valid reservations */
+extern void begin_job_resv_check(void);
+
+/* Test a particular job for valid reservation
+ * RET ESLURM_INVALID_TIME_VALUE if reservation is terminated
+ *     SLURM_SUCCESS if reservation is still valid */
+extern int job_resv_check(struct job_record *job_ptr);
+
+/* Finish scan of all jobs for valid reservations */
+extern void fini_job_resv_check(void);
+
+#endif /* !_RESERVATION_H */
diff --git a/src/slurmctld/sched_plugin.c b/src/slurmctld/sched_plugin.c
index 4f86d83aba6ce38d16c11b912d288dc6577d50a9..722beb123c38c3b7d174c5260b409ffcc0d6fccf 100644
--- a/src/slurmctld/sched_plugin.c
+++ b/src/slurmctld/sched_plugin.c
@@ -4,10 +4,11 @@
  *  Copyright (C) 2002-2007 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Jay Windley <jwindley@lnxi.com>.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/slurmctld/sched_plugin.h b/src/slurmctld/sched_plugin.h
index de16bb45133c5ed7edc8db0108155311412b28fb..69f368e485f2a0c28700d579636a683c0fcc70a5 100644
--- a/src/slurmctld/sched_plugin.h
+++ b/src/slurmctld/sched_plugin.h
@@ -4,10 +4,11 @@
  *  Copyright (C) 2004-2006 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Jay Windley <jwindley@lnxi.com>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/slurmctld/slurmctld.h b/src/slurmctld/slurmctld.h
index bb1cd87b264eb8033e3854542fc18e5a1c986a00..8c8aa2d2bdd0dadc2a123fdef2049349fae8a3a7 100644
--- a/src/slurmctld/slurmctld.h
+++ b/src/slurmctld/slurmctld.h
@@ -2,13 +2,14 @@
  *  slurmctld.h - definitions of functions and structures for slurmcltd use
  *****************************************************************************
  *  Copyright (C) 2002-2007 The Regents of the University of California.
- *  Copyright (C) 2008 Lawrence Livermore National Security.
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov> et. al.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -70,13 +71,14 @@
 #include "src/common/log.h"
 #include "src/common/macros.h"
 #include "src/common/pack.h"
+#include "src/common/read_config.h" /* location of slurmctld_conf */
+#include "src/common/select_job_res.h"
 #include "src/common/slurm_cred.h"
 #include "src/common/slurm_protocol_api.h"
 #include "src/common/slurm_protocol_defs.h"
 #include "src/common/switch.h"
 #include "src/common/timers.h"
 #include "src/common/xmalloc.h"
-#include "src/common/read_config.h" /* location of slurmctld_conf */
 
 #define FREE_NULL_BITMAP(_X)		\
 	do {				\
@@ -85,6 +87,8 @@
 	} while (0)
 #define IS_JOB_FINISHED(_X)		\
 	((_X->job_state & (~JOB_COMPLETING)) >  JOB_SUSPENDED)
+#define IS_JOB_COMPLETED(_X)		\
+	(IS_JOB_FINISHED(_X) && ((_X->job_state & JOB_COMPLETING) == 0))
 #define IS_JOB_PENDING(_X)		\
 	((_X->job_state & (~JOB_COMPLETING)) == JOB_PENDING)
 
@@ -162,11 +166,12 @@ typedef struct slurmctld_config {
 } slurmctld_config_t;
 
 extern slurmctld_config_t slurmctld_config;
-extern int bg_recover;		/* state recovery mode */
-extern char *slurmctld_cluster_name; /* name of cluster */
+extern int   bg_recover;		/* state recovery mode */
+extern char *slurmctld_cluster_name;	/* name of cluster */
 extern void *acct_db_conn;
-extern int accounting_enforce;
-extern int association_based_accounting;
+extern int   accounting_enforce;
+extern int   association_based_accounting;
+extern int   cluster_procs;
 
 /*****************************************************************************\
  *  NODE parameters and data structures
@@ -226,7 +231,12 @@ struct node_record {
 	char *arch;			/* computer architecture */
 	char *os;			/* operating system currently running */
 	struct node_record *node_next;	/* next entry with same hash index */
-	uint32_t hilbert_integer;	/* Hilbert number based on node name */
+	uint32_t hilbert_integer;	/* Hilbert number based on node name,
+					 * no need to save/restore */
+#ifdef APBASIL_LOC
+	uint32_t basil_node_id;		/* Cray/BASIL node ID,
+					 * no need to save/restore */
+#endif	/* APBASIL_LOC */
 };
 
 extern struct node_record *node_record_table_ptr;  /* ptr to node records */
@@ -243,6 +253,7 @@ extern bool ping_nodes_now;		/* if set, ping nodes immediately */
  *  avail_node_bitmap       Set if node's state is not DOWN, DRAINING/DRAINED, 
  *                          FAILING or NO_RESPOND (i.e. available to run a job)
  *  idle_node_bitmap        Set if node has no jobs allocated to it
+ *  power_node_bitmap       Set for nodes which are powered down
  *  share_node_bitmap       Set if any job allocated resources on that node
  *                          is configured to not share the node with other 
  *                          jobs (--exclusive option specified by job or
@@ -252,9 +263,26 @@ extern bool ping_nodes_now;		/* if set, ping nodes immediately */
 extern bitstr_t *avail_node_bitmap;	/* bitmap of available nodes, 
 					 * state not DOWN, DRAIN or FAILING */
 extern bitstr_t *idle_node_bitmap;	/* bitmap of idle nodes */
+extern bitstr_t *power_node_bitmap;	/* Powered down nodes */
 extern bitstr_t *share_node_bitmap;	/* bitmap of sharable nodes */
 extern bitstr_t *up_node_bitmap;	/* bitmap of up nodes, not DOWN */
 
+/*****************************************************************************\
+ *  SWITCH topology data structures
+\*****************************************************************************/
+struct switch_record {
+	int level;			/* level in hierarchy, leaf=0 */
+	uint32_t link_speed;		/* link speed, arbitrary units */
+	char *name;			/* switch name */
+	bitstr_t *node_bitmap;		/* bitmap of all nodes descended from 
+					 * this switch */
+	char *nodes;			/* names of direct descendant nodes */
+	char *switches;			/* names of direct descendant switches */
+};
+
+extern struct switch_record *switch_record_table;  /* ptr to switch records */
+extern int switch_record_cnt;		/* size of switch_record_table */
+
 /*****************************************************************************\
  *  PARTITION parameters and data structures
 \*****************************************************************************/
@@ -266,6 +294,7 @@ struct part_record {
 	char *name;		/* name of the partition */
 	uint16_t hidden;	/* 1 if hidden by default */
 	uint32_t max_time;	/* minutes or INFINITE */
+	uint32_t default_time;	/* minutes, NO_VAL or INFINITE */
 	uint32_t max_nodes;	/* per job or INFINITE */
 	uint32_t max_nodes_orig;/* unscaled value (c-nodes on BlueGene) */
 	uint32_t min_nodes;	/* per job */
@@ -277,11 +306,17 @@ struct part_record {
 	uint16_t root_only;	/* 1 if allocate/submit RPC can only be 
 				   issued by user root */
 	uint16_t max_share;	/* number of jobs to gang schedule */
+	double   norm_priority;	/* normalized scheduling priority for
+				 * jobs (DON'T PACK) */
 	uint16_t priority;	/* scheduling priority for jobs */
 	uint16_t state_up;	/* 1 if state is up, 0 if down */
 	char *nodes;		/* comma delimited list names of nodes */
 	char *allow_groups;	/* comma delimited list of groups, 
 				 * NULL indicates all */
+	char *allow_alloc_nodes;/* comma delimited list of allowed 
+				 * allocating nodes
+				 * NULL indicates all */
+
 	uid_t *allow_uids;	/* zero terminated list of allowed users */
 	bitstr_t *node_bitmap;	/* bitmap of nodes in partition */
 };
@@ -291,20 +326,49 @@ extern time_t last_part_update;		/* time of last part_list update */
 extern struct part_record default_part;	/* default configuration values */
 extern char *default_part_name;		/* name of default partition */
 extern struct part_record *default_part_loc;	/* default partition ptr */
+extern uint16_t part_max_priority;      /* max priority in all partitions */
+
+/*****************************************************************************\
+ *  RESERVATION parameters and data structures
+\*****************************************************************************/
+
+typedef struct slurmctld_resv {
+	char *accounts;		/* names of accounts permitted to use	*/
+	int account_cnt;	/* count of accounts permitted to use	*/
+	char **account_list;	/* list of accounts permitted to use	*/
+	char *assoc_list;	/* list of associations			*/
+	uint32_t cpu_cnt;	/* number of reserved CPUs		*/
+	uint32_t duration;	/* time in seconds for this
+				 * reservation to last                  */
+	time_t end_time;	/* end time of reservation		*/
+	char *features;		/* required node features		*/
+	uint16_t flags;		/* see RESERVE_FLAG_* in slurm.h	*/
+	uint32_t job_pend_cnt;	/* number of pending jobs		*/
+	uint32_t job_run_cnt;	/* number of running jobs		*/
+	uint16_t magic;		/* magic cookie, RESV_MAGIC		*/
+	bool maint_set_node;	/* NODE_STATE_MAINT set for nodes	*/
+	char *name;		/* name of reservation			*/
+	bitstr_t *node_bitmap;	/* bitmap of reserved nodes		*/
+	uint32_t node_cnt;	/* count of nodes required		*/
+	char *node_list;	/* list of reserved nodes or ALL	*/
+	char *partition;	/* name of partition to be used		*/
+	struct part_record *part_ptr;	/* pointer to partition used	*/
+	uint32_t resv_id;	/* unique reservation ID, internal use	*/
+	time_t start_time;	/* start time of reservation		*/
+	time_t start_time_first;/* when the reservation first started	*/
+	time_t start_time_prev;	/* If start time was changed this is
+				 * the previous start time.  Needed
+				 * for accounting */
+	char *users;		/* names of users permitted to use	*/
+	int user_cnt;		/* count of users permitted to use	*/
+	uid_t *user_list;	/* array of users permitted to use	*/
+} slurmctld_resv_t;
 
 /*****************************************************************************\
  *  JOB parameters and data structures
 \*****************************************************************************/
 extern time_t last_job_update;	/* time of last update to part records */
 
-/* Used for Moab
- * These QOS values only apply to LLNL's configuration
- * Other values may apply at other sites,
- * These may be mapped to partition priorities in the future */
-#define QOS_EXPEDITE	300
-#define QOS_NORMAL 	200
-#define	QOS_STANDBY	100
-
 #define DETAILS_MAGIC 0xdea84e7
 #define JOB_MAGIC 0xf0b7392c
 #define STEP_MAGIC 0xce593bc1
@@ -318,57 +382,71 @@ struct feature_record {
 	char *name;			/* name of feature */
 	uint16_t count;			/* count of nodes with this feature */
 	uint8_t op_code;		/* separator, see FEATURE_OP_ above */
+	uint16_t tmp_cnt;		/* temporary, allocated node counter */
 };
 
 /* job_details - specification of a job's constraints, 
  * can be purged after initiation */
 struct job_details {
+	uint16_t acctg_freq;		/* accounting polling interval */
+	uint16_t argc;			/* count of argv elements */
+	char **argv;			/* arguments for a batch job script */
+	time_t begin_time;		/* start at this time (srun --begin), 
+					 * resets to time first eligible
+					 * (all dependencies satisfied) */
+	char *ckpt_dir;		        /* directory to store checkpoint images */
+	uint16_t contiguous;		/* set if requires contiguous nodes */
+	char *cpu_bind;			/* binding map for map/mask_cpu */
+	uint16_t cpu_bind_type;		/* see cpu_bind_type_t */
+	uint16_t cpus_per_task;		/* number of processors required for 
+					 * each task */
+	List depend_list;		/* list of job_ptr:state pairs */
+	char *dependency;		/* wait for other jobs */
+	char *err;			/* pathname of job's stderr file */
+	bitstr_t *exc_node_bitmap;	/* bitmap of excluded nodes */
+	char *exc_nodes;		/* excluded nodes */
+	List feature_list;		/* required features with node counts */
+	char *features;			/* required features */
+	char *in;			/* pathname of job's stdin file */
+	/* job constraints: */
+	uint32_t job_min_procs;		/* minimum processors per node */
+	uint32_t job_min_memory;	/* minimum memory per node (MB) OR
+					 * memory per allocated 
+					 * CPU | MEM_PER_CPU */
+	uint32_t job_min_tmp_disk;	/* minimum tempdisk per node, MB */
 	uint32_t magic;			/* magic cookie for data integrity */
-	uint32_t min_nodes;		/* minimum number of nodes */
 	uint32_t max_nodes;		/* maximum number of nodes */
-	char *req_nodes;		/* required nodes */
-	char *exc_nodes;		/* excluded nodes */
+	multi_core_data_t *mc_ptr;	/* multi-core specific data */
+	char *mem_bind;			/* binding map for map/mask_mem */
+	uint16_t mem_bind_type;		/* see mem_bind_type_t */
+	uint32_t min_nodes;		/* minimum number of nodes */
+	uint16_t nice;		        /* requested priority change, 
+					 * NICE_OFFSET == no change */
+	uint16_t ntasks_per_node;	/* number of tasks on each node */
+	uint32_t num_tasks;		/* number of tasks to start */
+	uint8_t open_mode;		/* stdout/err append or truncate */
+	char *out;			/* pathname of job's stdout file */
+	uint8_t overcommit;		/* processors being over subscribed */
+	uint16_t plane_size;		/* plane size when task_dist =
+					 * SLURM_DIST_PLANE */
+	uint8_t prolog_running;		/* set while prolog_slurmctld is 
+					 * running */
+	uint32_t reserved_resources;	/* CPU minutes of resources reserved
+					 * for this job while it was pending */
 	bitstr_t *req_node_bitmap;	/* bitmap of required nodes */
 	uint16_t *req_node_layout;	/* task layout for required nodes */
-	bitstr_t *exc_node_bitmap;	/* bitmap of excluded nodes */
-	char *features;			/* required features */
-	List feature_list;		/* required features with node counts */
+	char *req_nodes;		/* required nodes */
+	uint16_t requeue;		/* controls ability to requeue job */
+	char *restart_dir;	        /* restart execution from ckpt images in this dir */
 	uint16_t shared;		/* 1 if job can share nodes,
 					 * 0 if job cannot share nodes,
 					 * any other value accepts the default
 					 * sharing policy. */
-	uint16_t contiguous;		/* set if requires contiguous nodes */
+	time_t submit_time;		/* time of submission */
 	uint16_t task_dist;		/* task layout for this job. Only
 					 * useful when Consumable Resources
                                          * is enabled */
-	uint32_t num_tasks;		/* number of tasks to start */
-	uint8_t open_mode;		/* stdout/err append or trunctate */
-	uint8_t overcommit;		/* processors being over subscribed */
-	uint16_t acctg_freq;		/* accounting polling interval */
-	uint16_t cpus_per_task;		/* number of processors required for 
-					 * each task */
-	uint16_t ntasks_per_node;	/* number of tasks on each node */
-	/* job constraints: */
-	uint32_t job_min_procs;		/* minimum processors per node */
-	uint32_t job_min_memory;	/* minimum memory per node (MB) OR
-					 * memory per allocated CPU | MEM_PER_CPU */
-	uint32_t job_min_tmp_disk;	/* minimum tempdisk per node, MB */
-	char *err;			/* pathname of job's stderr file */
-	char *in;			/* pathname of job's stdin file */
-	char *out;			/* pathname of job's stdout file */
-	time_t submit_time;		/* time of submission */
-	time_t begin_time;		/* start at this time (srun --being), 
-					 * resets to time first eligible
-					 * (all dependencies satisfied) */
-	uint32_t reserved_resources;	/* CPU minutes of resources reserved
-					 * for this job while it was pending */
 	char *work_dir;			/* pathname of working directory */
-	char **argv;			/* arguments for a batch job script */
-	uint16_t argc;			/* count of argv elements */
-	uint16_t requeue;		/* controls ability requeue job */
-	multi_core_data_t *mc_ptr;	/* multi-core specific data */
-	char *dependency;		/* wait for other jobs */
-	List depend_list;		/* list of job_ptr:state pairs */
 };
 
 struct job_record {
@@ -383,8 +461,11 @@ struct job_record {
 					 * value before use */
 	uint16_t batch_flag;		/* 1 or 2 if batch job (with script),
 					 * 2 indicates retry mode (one retry) */
+	check_jobinfo_t check_job;      /* checkpoint context, opaque */
+	uint16_t ckpt_interval;	        /* checkpoint interval in minutes */
+	time_t ckpt_time;	        /* last time job was periodically checkpointed */
 	char *comment;			/* arbitrary comment */
-        uint16_t cr_enabled;            /* specify if if Consumable Resources
+	uint16_t cr_enabled;            /* specify if Consumable Resources
                                          * is enabled. Needed since CR deals
                                          * with a finer granularity in its
                                          * node/cpu scheduling (available cpus
@@ -395,6 +476,9 @@ struct job_record {
 	uint32_t db_index;              /* used only for database
 					 * plugins */
 	struct job_details *details;	/* job details */
+	uint16_t direct_set_prio;	/* Priority was set directly; if set,
+					 * the system will not change the
+					 * priority any further */
 	time_t end_time;		/* time of termination, 
 					 * actual or expected */
 	uint32_t exit_code;		/* exit code for job (status from 
@@ -415,7 +499,7 @@ struct job_record {
 	uint32_t magic;			/* magic cookie for data integrity */
 	char *name;			/* name of the job */
 	char *network;			/* network/switch requirement spec */
-	uint16_t next_step_id;		/* next step id to be used */
+	uint32_t next_step_id;		/* next step id to be used */
 	char *nodes;			/* list of nodes allocated to job */
 	slurm_addr *node_addr;		/* addresses of the nodes allocated to 
 					 * job */
@@ -432,12 +516,27 @@ struct job_record {
 	time_t pre_sus_time;		/* time job ran prior to last suspend */
 	uint32_t priority;		/* relative priority of the job,
 					 * zero == held (don't initiate) */
-	uint16_t qos;			/* quality of service, used only by Moab */
+	double priority_fs;		/* cached value used by sprio command */
+	uint16_t qos;			/* quality of service, 
+					 * used only by Moab */
+	void *qos_ptr;	                /* pointer to the quality of
+					 * service record used for this job, 
+					 * used only by Moab, it is
+					 * void* because of interdependencies
+					 * in the header files, confirm the 
+					 * value before use */
+	uint16_t restart_cnt;		/* count of restarts */
+	uint32_t resv_id;		/* reservation ID */
+	char *resv_name;		/* reservation name */
+	struct slurmctld_resv *resv_ptr;/* reservation structure pointer */
+	uint16_t resv_flags;		/* see RESERVE_FLAG_* in slurm.h */
 	uint32_t requid;            	/* requester user ID */
 	char *resp_host;		/* host for srun communications */
-	select_jobinfo_t select_jobinfo;/* opaque data */
+	select_jobinfo_t select_jobinfo;/* opaque data, BlueGene */
+	select_job_res_t select_job;	/* details of allocated cores */
 	time_t start_time;		/* time execution begins, 
 					 * actual or expected */
+	char *state_desc;		/* optional details for state_reason */
 	uint16_t state_reason;		/* reason job still pending or failed
 					 * see slurm.h:enum job_wait_reason */
 	List step_list;			/* list of job's steps */
@@ -449,21 +548,7 @@ struct job_record {
 	uint32_t total_procs;		/* number of allocated processors, 
 					 * for accounting */
 	uint32_t user_id;		/* user the job runs as */
-
-	/* Per node allocation details */
-	uint16_t num_cpu_groups;	/* record count in cpus_per_node and 
-					 * cpu_count_reps */
-	uint32_t *cpus_per_node;	/* array of cpus per node allocated */
-	uint32_t *cpu_count_reps;	/* array of consecutive nodes with 
-					 * same cpu count */
-
-        uint32_t alloc_lps_cnt;		/* number of hosts in alloc_lps
-					 * or 0 if alloc_lps is not needed
-					 * for the credentials */
-        uint32_t *alloc_lps;		/* number of logical processors
-					 * allocated for this job */
-	uint32_t *used_lps;		/* number of logical processors
-					 * already allocated to job steps */
+	char *wckey;		        /* optional wckey */
 };
 
 /* Job dependency specification, used in "depend_list" within job_record */
@@ -479,37 +564,46 @@ struct	depend_spec {
 };
 
 struct 	step_record {
-	struct job_record* job_ptr; 	/* ptr to the job that owns the step */
-	uint16_t step_id;		/* step number */
-	uint16_t cyclic_alloc;		/* set for cyclic task allocation 
-					   across nodes */
-	time_t start_time;      	/* step allocation time */
-/*	time_t suspend_time;		 * time step last suspended or resumed
-					 * implicitly the same as suspend_time
-					 * in the job record */
-	time_t pre_sus_time;		/* time step ran prior to last suspend */
-	time_t tot_sus_time;		/* total time in suspended state */
-	bitstr_t *step_node_bitmap;	/* bitmap of nodes allocated to job 
-					 * step */
-	uint16_t port;			/* port for srun communications */
-	char *host;			/* host for srun communications */
 	uint16_t batch_step;		/* 1 if batch job step, 0 otherwise */
-	uint16_t mem_per_task;		/* MB memory per task, 0=no limit */
 	uint16_t ckpt_interval;		/* checkpoint interval in minutes */
-	char *ckpt_path;	        /* path to store checkpoint image files */
-	uint16_t exclusive;		/* dedicated resources for the step */
-	time_t ckpt_time;		/* time of last checkpoint */
-	switch_jobinfo_t switch_job;	/* switch context, opaque */
 	check_jobinfo_t check_job;	/* checkpoint context, opaque */
-	char *name;			/* name of job step */
-	char *network;			/* step's network specification */
+	char *ckpt_dir;	                /* path to checkpoint image files */
+	time_t ckpt_time;		/* time of last checkpoint */
+	bitstr_t *core_bitmap_job;	/* bitmap of cores allocated to this
+					 * step relative to job's nodes, 
+					 * see src/common/select_job_res.h */
+	uint32_t cpu_count;		/* count of step's CPUs */
 	uint16_t cpus_per_task;		/* cpus per task initiated */
+	uint16_t cyclic_alloc;		/* set for cyclic task allocation 
+					 * across nodes */
+	uint16_t exclusive;		/* dedicated resources for the step */
 	uint32_t exit_code;		/* highest exit code from any task */
 	bitstr_t *exit_node_bitmap;	/* bitmap of exited nodes */
+	char *host;			/* host for srun communications */
+	struct job_record* job_ptr; 	/* ptr to the job that owns the step */
 	jobacctinfo_t *jobacct;         /* keep track of process info in the 
 					 * step */
+	uint32_t mem_per_task;		/* MB memory per task, 0=no limit */
+	char *name;			/* name of job step */
+	char *network;			/* step's network specification */
+	uint8_t no_kill;		/* 1 if no kill on node failure */
+	uint16_t port;			/* port for srun communications */
+	time_t pre_sus_time;		/* time step ran prior to last suspend */
+	int *resv_port_array;		/* reserved port indexes */
+	uint16_t resv_port_cnt;		/* count of ports reserved per node */
+	char *resv_ports;		/* ports reserved for job */
+	time_t start_time;      	/* step allocation time */
+	uint32_t step_id;		/* step number */
 	slurm_step_layout_t *step_layout;/* info about how tasks are laid out
 					  * in the step */
+	bitstr_t *step_node_bitmap;	/* bitmap of nodes allocated to job 
+					 * step */
+/*	time_t suspend_time;		 * time step last suspended or resumed
+					 * implicitly the same as suspend_time
+					 * in the job record */
+	switch_jobinfo_t switch_job;	/* switch context, opaque */
+	time_t time_last_active;	/* time step was last found on node */
+	time_t tot_sus_time;		/* total time in suspended state */
 };
 
 extern List job_list;			/* list of job_record entries */
@@ -529,8 +623,9 @@ enum select_data_info {
 	SELECT_BITMAP,       /* data-> partially_idle_bitmap (CR support) */
 	SELECT_ALLOC_CPUS,   /* data-> uint16 alloc cpus (CR support) */
 	SELECT_ALLOC_LPS,    /* data-> uint32 alloc lps  (CR support) */
-	SELECT_AVAIL_CPUS,   /* data-> uint16 avail cpus (CR support) */ 
-	SELECT_AVAIL_MEMORY  /* data-> uint32 avail mem  (CR support) */ 
+	SELECT_AVAIL_MEMORY, /* data-> uint32 avail mem  (CR support) */
+	SELECT_STATIC_PART   /* data-> uint16, 1 if static partitioning 
+			      * BlueGene support */
 } ;
 
 /*****************************************************************************\
@@ -615,14 +710,6 @@ extern struct node_record *create_node_record (struct config_record
  */
 extern struct part_record *create_part_record (void);
 
-/* 
- * create_step_record - create an empty step_record for the specified job.
- * IN job_ptr - pointer to job table entry to have step record added
- * RET a pointer to the record or NULL if error
- * NOTE: allocates memory that should be xfreed with delete_step_record
- */
-extern struct step_record * create_step_record (struct job_record *job_ptr);
-
 /* 
  * delete_step_records - delete step record for specified job_ptr
  * IN job_ptr - pointer to job table entry to have step records removed
@@ -697,6 +784,10 @@ extern void dump_job_step_state(struct step_record *step_ptr, Buf buffer);
  */
 extern void dump_step_desc(job_step_create_request_msg_t *step_spec);
 
+/* Remove one node from a job's allocation */
+extern void excise_node_from_job(struct job_record *job_ptr,
+                                 struct node_record *node_ptr);
+
 /* 
  * find_job_record - return a pointer to the job record with the given job_id
  * IN job_id - requested job's id
@@ -724,6 +815,16 @@ extern struct node_record *find_node_record (char *name);
  */
 extern struct part_record *find_part_record (char *name);
 
+/* 
+ * find_step_record - return a pointer to the step record with the given 
+ *	job_id and step_id
+ * IN job_ptr - pointer to job table entry to have step record added
+ * IN step_id - id of the desired job step
+ * RET pointer to the job step's record, NULL on error
+ */
+extern struct step_record * find_step_record(struct job_record *job_ptr, 
+					     uint32_t step_id);
+
 /*
  * get_job_env - return the environment variables and their count for a 
  *	given job
@@ -746,16 +847,6 @@ extern char *get_job_script (struct job_record *job_ptr);
  */
 extern uint32_t get_next_job_id(void);
 
-/* 
- * find_step_record - return a pointer to the step record with the given 
- *	job_id and step_id
- * IN job_ptr - pointer to job table entry to have step record added
- * IN step_id - id of the desired job step
- * RET pointer to the job step's record, NULL on error
- */
-extern struct step_record * find_step_record(struct job_record *job_ptr, 
-					     uint16_t step_id);
-
 /* 
  * init_job_conf - initialize the job configuration tables and values. 
  *	this should be called after creating node information, but 
@@ -844,6 +935,10 @@ extern int job_allocate(job_desc_msg_t * job_specs, int immediate,
  */
 extern int job_cancel_by_assoc_id(uint32_t assoc_id);
 
+/* Perform checkpoint operation on a job */
+extern int job_checkpoint(checkpoint_msg_t *ckpt_ptr, uid_t uid, 
+			  slurm_fd conn_fd);
+
 /* log the completion of the specified job */
 extern void job_completion_logger(struct job_record  *job_ptr);
 
@@ -885,6 +980,21 @@ extern int job_fail(uint32_t job_id);
  */
 extern int job_node_ready(uint32_t job_id, int *ready);
 
+/*
+ * job_restart - Restart a batch job from checkpointed state
+ *
+ * Restarting a job is similar to submitting a new job, except that
+ * the job requirements are loaded from the checkpoint file and
+ * the job id is restored.
+ *
+ * IN ckpt_ptr - checkpoint request message 
+ * IN uid - user id of the user issuing the RPC
+ * IN conn_fd - file descriptor on which to send reply
+ * RET 0 on success, otherwise ESLURM error code
+ */
+extern int job_restart(checkpoint_msg_t *ckpt_ptr, uid_t uid, 
+		       slurm_fd conn_fd);
+
 /* 
  * job_signal - signal the specified job
  * IN job_id - id of the job to be signaled
@@ -1043,13 +1153,23 @@ extern void kill_job_on_node(uint32_t job_id, struct job_record *job_ptr,
 			     struct node_record *node_ptr);
 
 /*
- * kill_running_job_by_node_name - Given a node name, deallocate jobs 
- *	from the node or kill them 
+ * kill_running_job_by_node_name - Given a node name, deallocate RUNNING 
+ *	or COMPLETING jobs from the node or kill them 
  * IN node_name - name of a node
- * IN step_test - if true, only kill the job if a step is running on the node
  * RET number of killed jobs
  */
-extern int kill_running_job_by_node_name(char *node_name, bool step_test);
+extern int kill_running_job_by_node_name(char *node_name);
+
+/* 
+ * kill_step_on_node - determine if the specified job has any job steps
+ *	allocated to the specified node and kill them unless no_kill flag
+ *	is set on the step
+ * IN job_ptr - pointer to an active job record
+ * IN node_ptr - pointer to a node record
+ * RET count of killed job steps
+ */
+extern int kill_step_on_node(struct job_record  *job_ptr, 
+			     struct node_record *node_ptr);
 
 /* list_compare_config - compare two entry from the config list based upon 
  *	weight, see common/list.h for documentation */
@@ -1080,6 +1200,12 @@ extern int load_all_job_state ( void );
  */
 extern int load_all_node_state ( bool state_only );
 
+/*
+ * load_last_job_id - load only the last job ID from state save file.
+ * RET 0 or error code
+ */
+extern int load_last_job_id( void );
+
 /*
  * load_part_uid_allow_list - reload the allow_uid list of partitions
  *	if required (updated group file or force set)
@@ -1135,7 +1261,7 @@ extern void node_fini(void);
  *	representation
  * IN node_names  - list of nodes
  * IN best_effort - if set don't return an error on invalid node name entries 
- * OUT bitmap     - set to bitmap or NULL on error 
+ * OUT bitmap     - set to bitmap, may not have all bits set on error 
  * RET 0 if no error, otherwise EINVAL
  * global: node_record_table_ptr - pointer to global node table
  * NOTE: the caller must bit_free() memory at bitmap when no longer required
@@ -1158,11 +1284,6 @@ extern void node_not_resp (char *name, time_t msg_time);
  * and log that the node is not responding using a hostlist expression */
 extern void node_no_resp_msg(void);
 
-/* Using the node record table, generate a Hilbert integer for each node
- * based upon its coordinates and sort the records in that order. This must
- * be called once, immediately after reading the slurm.conf file. */
-extern void nodes_to_hilbert_curve(void);
-
 /*
  * job_alloc_info - get details about an existing job allocation
  * IN uid - job issuing the code
@@ -1406,8 +1527,9 @@ extern int step_create ( job_step_create_request_msg_t *step_specs,
 
 /*
  * step_layout_create - creates a step_layout according to the inputs.
- * IN job_ptr - job record step belongs to
+ * IN step_ptr - step having tasks laid out
  * IN step_node_list - node list of hosts in step
+ * IN node_count - count of nodes in step allocation
  * IN num_tasks - number of tasks in step
  * IN cpus_per_task - number of cpus per task
  * IN task_dist - type of task distribution
@@ -1418,7 +1540,7 @@ extern int step_create ( job_step_create_request_msg_t *step_specs,
  */
 extern slurm_step_layout_t *step_layout_create(struct step_record *step_ptr,
 					       char *step_node_list,
-					       uint16_t node_count,
+					       uint32_t node_count,
 					       uint32_t num_tasks,
 					       uint16_t cpus_per_task,
 					       uint16_t task_dist,
@@ -1433,26 +1555,17 @@ extern slurm_step_layout_t *step_layout_create(struct step_record *step_ptr,
 extern int step_epilog_complete(struct job_record  *job_ptr, 
 	char *node_name);
 
-/* 
- * step_on_node - determine if the specified job has any job steps allocated to 
- * 	the specified node 
- * IN job_ptr - pointer to an active job record
- * IN node_ptr - pointer to a node record
- * RET true of job has step on the node, false otherwise 
- */
-extern bool step_on_node(struct job_record  *job_ptr, 
-			 struct node_record *node_ptr);
-
 /*
  * step_partial_comp - Note the completion of a job step on at least
  *	some of its nodes
  * IN req     - step_completion_msg RPC from slurmstepd
+ * IN uid     - UID issuing the request
  * OUT rem    - count of nodes for which responses are still pending
  * OUT max_rc - highest return code for any step thus far
  * RET 0 on success, otherwise ESLURM error code
  */
-extern int step_partial_comp(step_complete_msg_t *req, int *rem,
-			     uint32_t *max_rc);
+extern int step_partial_comp(step_complete_msg_t *req, uid_t uid, 
+			     int *rem, uint32_t *max_rc);
 
 /* Update time stamps for job step suspend */
 extern void suspend_job_step(struct job_record *job_ptr);
@@ -1510,13 +1623,23 @@ extern void update_logging(void);
 extern int update_node ( update_node_msg_t * update_node_msg )  ;
 
 /* 
- * update_part - update a partition's configuration data
+ * update_part - create or update a partition's configuration data
  * IN part_desc - description of partition changes
+ * IN create_flag - create a new partition
  * RET 0 or an error code
  * global: part_list - list of partition entries
  *	last_part_update - update time of partition records
  */
-extern int update_part (update_part_msg_t * part_desc );
+extern int update_part (update_part_msg_t * part_desc, bool create_flag);
+
+/*
+ * validate_alloc_node - validate that the allocating node
+ * is allowed to use this partition
+ * IN part_ptr - pointer to a partition
+ * IN alloc_node - allocting node of the request
+ * RET 1 if permitted to run, 0 otherwise
+ */
+extern int validate_alloc_node(struct part_record *part_ptr, char* alloc_node);
 
 /*
  * validate_group - validate that the submit uid is authorized to run in 
diff --git a/src/slurmctld/srun_comm.c b/src/slurmctld/srun_comm.c
index 8bcfb519ba77fc11742f9f4a23aa096060d2820f..b09ddbcce93b0e9f4c080dab670ae6fe8696b46a 100644
--- a/src/slurmctld/srun_comm.c
+++ b/src/slurmctld/srun_comm.c
@@ -2,12 +2,14 @@
  *  srun_comm.c - srun communications
  *****************************************************************************
  *  Copyright (C) 2002-2007 The Regents of the University of California.
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -78,10 +80,12 @@ extern void srun_allocate (uint32_t job_id)
 	struct job_record *job_ptr = find_job_record (job_id);
 
 	xassert(job_ptr);
-	if (job_ptr && job_ptr->alloc_resp_port && job_ptr->alloc_node
-	&&  job_ptr->resp_host) {
+	if (job_ptr && job_ptr->alloc_resp_port && job_ptr->alloc_node &&
+	    job_ptr->resp_host && job_ptr->select_job && 
+	    job_ptr->select_job->cpu_array_cnt) {
 		slurm_addr * addr;
 		resource_allocation_response_msg_t *msg_arg;
+		select_job_res_t select_ptr = job_ptr->select_job;
 
 		addr = xmalloc(sizeof(struct sockaddr_in));
 		slurm_set_addr(addr, job_ptr->alloc_resp_port, 
@@ -89,15 +93,17 @@ extern void srun_allocate (uint32_t job_id)
 		msg_arg = xmalloc(sizeof(resource_allocation_response_msg_t));
 		msg_arg->job_id 	= job_ptr->job_id;
 		msg_arg->node_list	= xstrdup(job_ptr->nodes);
-		msg_arg->num_cpu_groups	= job_ptr->num_cpu_groups;
-		msg_arg->cpus_per_node  = xmalloc(sizeof(uint32_t) *
-				job_ptr->num_cpu_groups);
-		memcpy(msg_arg->cpus_per_node, job_ptr->cpus_per_node,
-				(sizeof(uint32_t) * job_ptr->num_cpu_groups));
+		msg_arg->num_cpu_groups	= select_ptr->cpu_array_cnt;
+		msg_arg->cpus_per_node  = xmalloc(sizeof(uint16_t) *
+					  select_ptr->cpu_array_cnt);
+		memcpy(msg_arg->cpus_per_node, 
+		       select_ptr->cpu_array_value,
+		       (sizeof(uint16_t) * select_ptr->cpu_array_cnt));
 		msg_arg->cpu_count_reps  = xmalloc(sizeof(uint32_t) *
-				job_ptr->num_cpu_groups);
-		memcpy(msg_arg->cpu_count_reps, job_ptr->cpu_count_reps,
-				(sizeof(uint32_t) * job_ptr->num_cpu_groups));
+					   select_ptr->cpu_array_cnt);
+		memcpy(msg_arg->cpu_count_reps, 
+		       select_ptr->cpu_array_reps,
+		       (sizeof(uint32_t) * select_ptr->cpu_array_cnt));
 		msg_arg->node_cnt	= job_ptr->node_cnt;
 		msg_arg->select_jobinfo = select_g_copy_jobinfo(
 				job_ptr->select_jobinfo);
@@ -346,6 +352,31 @@ extern void srun_step_complete (struct step_record *step_ptr)
 	}
 }
 
+/*
+ * srun_step_missing - notify srun that a job step is missing from
+ *		       a node we expect to find it on
+ * IN step_ptr  - pointer to the slurmctld job step record
+ * IN node_list - names of nodes we did not find the step on
+ */
+extern void srun_step_missing (struct step_record *step_ptr,
+			       char *node_list)
+{
+	slurm_addr * addr;
+	srun_step_missing_msg_t *msg_arg;
+
+	xassert(step_ptr);
+	if (step_ptr->port && step_ptr->host && step_ptr->host[0]) {
+		addr = xmalloc(sizeof(struct sockaddr_in));
+		slurm_set_addr(addr, step_ptr->port, step_ptr->host);
+		msg_arg = xmalloc(sizeof(srun_step_missing_msg_t));
+		msg_arg->job_id   = step_ptr->job_ptr->job_id;
+		msg_arg->step_id  = step_ptr->step_id;
+		msg_arg->nodelist = xstrdup(node_list);
+		_srun_agent_launch(addr, step_ptr->host, SRUN_STEP_MISSING,
+				   msg_arg);
+	}
+}
+
 /*
  * srun_exec - request that srun execute a specific command
 *	and route its output to stdout
diff --git a/src/slurmctld/srun_comm.h b/src/slurmctld/srun_comm.h
index 7858e5a2ba7f45a845f4e6948fcf7d45d36e30f0..70e06d31d90e41c8a60ed48bdf1fb6a999ddfd7a 100644
--- a/src/slurmctld/srun_comm.h
+++ b/src/slurmctld/srun_comm.h
@@ -4,10 +4,11 @@
  *  Copyright (C) 2002-2007 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette@llnl.gov> et. al.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -76,6 +77,15 @@ extern void srun_job_complete (struct job_record *job_ptr);
  */
 extern void srun_step_complete (struct step_record *step_ptr);
 
+/*
+ * srun_step_missing - notify srun that a job step is missing from
+ *		       a node we expect to find it on
+ * IN step_ptr  - pointer to the slurmctld job step record
+ * IN node_list - names of nodes we did not find the step on
+ */
+extern void srun_step_missing (struct step_record *step_ptr,
+			       char *node_list);
+
 /*
  * srun_node_fail - notify srun of a node's failure
  * IN job_id    - id of job to notify
diff --git a/src/slurmctld/state_save.c b/src/slurmctld/state_save.c
index 105b624bfd8a9f485b4b295293d0e4c02315143a..4e49ddc7c2ace67ed4eae786589b2cc6fd63b1a9 100644
--- a/src/slurmctld/state_save.c
+++ b/src/slurmctld/state_save.c
@@ -2,12 +2,14 @@
  *  state_save.c - Keep saved slurmctld state current 
  *****************************************************************************
  *  Copyright (C) 2004-2007 The Regents of the University of California.
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -44,12 +46,14 @@
 #endif                          /* WITH_PTHREADS */
 
 #include "src/common/macros.h"
+#include "src/slurmctld/reservation.h"
 #include "src/slurmctld/slurmctld.h"
 #include "src/slurmctld/trigger_mgr.h"
 
 static pthread_mutex_t state_save_lock = PTHREAD_MUTEX_INITIALIZER;
 static pthread_cond_t  state_save_cond = PTHREAD_COND_INITIALIZER;
-static int save_jobs = 0, save_nodes = 0, save_parts = 0, save_triggers = 0;
+static int save_jobs = 0, save_nodes = 0, save_parts = 0;
+static int save_triggers = 0, save_resv = 0;
 static bool run_save_thread = true;
 
 /* Queue saving of job state information */
@@ -79,6 +83,15 @@ extern void schedule_part_save(void)
 	pthread_cond_broadcast(&state_save_cond);
 }
 
+/* Queue saving of reservation state information */
+extern void schedule_resv_save(void)
+{
+	slurm_mutex_lock(&state_save_lock);
+	save_resv++;
+	slurm_mutex_unlock(&state_save_lock);
+	pthread_cond_broadcast(&state_save_cond);
+}
+
 /* Queue saving of trigger state information */
 extern void schedule_trigger_save(void)
 {
@@ -113,7 +126,7 @@ extern void *slurmctld_state_save(void *no_data)
 		slurm_mutex_lock(&state_save_lock);
 		while (1) {
 			if (save_jobs + save_nodes + save_parts + 
-			    save_triggers)
+			    save_resv + save_triggers)
 				break;		/* do the work */
 			else if (!run_save_thread) {
 				run_save_thread = true;
@@ -157,6 +170,17 @@ extern void *slurmctld_state_save(void *no_data)
 		if (run_save)
 			(void)dump_all_part_state();
 
+		/* save reservation info if necessary */
+		run_save = false;
+		slurm_mutex_lock(&state_save_lock);
+		if (save_resv) {
+			run_save = true;
+			save_resv = 0;
+		}
+		slurm_mutex_unlock(&state_save_lock);
+		if (run_save)
+			(void)dump_all_resv_state();
+
 		/* save trigger info if necessary */
 		run_save = false;
 		slurm_mutex_lock(&state_save_lock);
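
schedule_resv_save() above follows the same pattern as the other schedule_*_save() helpers: bump a counter under state_save_lock, broadcast state_save_cond, and let the single slurmctld_state_save() thread coalesce any number of queued requests into one dump_all_resv_state() call. A stripped-down sketch of that coalescing pattern (illustrative names, not part of the patch):

#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static int pending = 0;

void request_save(void)		/* many producers */
{
	pthread_mutex_lock(&lock);
	pending++;
	pthread_mutex_unlock(&lock);
	pthread_cond_broadcast(&cond);
}

void *saver_thread(void *arg)	/* single consumer */
{
	while (1) {
		pthread_mutex_lock(&lock);
		while (pending == 0)
			pthread_cond_wait(&cond, &lock);
		pending = 0;	/* N queued requests collapse into one save */
		pthread_mutex_unlock(&lock);
		/* write state to disk here, outside the lock */
	}
	return NULL;
}
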
diff --git a/src/slurmctld/state_save.h b/src/slurmctld/state_save.h
index d800752ad1650fae6f8ce911d4f55bfb227f2cd1..6eea01549ce27d8a80f5d8d783a889a42e271e2c 100644
--- a/src/slurmctld/state_save.h
+++ b/src/slurmctld/state_save.h
@@ -2,12 +2,14 @@
  *  state_save.h - Definitions for keeping saved slurmctld state current 
  *****************************************************************************
  *  Copyright (C) 2004-2007 The Regents of the University of California.
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -15,7 +17,7 @@
  *  any later version.
  *
  *  In addition, as a special exception, the copyright holders give permission 
- *  to link the code of portions of this program with the OpenSSL library under 
+ *  to link the code of portions of this program with the OpenSSL library under
  *  certain conditions as described in each individual source file, and 
  *  distribute linked combinations including the two. You must obey the GNU 
  *  General Public License in all respects for all of the code used other than 
@@ -47,6 +49,9 @@ extern void schedule_node_save(void);
 /* Queue saving of partition state information */
 extern void schedule_part_save(void);
 
+/* Queue saving of reservation state information */
+extern void schedule_resv_save(void);
+
 /* Queue saving of trigger state information */
 extern void schedule_trigger_save(void);
 
diff --git a/src/slurmctld/step_mgr.c b/src/slurmctld/step_mgr.c
index d796ebd2e8ba00a09fa6528e22f22632854003b4..f6d3b92d79194906fb071e63970d5480395ad5d0 100644
--- a/src/slurmctld/step_mgr.c
+++ b/src/slurmctld/step_mgr.c
@@ -1,14 +1,15 @@
 /*****************************************************************************\
  *  step_mgr.c - manage the job step information of slurm
- *  $Id: step_mgr.c 16584 2009-02-18 19:03:40Z jette $
  *****************************************************************************
  *  Copyright (C) 2002-2007 The Regents of the University of California.
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>, et. al.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -55,27 +56,31 @@
 
 #include "src/common/bitstring.h"
 #include "src/common/checkpoint.h"
-#include "src/common/slurm_protocol_interface.h"
-#include "src/common/switch.h"
-#include "src/common/xstring.h"
 #include "src/common/forward.h"
 #include "src/common/slurm_accounting_storage.h"
 #include "src/common/slurm_jobacct_gather.h"
+#include "src/common/slurm_protocol_interface.h"
+#include "src/common/switch.h"
+#include "src/common/xstring.h"
 
 #include "src/slurmctld/agent.h"
 #include "src/slurmctld/locks.h"
 #include "src/slurmctld/node_scheduler.h"
+#include "src/slurmctld/port_mgr.h"
 #include "src/slurmctld/slurmctld.h"
 #include "src/slurmctld/srun_comm.h"
 
-#define STEP_DEBUG 0
 #define MAX_RETRIES 10
 
 static int  _count_cpus(bitstr_t *bitmap);
+static struct step_record * _create_step_record (struct job_record *job_ptr);
+static void _dump_step_layout(struct step_record *step_ptr);
+static void _free_step_rec(struct step_record *step_ptr);
 static void _pack_ctld_job_step_info(struct step_record *step, Buf buffer);
 static bitstr_t * _pick_step_nodes (struct job_record  *job_ptr, 
 				    job_step_create_request_msg_t *step_spec,
-				    bool batch_step, int *return_code);
+				    int cpus_per_task, bool batch_step,
+				    int *return_code);
 static hostlist_t _step_range_to_hostlist(struct step_record *step_ptr,
 				uint32_t range_first, uint32_t range_last);
 static int _step_hostname_to_inx(struct step_record *step_ptr,
@@ -84,27 +89,33 @@ static void _step_dealloc_lps(struct step_record *step_ptr);
 
 
 /* 
- * create_step_record - create an empty step_record for the specified job.
+ * _create_step_record - create an empty step_record for the specified job.
  * IN job_ptr - pointer to job table entry to have step record added
  * RET a pointer to the record or NULL if error
  * NOTE: allocates memory that should be xfreed with delete_step_record
  */
-struct step_record * 
-create_step_record (struct job_record *job_ptr) 
+static struct step_record * _create_step_record(struct job_record *job_ptr)
 {
 	struct step_record *step_ptr;
 
 	xassert(job_ptr);
+	/* NOTE: Reserve highest step ID values for NO_VAL and
+	 * SLURM_BATCH_SCRIPT */
+	if (job_ptr->next_step_id >= 0xfffffff0) {
+		/* avoid step records in the accounting database */
+		info("job %u has reached step id limit", job_ptr->job_id);
+		return NULL;
+	}
+
 	step_ptr = (struct step_record *) xmalloc(sizeof (struct step_record));
 
 	last_job_update = time(NULL);
-	step_ptr->job_ptr = job_ptr; 
-	step_ptr->step_id = (job_ptr->next_step_id)++;
+	step_ptr->job_ptr = job_ptr;
 	step_ptr->start_time = time(NULL) ;
 	step_ptr->jobacct = jobacct_gather_g_create(NULL);
-	step_ptr->ckpt_path = NULL;
+	step_ptr->ckpt_dir = NULL;
 	if (list_append (job_ptr->step_list, step_ptr) == NULL)
-		fatal ("create_step_record: unable to allocate memory");
+		fatal ("_create_step_record: unable to allocate memory");
 
 	return step_ptr;
 }
@@ -139,21 +150,28 @@ delete_step_records (struct job_record *job_ptr, int filter)
 			switch_free_jobinfo(step_ptr->switch_job);
 		}
 		checkpoint_free_jobinfo(step_ptr->check_job);
-		xfree(step_ptr->host);
-		xfree(step_ptr->name);
-		slurm_step_layout_destroy(step_ptr->step_layout);
-		jobacct_gather_g_destroy(step_ptr->jobacct);
-		FREE_NULL_BITMAP(step_ptr->step_node_bitmap);
-		FREE_NULL_BITMAP(step_ptr->exit_node_bitmap);
-		if (step_ptr->network)
-			xfree(step_ptr->network);
-		xfree(step_ptr->ckpt_path);
-		xfree(step_ptr);
+		_free_step_rec(step_ptr);
 	}		
 
 	list_iterator_destroy (step_iterator);
 }
 
+/* _free_step_rec - delete a step record's data structures */
+static void _free_step_rec(struct step_record *step_ptr)
+{
+	xfree(step_ptr->host);
+	xfree(step_ptr->name);
+	slurm_step_layout_destroy(step_ptr->step_layout);
+	jobacct_gather_g_destroy(step_ptr->jobacct);
+	FREE_NULL_BITMAP(step_ptr->core_bitmap_job);
+	FREE_NULL_BITMAP(step_ptr->exit_node_bitmap);
+	FREE_NULL_BITMAP(step_ptr->step_node_bitmap);
+	xfree(step_ptr->resv_port_array);
+	xfree(step_ptr->resv_ports);
+	xfree(step_ptr->network);
+	xfree(step_ptr->ckpt_dir);
+	xfree(step_ptr);
+}
 
 /* 
  * delete_step_record - delete record for job step for specified job_ptr 
@@ -186,21 +204,9 @@ delete_step_record (struct job_record *job_ptr, uint32_t step_id)
 					step_ptr->step_layout->node_list);
 				switch_free_jobinfo (step_ptr->switch_job);
 			}
+			resv_port_free(step_ptr);
 			checkpoint_free_jobinfo (step_ptr->check_job);
-
-			if (step_ptr->mem_per_task)
-				select_g_step_fini(step_ptr);
-
-			xfree(step_ptr->host);
-			xfree(step_ptr->name);
-			slurm_step_layout_destroy(step_ptr->step_layout);
-			jobacct_gather_g_destroy(step_ptr->jobacct);
-			FREE_NULL_BITMAP(step_ptr->step_node_bitmap);
-			FREE_NULL_BITMAP(step_ptr->exit_node_bitmap);
-			if (step_ptr->network)
-				xfree(step_ptr->network);
-			xfree(step_ptr->ckpt_path);
-			xfree(step_ptr);
+			_free_step_rec(step_ptr);
 			error_code = 0;
 			break;
 		}
@@ -218,21 +224,22 @@ delete_step_record (struct job_record *job_ptr, uint32_t step_id)
 void
 dump_step_desc(job_step_create_request_msg_t *step_spec)
 {
-	if (step_spec == NULL) 
-		return;
-
-	debug3("StepDesc: user_id=%u job_id=%u node_count=%u, cpu_count=%u", 
-		step_spec->user_id, step_spec->job_id, 
-		step_spec->node_count, step_spec->cpu_count);
+	debug3("StepDesc: user_id=%u job_id=%u node_count=%u cpu_count=%u", 
+	       step_spec->user_id, step_spec->job_id, 
+	       step_spec->node_count, step_spec->cpu_count);
 	debug3("   num_tasks=%u relative=%u task_dist=%u node_list=%s", 
-		step_spec->num_tasks, step_spec->relative, 
-		step_spec->task_dist, step_spec->node_list);
-	debug3("   host=%s port=%u name=%s network=%s checkpoint=%u", 
-		step_spec->host, step_spec->port, step_spec->name,
-		step_spec->network, step_spec->ckpt_interval);
-	debug3("   checkpoint-path=%s exclusive=%u immediate=%u mem_per_task=%u",
-	        step_spec->ckpt_path, step_spec->exclusive, 
-		step_spec->immediate, step_spec->mem_per_task);
+	       step_spec->num_tasks, step_spec->relative, 
+	       step_spec->task_dist, step_spec->node_list);
+	debug3("   host=%s port=%u name=%s network=%s exclusive=%u", 
+	       step_spec->host, step_spec->port, step_spec->name,
+	       step_spec->network, step_spec->exclusive);
+	debug3("   checkpoint-dir=%s checkpoint_int=%u",
+	       step_spec->ckpt_dir, step_spec->ckpt_interval);
+	debug3("   mem_per_task=%u resv_port_cnt=%u immediate=%u no_kill=%u",
+	       step_spec->mem_per_task, step_spec->resv_port_cnt,
+	       step_spec->immediate, step_spec->no_kill);
+	debug3("   overcommit=%d",
+	       step_spec->overcommit);
 }
 
 
@@ -244,7 +251,7 @@ dump_step_desc(job_step_create_request_msg_t *step_spec)
  * RET pointer to the job step's record, NULL on error
  */
 struct step_record *
-find_step_record(struct job_record *job_ptr, uint16_t step_id) 
+find_step_record(struct job_record *job_ptr, uint32_t step_id) 
 {
 	ListIterator step_iterator;
 	struct step_record *step_ptr;
@@ -254,10 +261,8 @@ find_step_record(struct job_record *job_ptr, uint16_t step_id)
 
 	step_iterator = list_iterator_create (job_ptr->step_list);
 	while ((step_ptr = (struct step_record *) list_next (step_iterator))) {
-		if ((step_ptr->step_id == step_id)
-		||  ((uint16_t) step_id == (uint16_t) NO_VAL)) {
+		if ((step_ptr->step_id == step_id) || (step_id == NO_VAL))
 			break;
-		}
 	}		
 	list_iterator_destroy (step_iterator);
 
@@ -424,6 +429,7 @@ int job_step_complete(uint32_t job_id, uint32_t step_id, uid_t uid,
  *	we satisfy the super-set of constraints.
  * IN job_ptr - pointer to job to have new step started
  * IN step_spec - job step specification
+ * IN cpus_per_task - number of CPUs required per task (NOTE: may be zero)
  * IN batch_step - if set then step is a batch script
  * OUT return_code - exit code or SLURM_SUCCESS
  * global: node_record_table_ptr - pointer to global node table
@@ -433,17 +439,19 @@ int job_step_complete(uint32_t job_id, uint32_t step_id, uid_t uid,
 static bitstr_t *
 _pick_step_nodes (struct job_record  *job_ptr, 
 		  job_step_create_request_msg_t *step_spec,
+		  int cpus_per_task,
 		  bool batch_step, int *return_code)
 {
-
 	bitstr_t *nodes_avail = NULL, *nodes_idle = NULL;
 	bitstr_t *nodes_picked = NULL, *node_tmp = NULL;
 	int error_code, nodes_picked_cnt=0, cpus_picked_cnt = 0, i;
 	ListIterator step_iterator;
 	struct step_record *step_p;
-#if STEP_DEBUG
-	char *temp;
-#endif
+	select_job_res_t select_ptr = job_ptr->select_job;
+
+	xassert(select_ptr);
+	xassert(select_ptr->cpus);
+	xassert(select_ptr->cpus_used);
 
 	*return_code = SLURM_SUCCESS;
 	if (job_ptr->node_bitmap == NULL) {
@@ -456,10 +464,51 @@ _pick_step_nodes (struct job_record  *job_ptr,
 		fatal("bit_copy malloc failure");
 	bit_and (nodes_avail, up_node_bitmap);
 
-	/* In exclusive mode, satisfy the processor count.
-	 * Do not use nodes that have no unused CPUs */
+	if (step_spec->mem_per_task &&
+	    ((select_ptr->memory_allocated == NULL) ||
+	     (select_ptr->memory_used == NULL))) {
+		error("_pick_step_nodes: lack memory allocation details "
+		      "to enforce memory limits for job %u", job_ptr->job_id);
+		step_spec->mem_per_task = 0;
+	}
+
+	if (job_ptr->next_step_id == 0) {
+		if (job_ptr->details && job_ptr->details->prolog_running) {
+			*return_code = ESLURM_PROLOG_RUNNING;
+			return NULL;
+		}
+		for (i=bit_ffs(job_ptr->node_bitmap); i<node_record_count; 
+		     i++) {
+			if (!bit_test(job_ptr->node_bitmap, i))
+				continue;
+			if ((node_record_table_ptr[i].node_state &
+			     NODE_STATE_POWER_SAVE) ||
+			    (node_record_table_ptr[i].node_state &
+			     NODE_STATE_NO_RESPOND)) {
+				/* Node is/was powered down. Need to wait 
+				 * for it to start responding again. */
+				FREE_NULL_BITMAP(nodes_avail);
+				*return_code = ESLURM_NODES_BUSY;
+				/* Update job's end-time to allow for node
+				 * boot time. */
+				if (job_ptr->time_limit != INFINITE) {
+					job_ptr->end_time = time(NULL) + 
+						(job_ptr->time_limit * 60);
+				}
+				return NULL;
+			}
+		}
+	}
+
+	/* In exclusive mode, just satisfy the processor count.
+	 * Do not use nodes that have no unused CPUs or insufficient 
+	 * unused memory */
 	if (step_spec->exclusive) {
-		int i, j=0, avail, tot_cpus = 0;
+		int avail_cpus, avail_tasks, total_cpus, total_tasks, node_inx;
+		int i_first, i_last;
+		uint32_t avail_mem, total_mem;
+		uint32_t nodes_picked_cnt = 0;
+		uint32_t tasks_picked_cnt = 0, total_task_cnt = 0;
 		bitstr_t *selected_nodes = NULL;
 
 		if (step_spec->node_list) {
@@ -487,54 +536,99 @@ _pick_step_nodes (struct job_record  *job_ptr,
 				goto cleanup;
 			}
 		}
-		for (i=bit_ffs(job_ptr->node_bitmap); i<node_record_count;
-		     i++) {
-			if (!bit_test(job_ptr->node_bitmap, i))
+
+		node_inx = -1;
+		i_first = bit_ffs(select_ptr->node_bitmap);
+		i_last  = bit_fls(select_ptr->node_bitmap);
+		for (i=i_first; i<=i_last; i++) {
+			if (!bit_test(select_ptr->node_bitmap, i))
 				continue;
-			if (selected_nodes && (!bit_test(selected_nodes, i)))
-				avail = 0;
-			else {
-				avail = job_ptr->alloc_lps[j] - 
-					job_ptr->used_lps[j];
-				tot_cpus += job_ptr->alloc_lps[j];
+			node_inx++;
+			if (!bit_test(nodes_avail, i))
+				continue;	/* node now DOWN */
+			avail_cpus = select_ptr->cpus[node_inx] - 
+				     select_ptr->cpus_used[node_inx];
+			total_cpus = select_ptr->cpus[node_inx];
+			if (cpus_per_task > 0) {
+				avail_tasks = avail_cpus / cpus_per_task;
+				total_tasks = total_cpus / cpus_per_task;
+			} else {
+				avail_tasks = step_spec->num_tasks;
+				total_tasks = step_spec->num_tasks;
 			}
-			if ((avail <= 0) ||
+			if (step_spec->mem_per_task) {
+				avail_mem = select_ptr->
+					    memory_allocated[node_inx] -
+					    select_ptr->memory_used[node_inx];
+				avail_mem /= step_spec->mem_per_task;
+				avail_tasks = MIN(avail_tasks, avail_mem);
+				total_mem = select_ptr->
+					    memory_allocated[node_inx];
+				total_mem /= step_spec->mem_per_task;
+				total_tasks = MIN(total_tasks, total_mem);
+			}
+			if ((avail_tasks <= 0) ||
 			    ((selected_nodes == NULL) &&
-			     (cpus_picked_cnt > 0) &&
-			     (cpus_picked_cnt >= step_spec->cpu_count)))
+			     (nodes_picked_cnt >= step_spec->node_count) &&
+			     (tasks_picked_cnt > 0) &&
+			     (tasks_picked_cnt >= step_spec->num_tasks)))
 				bit_clear(nodes_avail, i);
-			else
-				cpus_picked_cnt += avail;
-			if (++j >= job_ptr->node_cnt)
-				break;
+			else {
+				nodes_picked_cnt++;
+				tasks_picked_cnt += avail_tasks;
+			}
+			total_task_cnt += total_tasks;
 		}
+
 		if (selected_nodes) {
 			if (!bit_equal(selected_nodes, nodes_avail)) {
 				/* some required nodes have no available
 				 * processors, defer request */
-				cpus_picked_cnt = 0;
+				tasks_picked_cnt = 0;
 			}
 			bit_free(selected_nodes);
 		}
-		if (cpus_picked_cnt >= step_spec->cpu_count)
-			return nodes_avail;
 
+		if (tasks_picked_cnt >= step_spec->num_tasks)
+			return nodes_avail;
 		FREE_NULL_BITMAP(nodes_avail);
-		if (tot_cpus >= step_spec->cpu_count)
+		if (total_task_cnt >= step_spec->num_tasks)
 			*return_code = ESLURM_NODES_BUSY;
 		else
 			*return_code = ESLURM_REQUESTED_NODE_CONFIG_UNAVAILABLE;
 		return NULL;
 	}
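
In the exclusive branch above, a node's contribution is counted in tasks rather than raw CPUs: unused CPUs are divided by cpus_per_task, and when mem_per_task is set the result is further capped by unused memory. A worked sketch of that per-node computation, with assumed figures:

/* Example (assumed numbers, for illustration only):
 *   cpus = 8, cpus_used = 2          -> 6 unused CPUs
 *   cpus_per_task = 2                -> 6 / 2 = 3 tasks by CPU
 *   memory_allocated - memory_used = 1024 MB,
 *   mem_per_task = 512 MB            -> 2 tasks by memory
 *   avail_tasks = MIN(3, 2) = 2 */
static int avail_tasks_on_node(int cpus, int cpus_used, int cpus_per_task,
			       int mem_alloc, int mem_used, int mem_per_task)
{
	int avail_tasks = cpus - cpus_used;
	if (cpus_per_task > 0)
		avail_tasks /= cpus_per_task;
	if (mem_per_task > 0) {
		int mem_tasks = (mem_alloc - mem_used) / mem_per_task;
		if (mem_tasks < avail_tasks)
			avail_tasks = mem_tasks;
	}
	return avail_tasks;
}
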
 
-	if ( step_spec->node_count == INFINITE)	/* use all nodes */
+	if (step_spec->mem_per_task) {
+		int node_inx = 0, usable_mem;
+		for (i=bit_ffs(select_ptr->node_bitmap); i<node_record_count; 
+		     i++) {
+			if (!bit_test(select_ptr->node_bitmap, i))
+				continue;
+			usable_mem = select_ptr->memory_allocated[node_inx] -
+				     select_ptr->memory_used[node_inx];
+			usable_mem /= step_spec->mem_per_task;
+			if (usable_mem <= 0) {
+				if (step_spec->node_count == INFINITE) {
+					FREE_NULL_BITMAP(nodes_avail);
+					*return_code = 
+						ESLURM_INVALID_TASK_MEMORY;
+					return NULL;
+				}
+				bit_clear(nodes_avail, i);
+			}
+			if (++node_inx >= select_ptr->nhosts)
+				break;
+		}
+	}
+
+	if (step_spec->node_count == INFINITE)	/* use all nodes */
 		return nodes_avail;
 
 	if (step_spec->node_list) {
 		bitstr_t *selected_nodes = NULL;
-#if STEP_DEBUG
-		info("selected nodelist is %s", step_spec->node_list);
-#endif
+		if (slurm_get_debug_flags() & DEBUG_FLAG_STEPS)
+			info("selected nodelist is %s", step_spec->node_list);
 		error_code = node_name2bitmap(step_spec->node_list, false, 
 					      &selected_nodes);
 		
@@ -552,12 +646,13 @@ _pick_step_nodes (struct job_record  *job_ptr,
 			goto cleanup;
 		}
 		if (!bit_super_set(selected_nodes, nodes_avail)) {
-			info ("_pick_step_nodes: requested nodes %s are DOWN",
-			      step_spec->node_list);
+			info ("_pick_step_nodes: requested nodes %s "
+			      "have inadequate memory",
+			       step_spec->node_list);
 			bit_free(selected_nodes);
 			goto cleanup;
 		}
-		if(step_spec->task_dist == SLURM_DIST_ARBITRARY) {
+		if (step_spec->task_dist == SLURM_DIST_ARBITRARY) {
 			/* if we are in arbitrary mode we need to make
 			 * sure we aren't running on an elan switch.
 			 * If we aren't change the number of nodes
@@ -636,42 +731,50 @@ _pick_step_nodes (struct job_record  *job_ptr,
 		while ((step_p = (struct step_record *)
 			list_next(step_iterator))) {
 			bit_or(nodes_idle, step_p->step_node_bitmap);
-#if STEP_DEBUG
-			temp = bitmap2node_name(step_p->step_node_bitmap);
-			info("step %d has nodes %s", step_p->step_id, temp);
-			xfree(temp);
-#endif
+			if (slurm_get_debug_flags() & DEBUG_FLAG_STEPS) {
+				char *temp;
+				temp = bitmap2node_name(step_p->
+							step_node_bitmap);
+				info("step %u.%u has nodes %s", 
+				     job_ptr->job_id, step_p->step_id, temp);
+				xfree(temp);
+			}
 		} 
 		list_iterator_destroy (step_iterator);
 		bit_not(nodes_idle);
 		bit_and(nodes_idle, nodes_avail);
 	}
-#if STEP_DEBUG
-	temp = bitmap2node_name(nodes_avail);
-	info("can pick from %s %d", temp, step_spec->node_count);
-	xfree(temp);
-	temp = bitmap2node_name(nodes_idle);
-	info("can pick from %s", temp);
-	xfree(temp);
-#endif
+
+	if (slurm_get_debug_flags() & DEBUG_FLAG_STEPS) {
+		char *temp1, *temp2;
+		temp1 = bitmap2node_name(nodes_avail);
+		temp2 = bitmap2node_name(nodes_idle);
+		info("step pick %u nodes, avail:%s idle:%s", 
+		     step_spec->node_count, temp1, temp2);
+		xfree(temp1);
+		xfree(temp2);
+	}
 
 	/* if user specifies step needs a specific processor count and 
 	 * all nodes have the same processor count, just translate this to
 	 * a node count */
-	if (step_spec->cpu_count && (job_ptr->num_cpu_groups == 1) && 
-	    job_ptr->cpus_per_node[0]) {
-		i = (step_spec->cpu_count + (job_ptr->cpus_per_node[0] - 1) ) 
-				/ job_ptr->cpus_per_node[0];
+	if (step_spec->cpu_count && job_ptr->select_job && 
+	    (job_ptr->select_job->cpu_array_cnt == 1) &&
+	    job_ptr->select_job->cpu_array_value) {
+		i = (step_spec->cpu_count + 
+		     (job_ptr->select_job->cpu_array_value[0] - 1)) /
+		    job_ptr->select_job->cpu_array_value[0];
 		step_spec->node_count = (i > step_spec->node_count) ? 
-						i : step_spec->node_count ;
+					 i : step_spec->node_count ;
 		//step_spec->cpu_count = 0;
 	}
 
 	if (step_spec->node_count) {
 		nodes_picked_cnt = bit_set_count(nodes_picked);
-#if STEP_DEBUG
-		info("got %u %d", step_spec->node_count, nodes_picked_cnt);
-#endif
+		if (slurm_get_debug_flags() & DEBUG_FLAG_STEPS) {
+			verbose("got %u %d", step_spec->node_count, 
+			        nodes_picked_cnt);
+		}
 		if (nodes_idle 
 		    && (bit_set_count(nodes_idle) >= step_spec->node_count)
 		    && (step_spec->node_count > nodes_picked_cnt)) {
@@ -708,7 +811,7 @@ _pick_step_nodes (struct job_record  *job_ptr,
 		cpus_picked_cnt = _count_cpus(nodes_picked);
 		/* user is requesting more cpus than we got from the
 		 * picked nodes we should return with an error */
-		if(step_spec->cpu_count > cpus_picked_cnt) {
+		if (step_spec->cpu_count > cpus_picked_cnt) {
 			debug2("Have %d nodes with %d cpus which is less "
 			       "than what the user is asking for (%d cpus) "
 			       "aborting.",
@@ -753,84 +856,290 @@ static int _count_cpus(bitstr_t *bitmap)
 	return sum;
 }
 
+/* Update the step's core bitmaps, create as needed.
+ *	Add the specified task count for a specific node in the job's 
+ *	and step's allocation */
+static void _pick_step_cores(struct step_record *step_ptr, 
+			     select_job_res_t select_ptr, 
+			     int job_node_inx, uint16_t task_cnt)
+{
+	int bit_offset, core_inx, i, sock_inx;
+	uint16_t sockets, cores;
+	int cpu_cnt = (int) task_cnt;
+	bool use_all_cores;
+	static int last_core_inx;
+
+	if (!step_ptr->core_bitmap_job) {
+		step_ptr->core_bitmap_job = bit_alloc(bit_size(select_ptr->
+							       core_bitmap));
+	}
+	if (get_select_job_res_cnt(select_ptr, job_node_inx, &sockets, &cores))
+		fatal("get_select_job_res_cnt");
+
+	if (task_cnt == (cores * sockets))
+		use_all_cores = true;
+	else
+		use_all_cores = false;
+	if (step_ptr->cpus_per_task > 0)
+		cpu_cnt *= step_ptr->cpus_per_task;
+
+	/* select idle cores first */
+	for (core_inx=0; core_inx<cores; core_inx++) {
+		for (sock_inx=0; sock_inx<sockets; sock_inx++) {
+			bit_offset = get_select_job_res_offset(select_ptr,
+							       job_node_inx,
+							       sock_inx, 
+							       core_inx);
+			if (bit_offset < 0)
+				fatal("get_select_job_res_offset");
+			if (!bit_test(select_ptr->core_bitmap, bit_offset))
+				continue;
+			if ((use_all_cores == false) &&
+			    bit_test(select_ptr->core_bitmap_used, bit_offset))
+				continue;
+			bit_set(select_ptr->core_bitmap_used, bit_offset);
+			bit_set(step_ptr->core_bitmap_job, bit_offset);
+#if 0
+			info("step alloc N:%d S:%d C:%d", 
+			     job_node_inx, sock_inx, core_inx);
+#endif
+			if (--cpu_cnt == 0)
+				return;
+		}
+	}
+	if (use_all_cores)
+		return;
+
+	/* We need to over-subscribe one or more cores.
+	 * Use last_core_inx to avoid putting all of the extra
+	 * work onto core zero */
+	verbose("job step needs to over-subscribe cores");
+	last_core_inx = (last_core_inx + 1) % cores;
+	for (i=0; i<cores; i++) {
+		core_inx = (last_core_inx + i) % cores;
+		for (sock_inx=0; sock_inx<sockets; sock_inx++) {
+			bit_offset = get_select_job_res_offset(select_ptr,
+							       job_node_inx,
+							       sock_inx, 
+							       core_inx);
+			if (bit_offset < 0)
+				fatal("get_select_job_res_offset");
+			if (!bit_test(select_ptr->core_bitmap, bit_offset))
+				continue;
+			if (bit_test(step_ptr->core_bitmap_job, bit_offset))
+				continue;   /* already taken by this step */
+			bit_set(step_ptr->core_bitmap_job, bit_offset);
+#if 0
+			info("step alloc N:%d S:%d C:%d", 
+			     job_node_inx, sock_inx, core_inx);
+#endif
+			if (--cpu_cnt == 0)
+				return;
+		}
+	}
+}
+
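
_pick_step_cores() above makes two passes: idle cores first, then, only if CPUs are still needed, an over-subscription pass that starts at a rotating index (last_core_inx) so that repeated over-subscription does not always land on core zero. The rotation idiom in isolation (illustrative sketch):

/* Each call starts its scan one core later than the previous call,
 * wrapping at `cores`, so extra work is spread across all cores. */
static int last_core_inx = 0;

static void scan_cores_rotated(int cores)
{
	int i, core_inx;

	last_core_inx = (last_core_inx + 1) % cores;
	for (i = 0; i < cores; i++) {
		core_inx = (last_core_inx + i) % cores;
		/* visit core_inx here */
	}
}
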
+
 /* Update a job's record of allocated CPUs when a job step gets scheduled */
 extern void step_alloc_lps(struct step_record *step_ptr)
 {
 	struct job_record  *job_ptr = step_ptr->job_ptr;
+	select_job_res_t select_ptr = job_ptr->select_job;
+	int cpus_alloc;
 	int i_node, i_first, i_last;
 	int job_node_inx = -1, step_node_inx = -1;
+	bool pick_step_cores = true;
+
+	xassert(select_ptr);
+	xassert(select_ptr->core_bitmap);
+	xassert(select_ptr->core_bitmap_used);
+	xassert(select_ptr->cpus);
+	xassert(select_ptr->cpus_used);
 
 	if (step_ptr->step_layout == NULL)	/* batch step */
 		return;
 
-	i_first = bit_ffs(job_ptr->node_bitmap);
-	i_last  = bit_fls(job_ptr->node_bitmap);
+	i_first = bit_ffs(select_ptr->node_bitmap);
+	i_last  = bit_fls(select_ptr->node_bitmap);
 	if (i_first == -1)	/* empty bitmap */
 		return;
+
+	if (step_ptr->core_bitmap_job) {
+		/* "scontrol reconfig" of live system */
+		pick_step_cores = false;
+	} else if ((step_ptr->exclusive == 0) ||
+		   (step_ptr->cpu_count == job_ptr->total_procs)) {
+		/* Step uses all of job's cores
+		 * Just copy the bitmap to save time */
+		step_ptr->core_bitmap_job = bit_copy(select_ptr->core_bitmap);
+		pick_step_cores = false;
+	}
+
+	if (step_ptr->mem_per_task &&
+	    ((select_ptr->memory_allocated == NULL) ||
+	     (select_ptr->memory_used == NULL))) {
+		error("step_alloc_lps: lack memory allocation details "
+		      "to enforce memory limits for job %u", job_ptr->job_id);
+		step_ptr->mem_per_task = 0;
+	}
+
 	for (i_node = i_first; i_node <= i_last; i_node++) {
-		if (!bit_test(job_ptr->node_bitmap, i_node))
+		if (!bit_test(select_ptr->node_bitmap, i_node))
 			continue;
 		job_node_inx++;
 		if (!bit_test(step_ptr->step_node_bitmap, i_node))
 			continue;
 		step_node_inx++;
-		if (step_ptr->cpus_per_task) {
-			job_ptr->used_lps[job_node_inx] +=
-				step_ptr->step_layout->tasks[step_node_inx];
+		if (job_node_inx >= select_ptr->nhosts)
+			fatal("step_alloc_lps: node index bad");
+		/* NOTE: The --overcommit option can result in
+		 * cpus_used[] having a higher value than cpus[] */
+		cpus_alloc = step_ptr->step_layout->tasks[step_node_inx] *
+			     step_ptr->cpus_per_task;
+		select_ptr->cpus_used[job_node_inx] += cpus_alloc; 
+		if (step_ptr->mem_per_task) {
+			select_ptr->memory_used[job_node_inx] += 
+				(step_ptr->mem_per_task *
+				 step_ptr->step_layout->tasks[step_node_inx]);
+		}
+		if (pick_step_cores) {
+			_pick_step_cores(step_ptr, select_ptr, 
+					 job_node_inx,
+					 step_ptr->step_layout->
+					 tasks[step_node_inx]);
+		}
+		if (slurm_get_debug_flags() & DEBUG_FLAG_CPU_BIND)
+			_dump_step_layout(step_ptr);
+		if (slurm_get_debug_flags() & DEBUG_FLAG_STEPS) {
+			info("step alloc of %s procs: %u of %u", 
+			     node_record_table_ptr[i_node].name,
+			     select_ptr->cpus_used[job_node_inx],
+			     select_ptr->cpus[job_node_inx]);
 		}
-#if 0
-		info("step alloc of %s procs: %u of %u", 
-			node_record_table_ptr[i_node].name,
-			job_ptr->used_lps[job_node_inx],
-			job_ptr->alloc_lps[job_node_inx]);
-#endif
 		if (step_node_inx == (step_ptr->step_layout->node_cnt - 1))
 			break;
 	}
 	
 }
 
+/* Dump a job step's CPU binding information.
+ * NOTE: The core_bitmap_job and node index are based upon 
+ * the _job_ allocation */
+static void _dump_step_layout(struct step_record *step_ptr)
+{
+	struct job_record* job_ptr = step_ptr->job_ptr;
+	select_job_res_t select_ptr = job_ptr->select_job;
+	int i, bit_inx, core_inx, node_inx, rep, sock_inx;
+
+	if ((step_ptr->core_bitmap_job == NULL) ||
+	    (select_ptr == NULL) || (select_ptr->cores_per_socket == NULL))
+		return;
+
+	info("====================");
+	info("step_id:%u.%u", job_ptr->job_id, step_ptr->step_id);
+	for (i=0, bit_inx= 0, node_inx=0; node_inx<select_ptr->nhosts; i++) {
+		for (rep=0; rep<select_ptr->sock_core_rep_count[i]; rep++) {
+			for (sock_inx=0; 
+			     sock_inx<select_ptr->sockets_per_node[i]; 
+			     sock_inx++) {
+				for (core_inx=0; 
+			 	     core_inx<select_ptr->cores_per_socket[i]; 
+			 	     core_inx++) {
+					if (bit_test(step_ptr->
+						     core_bitmap_job, 
+						     bit_inx++)) {
+						info("JobNode[%d] Socket[%d] "
+						     "Core[%d] is allocated",
+						     node_inx, sock_inx, 
+						     core_inx);
+					}
+				}
+			}
+			node_inx++;
+		}
+	}
+	info("====================");
+}
+
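
_dump_step_layout() above walks yet another run-length encoding: sockets_per_node[i] and cores_per_socket[i] give one node shape and sock_core_rep_count[i] says how many consecutive nodes share it, so the core-bitmap index advances by sockets*cores per node. A sketch of locating the first core bit of a given job-relative node under those conventions (hypothetical helper):

/* Bit offset of the first core of node `node_inx`, given the
 * run-length encoded shape arrays used above (illustrative). */
static int node_core_offset(int node_inx, uint16_t *sockets_per_node,
			    uint16_t *cores_per_socket,
			    uint32_t *sock_core_rep_count)
{
	int i = 0, node = 0, offset = 0;

	while (node < node_inx) {
		int remain = node_inx - node;
		int rep = sock_core_rep_count[i];
		int take = (remain < rep) ? remain : rep;
		offset += take * sockets_per_node[i] * cores_per_socket[i];
		node += take;
		if (take == rep)
			i++;	/* consumed this whole group */
	}
	return offset;
}
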
 static void _step_dealloc_lps(struct step_record *step_ptr)
 {
 	struct job_record  *job_ptr = step_ptr->job_ptr;
+	select_job_res_t select_ptr = job_ptr->select_job;
+	int cpus_alloc;
 	int i_node, i_first, i_last;
 	int job_node_inx = -1, step_node_inx = -1;
 
+	xassert(select_ptr);
+	xassert(select_ptr->core_bitmap);
+	xassert(select_ptr->core_bitmap_used);
+	xassert(select_ptr->cpus);
+	xassert(select_ptr->cpus_used);
+
 	if (step_ptr->step_layout == NULL)	/* batch step */
 		return;
 
-	i_first = bit_ffs(job_ptr->node_bitmap);
-	i_last  = bit_fls(job_ptr->node_bitmap);
+	i_first = bit_ffs(select_ptr->node_bitmap);
+	i_last  = bit_fls(select_ptr->node_bitmap);
 	if (i_first == -1)	/* empty bitmap */
 		return;
+
+	if (step_ptr->mem_per_task &&
+	    ((select_ptr->memory_allocated == NULL) ||
+	     (select_ptr->memory_used == NULL))) {
+		error("_step_dealloc_lps: lack memory allocation details "
+		      "to enforce memory limits for job %u", job_ptr->job_id);
+		step_ptr->mem_per_task = 0;
+	}
+
 	for (i_node = i_first; i_node <= i_last; i_node++) {
-		if (!bit_test(job_ptr->node_bitmap, i_node))
+		if (!bit_test(select_ptr->node_bitmap, i_node))
 			continue;
 		job_node_inx++;
 		if (!bit_test(step_ptr->step_node_bitmap, i_node))
 			continue;
 		step_node_inx++;
-		if (step_ptr->cpus_per_task == 0)
-			;	/* no CPUs allocated */
-		else if (job_ptr->used_lps[job_node_inx] >=
-		    step_ptr->step_layout->tasks[step_node_inx]) {
-			job_ptr->used_lps[job_node_inx] -= 
-				step_ptr->step_layout->tasks[step_node_inx];
-		} else {
-			error("_step_dealloc_lps: underflow for %u.%u",
+		if (job_node_inx >= select_ptr->nhosts)
+			fatal("_step_dealloc_lps: node index bad");
+		cpus_alloc = step_ptr->step_layout->tasks[step_node_inx] *
+			     step_ptr->cpus_per_task;
+		if (select_ptr->cpus_used[job_node_inx] >= cpus_alloc)
+			select_ptr->cpus_used[job_node_inx] -= cpus_alloc;
+		else {
+			error("_step_dealloc_lps: cpu underflow for %u.%u",
 				job_ptr->job_id, step_ptr->step_id);
-			job_ptr->used_lps[job_node_inx] = 0;
+			select_ptr->cpus_used[job_node_inx] = 0;
+		}
+		if (step_ptr->mem_per_task) {
+			uint32_t mem_use = step_ptr->mem_per_task *
+					   step_ptr->step_layout->
+					   tasks[step_node_inx];
+			if (select_ptr->memory_used[job_node_inx] >= mem_use) {
+				select_ptr->memory_used[job_node_inx] -= 
+						mem_use;
+			} else {
+				error("_step_dealloc_lps: "
+				      "mem underflow for %u.%u",
+				      job_ptr->job_id, step_ptr->step_id);
+				select_ptr->memory_used[job_node_inx] = 0;
+			}
+		}
+		if (slurm_get_debug_flags() & DEBUG_FLAG_STEPS) {
+			info("step dealloc of %s procs: %u of %u", 
+			     node_record_table_ptr[i_node].name,
+			     select_ptr->cpus_used[job_node_inx],
+			     select_ptr->cpus[job_node_inx]);
 		}
-#if 0
-		info("step dealloc of %s procs: %u of %u", 
-			node_record_table_ptr[i_node].name,
-			job_ptr->used_lps[job_node_inx],
-			job_ptr->alloc_lps[job_node_inx]);
-#endif
 		if (step_node_inx == (step_ptr->step_layout->node_cnt - 1))
 			break;
 	}
-	
+	if (step_ptr->core_bitmap_job) {
+		/* Mark the job's cores as no longer in use */
+		bit_not(step_ptr->core_bitmap_job);
+		bit_and(select_ptr->core_bitmap_used,
+			step_ptr->core_bitmap_job);
+		/* no need for bit_not(step_ptr->core_bitmap_job); */
+		FREE_NULL_BITMAP(step_ptr->core_bitmap_job);
+	}
 }
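
The deallocation path above clears the step's cores from the job's in-use bitmap with a complement-and-AND: inverting core_bitmap_job turns it into a keep-mask, so the bit_and() clears exactly the bits the step held. The same idiom on a plain machine word:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t used = 0xF0F0;	/* job cores in use (example value)  */
	uint32_t step = 0x00F0;	/* cores held by the finishing step */

	used &= ~step;		/* bit_not(step); bit_and(used, step) */
	assert(used == 0xF000);
	return 0;
}
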
 
 /*
@@ -852,9 +1161,10 @@ step_create(job_step_create_request_msg_t *step_specs,
 	struct step_record *step_ptr;
 	struct job_record  *job_ptr;
 	bitstr_t *nodeset;
-	int cpus_per_task, node_count, ret_code;
+	int cpus_per_task, node_count, ret_code, i;
 	time_t now = time(NULL);
 	char *step_node_list = NULL;
+	uint32_t orig_cpu_count;
 
 	*new_step_record = NULL;
 	job_ptr = find_job_record (step_specs->job_id);
@@ -890,17 +1200,6 @@ step_create(job_step_create_request_msg_t *step_specs,
 	    (job_ptr->end_time <= time(NULL)))
 		return ESLURM_ALREADY_DONE;
 
-	if (job_ptr->details->job_min_memory) {
-		/* use memory reserved by job, no limit on steps */
-		step_specs->mem_per_task = 0;
-	} else if (step_specs->mem_per_task) {
-		if (slurmctld_conf.max_mem_per_task &&
-		    (step_specs->mem_per_task > 
-		     slurmctld_conf.max_mem_per_task))
-			return ESLURM_INVALID_TASK_MEMORY;
-	} else
-		step_specs->mem_per_task = slurmctld_conf.def_mem_per_task;
-
 	if ((step_specs->task_dist != SLURM_DIST_CYCLIC) &&
 	    (step_specs->task_dist != SLURM_DIST_BLOCK) &&
 	    (step_specs->task_dist != SLURM_DIST_CYCLIC_CYCLIC) &&
@@ -911,8 +1210,8 @@ step_create(job_step_create_request_msg_t *step_specs,
 	    (step_specs->task_dist != SLURM_DIST_ARBITRARY))
 		return ESLURM_BAD_DIST;
 
-	if (step_specs->task_dist == SLURM_DIST_ARBITRARY
-	    && (!strcmp(slurmctld_conf.switch_type, "switch/elan"))) {
+	if ((step_specs->task_dist == SLURM_DIST_ARBITRARY) &&
+	    (!strcmp(slurmctld_conf.switch_type, "switch/elan"))) {
 		return ESLURM_TASKDIST_ARBITRARY_UNSUPPORTED;
 	}
 
@@ -924,27 +1223,37 @@ step_create(job_step_create_request_msg_t *step_specs,
 	     (strlen(step_specs->network)   > MAX_STR_LEN)) ||
 	    (step_specs->name      && 
 	     (strlen(step_specs->name)      > MAX_STR_LEN)) ||
-	    (step_specs->ckpt_path && 
-	     (strlen(step_specs->ckpt_path) > MAX_STR_LEN)))
+	    (step_specs->ckpt_dir && 
+	     (strlen(step_specs->ckpt_dir) > MAX_STR_LEN)))
 		return ESLURM_PATHNAME_TOO_LONG;
 
-	/* we can figure out the cpus_per_task here by reversing what happens
-	 * in srun, record argument, plus save/restore in slurm v1.4 */
+	/* If the overcommit flag is set, force cpu_count to zero so
+	 * that no check is made against the available CPUs.
+	 */
+	orig_cpu_count = step_specs->cpu_count;
+	if (step_specs->overcommit) {
+		if (step_specs->exclusive) {
+			/* Not really a legitimate combination, try to 
+			 * exclusively allocate one CPU per task */
+			step_specs->overcommit = 0;
+			step_specs->cpu_count = step_specs->num_tasks;
+		} else
+			step_specs->cpu_count = 0;
+	}
+
+	/* determine cpus_per_task value by reversing what srun does */
+	if (step_specs->num_tasks < 1)
+		return ESLURM_BAD_TASK_COUNT;
 	if (step_specs->cpu_count == 0)
 		cpus_per_task = 0;
-	else if (step_specs->num_tasks < 1)
-		cpus_per_task = 1;
 	else {
 		cpus_per_task = step_specs->cpu_count / step_specs->num_tasks;
 		if (cpus_per_task < 1)
 			cpus_per_task = 1;
 	}
 
-	/* if the overcommit flag is checked we 0 out the cpu_count
-	 * which makes it so we don't check to see the available cpus
-	 */	 
-	if (step_specs->overcommit)
-		step_specs->cpu_count = 0;
+	if (step_specs->no_kill > 1)
+		step_specs->no_kill = 1;
 
 	if (job_ptr->kill_on_step_done)
 		/* Don't start more steps, job already being cancelled */
@@ -952,7 +1261,8 @@ step_create(job_step_create_request_msg_t *step_specs,
 	job_ptr->kill_on_step_done = kill_job_when_step_done;
 
 	job_ptr->time_last_active = now;
-	nodeset = _pick_step_nodes(job_ptr, step_specs, batch_step, &ret_code);
+	nodeset = _pick_step_nodes(job_ptr, step_specs,
+				   cpus_per_task, batch_step, &ret_code);
 	if (nodeset == NULL)
 		return ret_code;
 	node_count = bit_set_count(nodeset);
@@ -964,17 +1274,19 @@ step_create(job_step_create_request_msg_t *step_specs,
 			step_specs->num_tasks = node_count;
 	}
 	
-	if ((step_specs->num_tasks < 1)
-	||  (step_specs->num_tasks > (node_count*MAX_TASKS_PER_NODE))) {
+	if (step_specs->num_tasks > (node_count*MAX_TASKS_PER_NODE)) {
 		error("step has invalid task count: %u", 
 		      step_specs->num_tasks);
 		bit_free(nodeset);
 		return ESLURM_BAD_TASK_COUNT;
 	}
 
-	step_ptr = create_step_record (job_ptr);
-	if (step_ptr == NULL)
-		fatal ("create_step_record failed with no memory");
+	step_ptr = _create_step_record (job_ptr);
+	if (step_ptr == NULL) {
+		bit_free(nodeset);
+		return ESLURMD_TOOMANYSTEPS;
+	}
+	step_ptr->step_id = job_ptr->next_step_id++;
 
 	/* set the step_record values */
 
@@ -989,10 +1301,10 @@ step_create(job_step_create_request_msg_t *step_specs,
 		xfree(step_specs->node_list);
 		step_specs->node_list = xstrdup(step_node_list);
 	}
-#if STEP_DEBUG
-	info("got %s and %s looking for %d nodes", step_node_list,
-	     step_specs->node_list, step_specs->node_count);
-#endif
+	if (slurm_get_debug_flags() & DEBUG_FLAG_STEPS) {
+		verbose("got %s and %s looking for %d nodes", step_node_list,
+		        step_specs->node_list, step_specs->node_count);
+	}
 	step_ptr->step_node_bitmap = nodeset;
 	
 	switch(step_specs->task_dist) {
@@ -1013,9 +1325,11 @@ step_create(job_step_create_request_msg_t *step_specs,
 	step_ptr->mem_per_task = step_specs->mem_per_task;
 	step_ptr->ckpt_interval = step_specs->ckpt_interval;
 	step_ptr->ckpt_time = now;
+	step_ptr->cpu_count = orig_cpu_count;
 	step_ptr->exit_code = NO_VAL;
 	step_ptr->exclusive = step_specs->exclusive;
-	step_ptr->ckpt_path = xstrdup(step_specs->ckpt_path);
+	step_ptr->ckpt_dir  = xstrdup(step_specs->ckpt_dir);
+	step_ptr->no_kill   = step_specs->no_kill;
 
 	/* step's name and network default to job's values if not 
 	 * specified in the step specification */
@@ -1038,8 +1352,30 @@ step_create(job_step_create_request_msg_t *step_specs,
 					   (uint16_t)cpus_per_task,
 					   step_specs->task_dist,
 					   step_specs->plane_size);
-		if (!step_ptr->step_layout)
+		xfree(step_node_list);
+		if (!step_ptr->step_layout) {
+			delete_step_record (job_ptr, step_ptr->step_id);
 			return SLURM_ERROR;
+		}
+
+		if ((step_specs->resv_port_cnt != (uint16_t) NO_VAL) &&
+		    (step_specs->resv_port_cnt == 0)) {
+			/* reserved port count set to max task count on any node */
+			for (i=0; i<step_ptr->step_layout->node_cnt; i++) {
+				step_specs->resv_port_cnt = 
+					MAX(step_specs->resv_port_cnt,
+					    step_ptr->step_layout->tasks[i]);
+			}
+		}
+		if (step_specs->resv_port_cnt != (uint16_t) NO_VAL) {
+			step_ptr->resv_port_cnt = step_specs->resv_port_cnt;
+			i = resv_port_alloc(step_ptr);
+			if (i != SLURM_SUCCESS) {
+				delete_step_record (job_ptr, step_ptr->step_id);
+				return i;
+			}
+		}
+
 		if (switch_alloc_jobinfo (&step_ptr->switch_job) < 0)
 			fatal ("step_create: switch_alloc_jobinfo error");
 		
@@ -1053,17 +1389,10 @@ step_create(job_step_create_request_msg_t *step_specs,
 			return ESLURM_INTERCONNECT_FAILURE;
 		}
 		step_alloc_lps(step_ptr);
-	}
+	} else
+		xfree(step_node_list);
 	if (checkpoint_alloc_jobinfo (&step_ptr->check_job) < 0)
 		fatal ("step_create: checkpoint_alloc_jobinfo error");
-	xfree(step_node_list);
-	if (step_ptr->mem_per_task &&
-	    (select_g_step_begin(step_ptr) != SLURM_SUCCESS)) {
-		error("No memory to allocate step for job %u", job_ptr->job_id);
-		step_ptr->mem_per_task = 0;	/* no memory to be freed */
-		delete_step_record (job_ptr, step_ptr->step_id);
-		return ESLURM_INVALID_TASK_MEMORY;
-	}
 	*new_step_record = step_ptr;
 	jobacct_storage_g_step_start(acct_db_conn, step_ptr);
 	return SLURM_SUCCESS;
@@ -1071,39 +1400,63 @@ step_create(job_step_create_request_msg_t *step_specs,
 
 extern slurm_step_layout_t *step_layout_create(struct step_record *step_ptr,
 					       char *step_node_list,
-					       uint16_t node_count,
+					       uint32_t node_count,
 					       uint32_t num_tasks,
 					       uint16_t cpus_per_task,
 					       uint16_t task_dist,
 					       uint32_t plane_size)
 {
-	uint32_t cpus_per_node[node_count];
+	uint16_t cpus_per_node[node_count];
 	uint32_t cpu_count_reps[node_count];
 	int cpu_inx = -1;
-	int usable_cpus = 0, i;
-	int set_nodes = 0, set_cpus = 0;
+	int i, usable_cpus, usable_mem;
+	int set_cpus = 0, set_nodes = 0, set_tasks = 0;
 	int pos = -1;
+	int first_bit, last_bit;
 	struct job_record *job_ptr = step_ptr->job_ptr;
+	select_job_res_t select_ptr = job_ptr->select_job;
+
+	xassert(select_ptr);
+	xassert(select_ptr->cpus);
+	xassert(select_ptr->cpus_used);
+
+	if (step_ptr->mem_per_task &&
+	    ((select_ptr->memory_allocated == NULL) ||
+	     (select_ptr->memory_used == NULL))) {
+		error("step_layout_create: lack memory allocation details "
+		      "to enforce memory limits for job %u", job_ptr->job_id);
+		step_ptr->mem_per_task = 0;
+	}
 
 	/* build the cpus-per-node arrays for the subset of nodes
-	   used by this job step */
-	for (i = 0; i < node_record_count; i++) {
+	 * used by this job step */
+	first_bit = bit_ffs(step_ptr->step_node_bitmap);
+	last_bit  = bit_fls(step_ptr->step_node_bitmap);
+	for (i = first_bit; i <= last_bit; i++) {
 		if (bit_test(step_ptr->step_node_bitmap, i)) {
 			/* find out the position in the job */
-			pos = bit_get_pos_num(job_ptr->node_bitmap, i);
+			pos = bit_get_pos_num(select_ptr->node_bitmap, i);
 			if (pos == -1)
 				return NULL;
+			if (pos >= select_ptr->nhosts)
+				fatal("step_layout_create: node index bad");
 			if (step_ptr->exclusive) {
-				usable_cpus = job_ptr->alloc_lps[pos] -
-					      job_ptr->used_lps[pos];
-				if (usable_cpus < 0) {
-					error("step_layout_create exclusive");
-					return NULL;
-				}
-				usable_cpus = MIN(usable_cpus, 
-						  (num_tasks - set_cpus));
+				usable_cpus = select_ptr->cpus[pos] -
+					      select_ptr->cpus_used[pos];
 			} else
-				usable_cpus = job_ptr->alloc_lps[pos];
+				usable_cpus = select_ptr->cpus[pos];
+			if (step_ptr->mem_per_task) {
+				usable_mem = select_ptr->memory_allocated[pos] -
+					     select_ptr->memory_used[pos];
+				usable_mem /= step_ptr->mem_per_task;
+				if (cpus_per_task > 0)
+					usable_mem *= cpus_per_task;
+				usable_cpus = MIN(usable_cpus, usable_mem);
+			}
+			if (usable_cpus <= 0) {
+				error("step_layout_create no usable cpus");
+				return NULL;
+			}
 			debug3("step_layout cpus = %d pos = %d", 
 			       usable_cpus, pos);
 			
@@ -1117,11 +1470,15 @@ extern slurm_step_layout_t *step_layout_create(struct step_record *step_ptr,
 				cpu_count_reps[cpu_inx]++;
 			set_nodes++;
 			set_cpus += usable_cpus;
+			if (cpus_per_task > 0)
+				set_tasks += usable_cpus / cpus_per_task;
+			else
+				set_tasks = num_tasks;
 			if (set_nodes == node_count)
 				break;
 		}
 	}
-	
+
 	/* layout the tasks on the nodes */
 	return slurm_step_layout_create(step_node_list,
 					cpus_per_node, cpu_count_reps, 
@@ -1149,7 +1506,7 @@ static void _pack_ctld_job_step_info(struct step_record *step_ptr, Buf buffer)
 		node_list = step_ptr->job_ptr->nodes;	
 	}
 	pack32(step_ptr->job_ptr->job_id, buffer);
-	pack16(step_ptr->step_id, buffer);
+	pack32(step_ptr->step_id, buffer);
 	pack16(step_ptr->ckpt_interval, buffer);
 	pack32(step_ptr->job_ptr->user_id, buffer);
 	pack32(task_cnt, buffer);
@@ -1165,11 +1522,12 @@ static void _pack_ctld_job_step_info(struct step_record *step_ptr, Buf buffer)
 	}
 	pack_time(run_time, buffer);
 	packstr(step_ptr->job_ptr->partition, buffer);
+	packstr(step_ptr->resv_ports, buffer);
 	packstr(node_list, buffer);
 	packstr(step_ptr->name, buffer);
 	packstr(step_ptr->network, buffer);
 	pack_bit_fmt(step_ptr->step_node_bitmap, buffer);
-	packstr(step_ptr->ckpt_path, buffer);
+	packstr(step_ptr->ckpt_dir, buffer);
 	
 }
 
@@ -1283,29 +1641,35 @@ extern int pack_ctld_job_step_info_response_msg(uint32_t job_id,
 }
 
 /* 
- * step_on_node - determine if the specified job has any job steps allocated to 
- * 	the specified node 
+ * kill_step_on_node - determine if the specified job has any job steps
+ *	allocated to the specified node and kill them unless no_kill flag
+ *	is set on the step
  * IN job_ptr - pointer to an active job record
  * IN node_ptr - pointer to a node record
- * RET true of job has step on the node, false otherwise 
+ * RET count of killed job steps
  */
-bool step_on_node(struct job_record  *job_ptr, struct node_record *node_ptr)
+extern int kill_step_on_node(struct job_record  *job_ptr, 
+			     struct node_record *node_ptr)
 {
 	ListIterator step_iterator;
 	struct step_record *step_ptr;
-	bool found = false;
+	int found = 0;
 	int bit_position;
 
 	if ((job_ptr == NULL) || (node_ptr == NULL))
-		return false;
+		return found;
 
 	bit_position = node_ptr - node_record_table_ptr;
 	step_iterator = list_iterator_create (job_ptr->step_list);	
 	while ((step_ptr = (struct step_record *) list_next (step_iterator))) {
-		if (bit_test(step_ptr->step_node_bitmap, bit_position)) {
-			found = true;
-			break;
-		}
+		if (step_ptr->no_kill ||
+		    (bit_test(step_ptr->step_node_bitmap, bit_position) == 0))
+			continue;
+		info("killing step %u.%u on down node %s", 
+		     job_ptr->job_id, step_ptr->step_id, node_ptr->name);
+		srun_step_complete(step_ptr);
+		signal_step_tasks(step_ptr, SIGKILL);
+		found++;
 	}		
 
 	list_iterator_destroy (step_iterator);
@@ -1353,40 +1717,23 @@ extern int job_step_checkpoint(checkpoint_msg_t *ckpt_ptr,
 		goto reply;
 	}
 
-	bzero((void *)&resp_data, sizeof(checkpoint_resp_msg_t));
-	/* find the individual job step */
-	if (ckpt_ptr->step_id != NO_VAL) {
-		step_ptr = find_step_record(job_ptr, ckpt_ptr->step_id);
-		if (step_ptr == NULL) {
-			rc = ESLURM_INVALID_JOB_ID;
-			goto reply;
-		} else {
-			rc = checkpoint_op(ckpt_ptr->op, ckpt_ptr->data, 
-				(void *)step_ptr, &resp_data.event_time, 
-				&resp_data.error_code, &resp_data.error_msg);
-			last_job_update = time(NULL);
-		}
-	}
-
-	/* operate on all of a job's steps */
-	else {
-		int update_rc = -2;
-		ListIterator step_iterator;
-
-		step_iterator = list_iterator_create (job_ptr->step_list);
-		while ((step_ptr = (struct step_record *) 
-					list_next (step_iterator))) {
-			update_rc = checkpoint_op(ckpt_ptr->op, 
-						  ckpt_ptr->data,
-						  (void *)step_ptr,
-						  &resp_data.event_time,
-						  &resp_data.error_code,
-						  &resp_data.error_msg);
-			rc = MAX(rc, update_rc);
+	memset((void *)&resp_data, 0, sizeof(checkpoint_resp_msg_t));
+	step_ptr = find_step_record(job_ptr, ckpt_ptr->step_id);
+	if (step_ptr == NULL) {
+		rc = ESLURM_INVALID_JOB_ID;
+	} else {
+		if (ckpt_ptr->image_dir == NULL) {
+			ckpt_ptr->image_dir = xstrdup(step_ptr->ckpt_dir);
 		}
-		if (update_rc != -2)	/* some work done */
-			last_job_update = time(NULL);
-		list_iterator_destroy (step_iterator);
+		xstrfmtcat(ckpt_ptr->image_dir, "/%u.%u", job_ptr->job_id, 
+			   step_ptr->step_id);
+
+		rc = checkpoint_op(ckpt_ptr->job_id, ckpt_ptr->step_id, 
+				   step_ptr, ckpt_ptr->op, ckpt_ptr->data,
+				   ckpt_ptr->image_dir, &resp_data.event_time, 
+				   &resp_data.error_code, 
+				   &resp_data.error_msg);
+		last_job_update = time(NULL);
 	}
 
     reply:
@@ -1520,12 +1867,13 @@ extern int job_step_checkpoint_task_comp(checkpoint_task_comp_msg_t *ckpt_ptr,
  * step_partial_comp - Note the completion of a job step on at least
  *	some of its nodes
  * IN req     - step_completion_msg RPC from slurmstepd
+ * IN uid     - UID issuing the request
  * OUT rem    - count of nodes for which responses are still pending
  * OUT max_rc - highest return code for any step thus far
  * RET 0 on success, otherwise ESLURM error code
  */
-extern int step_partial_comp(step_complete_msg_t *req, int *rem, 
-			     uint32_t *max_rc)
+extern int step_partial_comp(step_complete_msg_t *req, uid_t uid, 
+			     int *rem, uint32_t *max_rc)
 {
 	struct job_record *job_ptr;
 	struct step_record *step_ptr;
@@ -1533,13 +1881,29 @@ extern int step_partial_comp(step_complete_msg_t *req, int *rem,
 
 	/* find the job, step, and validate input */
 	job_ptr = find_job_record (req->job_id);
-	if (job_ptr == NULL)
+	if (job_ptr == NULL) {
+		info("step_partial_comp: JobID=%u invalid", req->job_id);
 		return ESLURM_INVALID_JOB_ID;
-	if (job_ptr->job_state == JOB_PENDING)
+	}
+	if (job_ptr->job_state == JOB_PENDING) {
+		info("step_partial_comp: JobID=%u pending", req->job_id);
 		return ESLURM_JOB_PENDING;
+	}
+
+	if ((!validate_super_user(uid)) && (uid != job_ptr->user_id)) {
+		/* Normally from slurmstepd, from srun on some failures */
+		error("Security violation: "
+		      "REQUEST_STEP_COMPLETE RPC for job %u from uid=%u",
+		      job_ptr->job_id, (unsigned int) uid);
+		return ESLURM_USER_ID_MISSING;
+	}
+
 	step_ptr = find_step_record(job_ptr, req->job_step_id);
-	if (step_ptr == NULL)
+	if (step_ptr == NULL) {
+		info("step_partial_comp: StepID=%u.%u invalid", 
+		     req->job_id, req->job_step_id);
 		return ESLURM_INVALID_JOB_ID;
+	}
 	if (step_ptr->batch_step) {
 		if(rem)
 			*rem = 0;
@@ -1555,8 +1919,8 @@ extern int step_partial_comp(step_complete_msg_t *req, int *rem,
 		return SLURM_SUCCESS;
 	}
 	if (req->range_last < req->range_first) {
-		error("step_partial_comp: range: %u-%u", req->range_first, 
-			req->range_last);
+		error("step_partial_comp: JobID=%u range=%u-%u", 
+		      req->job_id, req->range_first, req->range_last);
 		return EINVAL;
 	}
 
@@ -1566,8 +1930,8 @@ extern int step_partial_comp(step_complete_msg_t *req, int *rem,
 		/* initialize the node bitmap for exited nodes */
 		nodes = bit_set_count(step_ptr->step_node_bitmap);
 		if (req->range_last >= nodes) {	/* range is zero origin */
-			error("step_partial_comp: last=%u, nodes=%d",
-				req->range_last, nodes);
+			error("step_partial_comp: JobID=%u last=%u, nodes=%d",
+			      req->job_id, req->range_last, nodes);
 			return EINVAL;
 		}
 		step_ptr->exit_node_bitmap = bit_alloc(nodes);
@@ -1577,8 +1941,8 @@ extern int step_partial_comp(step_complete_msg_t *req, int *rem,
 	} else {
 		nodes = _bitstr_bits(step_ptr->exit_node_bitmap);
 		if (req->range_last >= nodes) {	/* range is zero origin */
-			error("step_partial_comp: last=%u, nodes=%d",
-				req->range_last, nodes);
+			error("step_partial_comp: JobID=%u last=%u, nodes=%d",
+			      req->job_id, req->range_last, nodes);
 			return EINVAL;
 		}
 		step_ptr->exit_code = MAX(step_ptr->exit_code, req->step_rc);
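
The range accounting in step_partial_comp() works on step-local node indices: each slurmstepd reports a zero-origin [range_first, range_last] span, the controller accumulates spans in exit_node_bitmap, and *rem is the count of nodes still outstanding. A sketch of that bookkeeping, assuming the bitstring helpers from src/common/bitstring.h:

#include <errno.h>

/* Illustrative flow only; the real function also folds in exit
 * codes and handles the batch-step special case. */
static int record_exit_range(bitstr_t **exit_bitmap, int nodes,
			     int range_first, int range_last, int *rem)
{
	if ((range_last < range_first) || (range_last >= nodes))
		return EINVAL;		/* range is zero origin */
	if (*exit_bitmap == NULL)
		*exit_bitmap = bit_alloc(nodes);
	bit_nset(*exit_bitmap, range_first, range_last);
	*rem = nodes - bit_set_count(*exit_bitmap);
	return 0;
}
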
@@ -1784,18 +2148,29 @@ resume_job_step(struct job_record *job_ptr)
  */
 extern void dump_job_step_state(struct step_record *step_ptr, Buf buffer)
 {
-	pack16(step_ptr->step_id, buffer);
+	pack32(step_ptr->step_id, buffer);
 	pack16(step_ptr->cyclic_alloc, buffer);
 	pack16(step_ptr->port, buffer);
 	pack16(step_ptr->ckpt_interval, buffer);
-	pack16(step_ptr->mem_per_task, buffer);
+	pack16(step_ptr->cpus_per_task, buffer);
+	pack16(step_ptr->resv_port_cnt, buffer);
+
+	pack8(step_ptr->no_kill, buffer);
 
+	pack32(step_ptr->cpu_count, buffer);
+	pack32(step_ptr->mem_per_task, buffer);
 	pack32(step_ptr->exit_code, buffer);
 	if (step_ptr->exit_code != NO_VAL) {
 		pack_bit_fmt(step_ptr->exit_node_bitmap, buffer);
 		pack16((uint16_t) _bitstr_bits(step_ptr->exit_node_bitmap), 
 			buffer);
 	}
+	if (step_ptr->core_bitmap_job) {
+		uint32_t core_size = bit_size(step_ptr->core_bitmap_job);
+		pack32(core_size, buffer);
+		pack_bit_fmt(step_ptr->core_bitmap_job, buffer);
+	} else
+		pack32((uint32_t) 0, buffer);
 
 	pack_time(step_ptr->start_time, buffer);
 	pack_time(step_ptr->pre_sus_time, buffer);
@@ -1803,9 +2178,10 @@ extern void dump_job_step_state(struct step_record *step_ptr, Buf buffer)
 	pack_time(step_ptr->ckpt_time, buffer);
 
 	packstr(step_ptr->host,  buffer);
+	packstr(step_ptr->resv_ports, buffer);
 	packstr(step_ptr->name, buffer);
 	packstr(step_ptr->network, buffer);
-	packstr(step_ptr->ckpt_path, buffer);
+	packstr(step_ptr->ckpt_dir, buffer);
 	pack16(step_ptr->batch_step, buffer);
 	if (!step_ptr->batch_step) {
 		pack_slurm_step_layout(step_ptr->step_layout, buffer);
@@ -1815,44 +2191,56 @@ extern void dump_job_step_state(struct step_record *step_ptr, Buf buffer)
 }
 
 /*
- * Create a new job step from data in a buffer (as created by dump_job_step_state)
+ * Create a new job step from data in a buffer (as created by 
+ *	dump_job_step_state)
 * IN/OUT - job_ptr - pointer to a job for which the step is to be loaded.
- * IN/OUT buffer - location from which to get data, pointers automatically advanced
+ * IN/OUT buffer - location to get data from, pointers advanced
  */
 extern int load_step_state(struct job_record *job_ptr, Buf buffer)
 {
 	struct step_record *step_ptr = NULL;
-	uint16_t step_id, cyclic_alloc, port, batch_step, bit_cnt;
-	uint16_t ckpt_interval, mem_per_task;
-	uint32_t exit_code, name_len;
+	uint8_t no_kill;
+	uint16_t cyclic_alloc, port, batch_step, bit_cnt;
+	uint16_t ckpt_interval, cpus_per_task, resv_port_cnt;
+	uint32_t core_size, cpu_count, exit_code, mem_per_task, name_len;
+	uint32_t step_id;
 	time_t start_time, pre_sus_time, tot_sus_time, ckpt_time;
-	char *host = NULL, *ckpt_path = NULL;
-	char *name = NULL, *network = NULL, *bit_fmt = NULL;
+	char *host = NULL, *ckpt_dir = NULL, *core_job = NULL;
+	char *resv_ports = NULL, *name = NULL, *network = NULL, *bit_fmt = NULL;
 	switch_jobinfo_t switch_tmp = NULL;
 	check_jobinfo_t check_tmp = NULL;
 	slurm_step_layout_t *step_layout = NULL;
 	
-	safe_unpack16(&step_id, buffer);
+	safe_unpack32(&step_id, buffer);
 	safe_unpack16(&cyclic_alloc, buffer);
 	safe_unpack16(&port, buffer);
 	safe_unpack16(&ckpt_interval, buffer);
-	safe_unpack16(&mem_per_task, buffer);
+	safe_unpack16(&cpus_per_task, buffer);
+	safe_unpack16(&resv_port_cnt, buffer);
 
+	safe_unpack8(&no_kill, buffer);
+
+	safe_unpack32(&cpu_count, buffer);
+	safe_unpack32(&mem_per_task, buffer);
 	safe_unpack32(&exit_code, buffer);
 	if (exit_code != NO_VAL) {
 		safe_unpackstr_xmalloc(&bit_fmt, &name_len, buffer);
 		safe_unpack16(&bit_cnt, buffer);
 	}
-	
+	safe_unpack32(&core_size, buffer);
+	if (core_size)
+		safe_unpackstr_xmalloc(&core_job, &name_len, buffer);
+
 	safe_unpack_time(&start_time, buffer);
 	safe_unpack_time(&pre_sus_time, buffer);
 	safe_unpack_time(&tot_sus_time, buffer);
 	safe_unpack_time(&ckpt_time, buffer);
 
 	safe_unpackstr_xmalloc(&host, &name_len, buffer);
+	safe_unpackstr_xmalloc(&resv_ports, &name_len, buffer);
 	safe_unpackstr_xmalloc(&name, &name_len, buffer);
 	safe_unpackstr_xmalloc(&network, &name_len, buffer);
-	safe_unpackstr_xmalloc(&ckpt_path, &name_len, buffer);
+	safe_unpackstr_xmalloc(&ckpt_dir, &name_len, buffer);
 	safe_unpack16(&batch_step, buffer);
 	if (!batch_step) {
 		if (unpack_slurm_step_layout(&step_layout, buffer))
@@ -1871,30 +2259,39 @@ extern int load_step_state(struct job_record *job_ptr, Buf buffer)
 		      job_ptr->job_id, step_id, cyclic_alloc);
 		goto unpack_error;
 	}
+	if (no_kill > 1) {
+		error("Invalid data for job %u.%u: no_kill=%u",
+		      job_ptr->job_id, step_id, no_kill);
+		goto unpack_error;
+	}
 
 	step_ptr = find_step_record(job_ptr, step_id);
 	if (step_ptr == NULL)
-		step_ptr = create_step_record(job_ptr);
+		step_ptr = _create_step_record(job_ptr);
 	if (step_ptr == NULL)
 		goto unpack_error;
 
 	/* set new values */
 	step_ptr->step_id      = step_id;
+	step_ptr->cpu_count    = cpu_count;
+	step_ptr->cpus_per_task= cpus_per_task;
 	step_ptr->cyclic_alloc = cyclic_alloc;
+	step_ptr->resv_port_cnt= resv_port_cnt;
+	step_ptr->resv_ports   = resv_ports;
 	step_ptr->name         = name;
 	step_ptr->network      = network;
-	step_ptr->ckpt_path    = ckpt_path;
+	step_ptr->no_kill      = no_kill;
+	step_ptr->ckpt_dir     = ckpt_dir;
 	step_ptr->port         = port;
 	step_ptr->ckpt_interval= ckpt_interval;
 	step_ptr->mem_per_task = mem_per_task;
 	step_ptr->host         = host;
-	step_ptr->batch_step   = batch_step;
 	host                   = NULL;  /* re-used, nothing left to free */
+	step_ptr->batch_step   = batch_step;
 	step_ptr->start_time   = start_time;
 	step_ptr->pre_sus_time = pre_sus_time;
 	step_ptr->tot_sus_time = tot_sus_time;
 	step_ptr->ckpt_time    = ckpt_time;
-	step_ptr->cpus_per_task = 1;	/* Need to save/restore in v1.4 */
 
 	slurm_step_layout_destroy(step_ptr->step_layout);
 	step_ptr->step_layout  = step_layout;
@@ -1916,6 +2313,14 @@ extern int load_step_state(struct job_record *job_ptr, Buf buffer)
 		}
 		xfree(bit_fmt);
 	}
+	if (core_size) {
+		step_ptr->core_bitmap_job = bit_alloc(core_size);
+		if (bit_unfmt(step_ptr->core_bitmap_job, core_job)) {
+			error("error recovering core_bitmap_job from %s",
+			      core_job);
+		}
+		xfree(core_job);
+	}
 
 	if (step_ptr->step_layout && step_ptr->step_layout->node_list) {
 		switch_g_job_step_allocated(switch_tmp, 
@@ -1928,10 +2333,12 @@ extern int load_step_state(struct job_record *job_ptr, Buf buffer)
 
       unpack_error:
 	xfree(host);
+	xfree(resv_ports);
 	xfree(name);
 	xfree(network);
-	xfree(ckpt_path);
+	xfree(ckpt_dir);
 	xfree(bit_fmt);
+	xfree(core_job);
 	if (switch_tmp)
 		switch_free_jobinfo(switch_tmp);
 	slurm_step_layout_destroy(step_layout);
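
The dump/load pair above must stay in lockstep: every field packed by dump_job_step_state() is unpacked in the same order and at the same width by load_step_state(), which is why widening step_id from pack16 to pack32 (and adding no_kill, cpu_count, resv_ports, and the core bitmap) touches both sides. A minimal round-trip sketch of that discipline, using the pack API from src/common/pack.h (the two helper names are hypothetical):

    static void _dump_example(uint32_t step_id, uint8_t no_kill, Buf buffer)
    {
        pack32(step_id, buffer);    /* was pack16 before this change */
        pack8(no_kill, buffer);
    }

    static int _load_example(uint32_t *step_id, uint8_t *no_kill, Buf buffer)
    {
        safe_unpack32(step_id, buffer); /* must match pack32 above */
        safe_unpack8(no_kill, buffer);
        return SLURM_SUCCESS;

    unpack_error:   /* the safe_unpack* macros jump here on short reads */
        return SLURM_ERROR;
    }
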
@@ -1950,6 +2357,7 @@ extern void step_checkpoint(void)
 	time_t event_time;
 	uint32_t error_code;
 	char *error_msg;
+	checkpoint_msg_t ckpt_req;
 
 	/* Exit if "checkpoint/none" is configured */
 	if (ckpt_run == -1) {
@@ -1967,20 +2375,62 @@ extern void step_checkpoint(void)
 	while ((job_ptr = (struct job_record *) list_next(job_iterator))) {
 		if (job_ptr->job_state != JOB_RUNNING)
 			continue;
+		if (job_ptr->batch_flag &&
+		    (job_ptr->ckpt_interval != 0)) { /* periodic job ckpt */
+			ckpt_due = job_ptr->ckpt_time +
+				   (job_ptr->ckpt_interval * 60);
+			if (ckpt_due > now)
+				continue;
+			/* 
+			 * DO NOT initiate a checkpoint request if the job
+			 * has just started, in case it is restarting from
+			 * a checkpoint.
+			 */
+			ckpt_due = job_ptr->start_time +
+				   (job_ptr->ckpt_interval * 60);
+			if (ckpt_due > now)
+				continue;
+
+			ckpt_req.op = CHECK_CREATE;
+			ckpt_req.data = 0;
+			ckpt_req.job_id = job_ptr->job_id;
+			ckpt_req.step_id = SLURM_BATCH_SCRIPT;
+			ckpt_req.image_dir = NULL;
+			job_checkpoint(&ckpt_req, getuid(), -1);
+			job_ptr->ckpt_time = now;
+			last_job_update = now;
+			continue; /* ignore periodic step ckpt */
+		}
 		step_iterator = list_iterator_create (job_ptr->step_list);
 		while ((step_ptr = (struct step_record *) 
 				list_next (step_iterator))) {
+			char *image_dir = NULL;
 			if (step_ptr->ckpt_interval == 0)
 				continue;
 			ckpt_due = step_ptr->ckpt_time +
-				(step_ptr->ckpt_interval * 60);
+				   (step_ptr->ckpt_interval * 60);
 			if (ckpt_due > now) 
 				continue;
+			/* 
+			 * DO NOT initiate a checkpoint request if the step
+			 * has just started, in case it is restarting from
+			 * a checkpoint.
+			 */
+			ckpt_due = step_ptr->start_time + 
+				   (step_ptr->ckpt_interval * 60);
+			if (ckpt_due > now)
+				continue;
+
 			step_ptr->ckpt_time = now;
 			last_job_update = now;
-			(void) checkpoint_op(CHECK_CREATE, 0, 
-				(void *)step_ptr, &event_time, 
-				&error_code, &error_msg);
+			image_dir = xstrdup(step_ptr->ckpt_dir);
+			xstrfmtcat(image_dir, "/%u.%u", job_ptr->job_id, 
+				   step_ptr->step_id);
+			(void) checkpoint_op(job_ptr->job_id, 
+					     step_ptr->step_id,
+					     step_ptr, CHECK_CREATE, 0, 
+					     image_dir, &event_time,
+					     &error_code, &error_msg);
+			xfree(image_dir);
 		}
 		list_iterator_destroy (step_iterator);
 	}
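
The periodic logic above applies the same two-part due test to batch jobs and to steps: a checkpoint fires only when both the last checkpoint time and the start time are at least ckpt_interval minutes in the past, so work that was just restarted from a checkpoint image is not immediately checkpointed again. A condensed sketch of the test (helper name hypothetical):

    #include <stdbool.h>
    #include <time.h>

    static bool _ckpt_due(time_t ckpt_time, time_t start_time,
                          uint16_t interval_mins, time_t now)
    {
        if (ckpt_time + (interval_mins * 60) > now)
            return false;   /* checkpointed too recently */
        if (start_time + (interval_mins * 60) > now)
            return false;   /* just (re)started, possibly from a ckpt */
        return true;
    }
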
diff --git a/src/slurmctld/topo_plugin.c b/src/slurmctld/topo_plugin.c
new file mode 100644
index 0000000000000000000000000000000000000000..9d88375956218c0b701940033234b214ffb102ce
--- /dev/null
+++ b/src/slurmctld/topo_plugin.c
@@ -0,0 +1,251 @@
+/*****************************************************************************\
+ *  topo_plugin.c - Topology plugin function stub.
+ *****************************************************************************
+ *  Copyright (C) 2009 Lawrence Livermore National Security.
+ *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+ *  Written by Morris Jette <jette1@llnl.gov>
+ *  CODE-OCEC-09-009. All rights reserved.
+ *  
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
+ *  
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission 
+ *  to link the code of portions of this program with the OpenSSL library under 
+ *  certain conditions as described in each individual source file, and 
+ *  distribute linked combinations including the two. You must obey the GNU 
+ *  General Public License in all respects for all of the code used other than 
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this 
+ *  exception to your version of the file(s), but you are not obligated to do 
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in 
+ *  the program, then also delete it here.
+ *  
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *  
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#include <pthread.h>
+
+#include "src/common/log.h"
+#include "src/common/plugrack.h"
+#include "src/common/slurm_protocol_api.h"
+#include "src/common/xmalloc.h"
+#include "src/common/xstring.h"
+#if 0
+#include "src/slurmctld/slurmctld.h"
+#endif
+
+
+/* ************************************************************************ */
+/*  TAG(                        slurm_topo_ops_t                         )  */
+/* ************************************************************************ */
+typedef struct slurm_topo_ops {
+	int		(*build_config)		( void );
+} slurm_topo_ops_t;
+
+
+/* ************************************************************************ */
+/*  TAG(                        slurm_topo_context_t                     )  */
+/* ************************************************************************ */
+typedef struct slurm_topo_context {
+	char	       	*topo_type;
+	plugrack_t     	plugin_list;
+	plugin_handle_t	cur_plugin;
+	int		topo_errno;
+	slurm_topo_ops_t ops;
+} slurm_topo_context_t;
+
+static slurm_topo_context_t	*g_topo_context = NULL;
+static pthread_mutex_t		g_topo_context_lock = PTHREAD_MUTEX_INITIALIZER;
+
+
+/* ************************************************************************ */
+/*  TAG(                       slurm_topo_get_ops                        )  */
+/* ************************************************************************ */
+static slurm_topo_ops_t *
+slurm_topo_get_ops( slurm_topo_context_t *c )
+{
+	/*
+	 * Must be synchronized with slurm_topo_ops_t above.
+	 */
+	static const char *syms[] = {
+		"topo_build_config",
+	};
+	int n_syms = sizeof( syms ) / sizeof( char * );
+
+	/* Find the correct plugin. */
+	c->cur_plugin = plugin_load_and_link(c->topo_type, n_syms, syms,
+					     (void **) &c->ops);
+	if ( c->cur_plugin != PLUGIN_INVALID_HANDLE )
+		return &c->ops;
+
+	error("Couldn't find the specified plugin name for %s; "
+	      "checking all plugin files",
+	      c->topo_type);
+	
+	/* Get plugin list. */
+	if ( c->plugin_list == NULL ) {
+		char *plugin_dir;
+		c->plugin_list = plugrack_create();
+		if ( c->plugin_list == NULL ) {
+			error( "cannot create plugin manager" );
+			return NULL;
+		}
+		plugrack_set_major_type( c->plugin_list, "topo" );
+		plugrack_set_paranoia( c->plugin_list,
+				       PLUGRACK_PARANOIA_NONE,
+				       0 );
+		plugin_dir = slurm_get_plugin_dir();
+		plugrack_read_dir( c->plugin_list, plugin_dir );
+		xfree(plugin_dir);
+	}
+
+	c->cur_plugin = plugrack_use_by_type( c->plugin_list, c->topo_type );
+	if ( c->cur_plugin == PLUGIN_INVALID_HANDLE ) {
+		error( "cannot find topology plugin for %s", c->topo_type );
+		return NULL;
+	}
+
+	/* Dereference the API. */
+	if ( plugin_get_syms( c->cur_plugin,
+			      n_syms,
+			      syms,
+			      (void **) &c->ops ) < n_syms ) {
+		error( "incomplete topology plugin detected" );
+		return NULL;
+	}
+
+	return &c->ops;
+}
+
+
+/* ************************************************************************ */
+/*  TAG(                  slurm_topo_context_create                      )  */
+/* ************************************************************************ */
+static slurm_topo_context_t *
+slurm_topo_context_create( const char *topo_type )
+{
+	slurm_topo_context_t *c;
+
+	if ( topo_type == NULL ) {
+		debug3( "slurm_topo_context:  no topology type" );
+		return NULL;
+	}
+
+	c = xmalloc( sizeof( slurm_topo_context_t ) );
+	c->topo_type	= xstrdup( topo_type );
+	c->plugin_list	= NULL;
+	c->cur_plugin	= PLUGIN_INVALID_HANDLE;
+	c->topo_errno 	= SLURM_SUCCESS;
+
+	return c;
+}
+
+
+/* ************************************************************************ */
+/*  TAG(                  slurm_topo_context_destroy                     )  */
+/* ************************************************************************ */
+static int
+slurm_topo_context_destroy( slurm_topo_context_t *c )
+{
+	/*
+	 * Must check return code here because plugins might still
+	 * be loaded and active.
+	 */
+	if ( c->plugin_list ) {
+		if ( plugrack_destroy( c->plugin_list ) != SLURM_SUCCESS ) {
+			return SLURM_ERROR;
+		}
+	} else {
+		plugin_unload(c->cur_plugin);
+	}
+
+	xfree( c->topo_type );
+	xfree( c );
+
+	return SLURM_SUCCESS;
+}
+
+
+/* *********************************************************************** */
+/*  TAG(                        slurm_topo_init                         )  */
+/*                                                                         */
+/*  NOTE: The topology plugin cannot be changed via reconfiguration due    */
+/*        to background threads, job priorities, etc. Slurmctld must be    */
+/*        restarted, and job priority changes may be required, to change   */
+/*        the topology type.                                               */
+/* *********************************************************************** */
+extern int
+slurm_topo_init( void )
+{
+	int retval = SLURM_SUCCESS;
+	char *topo_type = NULL;
+	
+	slurm_mutex_lock( &g_topo_context_lock );
+
+	if ( g_topo_context )
+		goto done;
+
+	topo_type = slurm_get_topology_plugin();
+	g_topo_context = slurm_topo_context_create( topo_type );
+	if ( g_topo_context == NULL ) {
+		error( "cannot create topology context for %s",
+			 topo_type );
+		retval = SLURM_ERROR;
+		goto done;
+	}
+
+	if ( slurm_topo_get_ops( g_topo_context ) == NULL ) {
+		error( "cannot resolve topology plugin operations" );
+		slurm_topo_context_destroy( g_topo_context );
+		g_topo_context = NULL;
+		retval = SLURM_ERROR;
+	}
+
+ done:
+	slurm_mutex_unlock( &g_topo_context_lock );
+	xfree(topo_type);
+	return retval;
+}
+
+/* *********************************************************************** */
+/*  TAG(                        slurm_topo_fini                         )  */
+/* *********************************************************************** */
+extern int
+slurm_topo_fini( void )
+{
+	int rc;
+
+	if (!g_topo_context)
+		return SLURM_SUCCESS;
+
+	rc = slurm_topo_context_destroy(g_topo_context);
+	g_topo_context = NULL;
+	return rc;
+}
+
+
+/* *********************************************************************** */
+/*  TAG(                      slurm_topo_build_config                   )  */
+/* *********************************************************************** */
+extern int
+slurm_topo_build_config( void )
+{
+	if ( slurm_topo_init() < 0 )
+		return SLURM_ERROR;
+
+	return (*(g_topo_context->ops.build_config))();
+}
+
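
The stub above resolves exactly one symbol, topo_build_config, from a plugin of major type "topo". As a hedged sketch, the smallest conforming plugin for a flat, switch-less cluster could look like the following; the three exported globals follow SLURM's usual plugin conventions (assumed here):

    #include <slurm/slurm.h>
    #include <slurm/slurm_errno.h>

    const char plugin_name[] = "topology NONE plugin";
    const char plugin_type[] = "topo/none";
    const uint32_t plugin_version = 100;

    extern int topo_build_config(void)
    {
        /* a flat cluster has no switch hierarchy to record */
        return SLURM_SUCCESS;
    }
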
diff --git a/src/slurmctld/topo_plugin.h b/src/slurmctld/topo_plugin.h
new file mode 100644
index 0000000000000000000000000000000000000000..aab14d5c5dfec972e872c3a708e6ba7081315460
--- /dev/null
+++ b/src/slurmctld/topo_plugin.h
@@ -0,0 +1,71 @@
+/*****************************************************************************\
+ *  topo_plugin.h - Define topology plugin functions.
+ *****************************************************************************
+ *  Copyright (C) 2009 Lawrence Livermore National Security.
+ *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+ *  Written by Morris Jette <jette1@llnl.gov>
+ *  CODE-OCEC-09-009. All rights reserved.
+ *  
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
+ *  
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission 
+ *  to link the code of portions of this program with the OpenSSL library under 
+ *  certain conditions as described in each individual source file, and 
+ *  distribute linked combinations including the two. You must obey the GNU 
+ *  General Public License in all respects for all of the code used other than 
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this 
+ *  exception to your version of the file(s), but you are not obligated to do 
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in 
+ *  the program, then also delete it here.
+ *  
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *  
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#ifndef __SLURM_CONTROLLER_TOPO_PLUGIN_API_H__
+#define __SLURM_CONTROLLER_TOPO_PLUGIN_API_H__
+
+#include <slurm/slurm.h>
+#include <src/slurmctld/slurmctld.h>
+
+/*
+ * Initialize the topology plugin.
+ *
+ * Returns a SLURM errno.
+ */
+int slurm_topo_init( void );
+
+/*
+ * Terminate the topology plugin.
+ * 
+ * Returns a SLURM errno.
+ */
+extern int slurm_topo_fini(void);
+
+/*
+ **************************************************************************
+ *                          P L U G I N   C A L L S                       *
+ **************************************************************************
+ */
+
+/*
+ * slurm_topo_build_config - build or rebuild system topology information
+ *	after a system startup or reconfiguration.
+ */
+int slurm_topo_build_config( void );
+
+#endif /*__SLURM_CONTROLLER_TOPO_PLUGIN_API_H__*/
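
A hedged sketch of the intended call sequence from slurmctld: initialize and build at startup (per the NOTE in topo_plugin.c, the plugin cannot be swapped by reconfiguration alone), then tear down once at shutdown; the wrapper name is hypothetical:

    static int _setup_topology(void)
    {
        if (slurm_topo_init() != SLURM_SUCCESS)
            return SLURM_ERROR;
        return slurm_topo_build_config();
    }

    /* ... and at shutdown: (void) slurm_topo_fini(); */
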
diff --git a/src/slurmctld/trigger_mgr.c b/src/slurmctld/trigger_mgr.c
index f9ae9f3991989fe5df73df140ffdf5f10734a9ce..142e4eeb20b3dcf4a6fe2ae435c4d461f9f0981c 100644
--- a/src/slurmctld/trigger_mgr.c
+++ b/src/slurmctld/trigger_mgr.c
@@ -5,10 +5,11 @@
  *  Copyright (C) 2008 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov> et. al.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -61,17 +62,11 @@
 #include "src/slurmctld/state_save.h"
 #include "src/slurmctld/trigger_mgr.h"
 
-#define _DEBUG 0
 #define MAX_PROG_TIME 300	/* maximum run time for program */
 
 /* Change TRIGGER_STATE_VERSION value when changing the state save format */
 #define TRIGGER_STATE_VERSION      "VER002"
 
-/* TRIG_IS_JOB_FINI differs from IS_JOB_FINISHED by considering 
- * completing jobs as not really finished */
-#define TRIG_IS_JOB_FINI(_X)             \
-        (IS_JOB_FINISHED(_X) && ((_X->job_state & JOB_COMPLETING) == 0))
-
 List trigger_list;
 uint32_t next_trigger_id = 1;
 static pthread_mutex_t trigger_mutex = PTHREAD_MUTEX_INITIALIZER;
@@ -140,7 +135,6 @@ static char *_trig_type(uint16_t trig_type)
 		return "unknown";
 }
 
-#if _DEBUG
 static int _trig_offset(uint16_t offset)
 {
 	static int rc;
@@ -153,6 +147,9 @@ static void _dump_trigger_msg(char *header, trigger_info_msg_t *msg)
 {
 	int i;
 
+	if ((slurm_get_debug_flags() & DEBUG_FLAG_TRIGGERS) == 0)
+		return;
+
 	info(header);
 	if ((msg == NULL) || (msg->record_count == 0)) {
 		info("Trigger has no entries");
@@ -171,11 +168,6 @@ static void _dump_trigger_msg(char *header, trigger_info_msg_t *msg)
 			msg->trigger_array[i].program);
 	}
 }
-#else
-static void _dump_trigger_msg(char *header, trigger_info_msg_t *msg)
-{
-}
-#endif
 
 /* Validate trigger program */
 static bool _validate_trigger(trig_mgr_info_t *trig_in)
@@ -495,16 +487,16 @@ static int _load_trigger_state(Buf buffer)
 	if (trig_ptr->res_type == TRIGGER_RES_TYPE_JOB) {
 		trig_ptr->job_id = (uint32_t) atol(trig_ptr->res_id);
 		trig_ptr->job_ptr = find_job_record(trig_ptr->job_id);
-		if ((trig_ptr->job_id == 0)
-		||  (trig_ptr->job_ptr == NULL)
-		||  (TRIG_IS_JOB_FINI(trig_ptr->job_ptr)))
+		if ((trig_ptr->job_id == 0)     ||
+		    (trig_ptr->job_ptr == NULL) ||
+		    (IS_JOB_COMPLETED(trig_ptr->job_ptr)))
 			goto unpack_error;
 	} else {
 		trig_ptr->job_id = 0;
 		trig_ptr->job_ptr = NULL;
-		if ((trig_ptr->res_id != NULL)
-		&&  (trig_ptr->res_id[0] != '*')
-		&&  (node_name2bitmap(trig_ptr->res_id, false,
+		if ((trig_ptr->res_id != NULL)   &&
+		    (trig_ptr->res_id[0] != '*') &&
+		    (node_name2bitmap(trig_ptr->res_id, false,
 				&trig_ptr->nodes_bitmap) != 0))
 			goto unpack_error;
 	}
@@ -528,6 +520,7 @@ unpack_error:
 }
 extern int trigger_state_save(void)
 {
+	/* Track a high-water mark to avoid repeated buffer growth and copying */
 	static int high_buffer_size = (1024 * 1024);
 	int error_code = 0, log_fd;
 	char *old_file, *new_file, *reg_file;
@@ -684,21 +677,21 @@ static void _trigger_job_event(trig_mgr_info_t *trig_in, time_t now)
 
 	if ((trig_in->trig_type & TRIGGER_TYPE_FINI)
 	&&  ((trig_in->job_ptr == NULL) ||
-	     (TRIG_IS_JOB_FINI(trig_in->job_ptr)))) {
+	     (IS_JOB_COMPLETED(trig_in->job_ptr)))) {
 		trig_in->state = 1;
 		trig_in->trig_time = now + (trig_in->trig_time - 0x8000);
-#if _DEBUG
-		info("trigger[%u] event for job %u fini",
-			trig_in->trig_id, trig_in->job_id);
-#endif
+		if (slurm_get_debug_flags() & DEBUG_FLAG_TRIGGERS) {
+			info("trigger[%u] event for job %u fini",
+				trig_in->trig_id, trig_in->job_id);
+		}
 		return;
 	}
 
 	if (trig_in->job_ptr == NULL) {
-#if _DEBUG
-		info("trigger[%u] for defunct job %u",
-			trig_in->trig_id, trig_in->job_id);
-#endif
+		if (slurm_get_debug_flags() & DEBUG_FLAG_TRIGGERS) {
+			info("trigger[%u] for defunct job %u",
+				trig_in->trig_id, trig_in->job_id);
+		}
 		trig_in->state = 2;
 		trig_in->trig_time = now;
 		return;
@@ -709,10 +702,10 @@ static void _trigger_job_event(trig_mgr_info_t *trig_in, time_t now)
 		if (rem_time <= (0x8000 - trig_in->trig_time)) {
 			trig_in->state = 1;
 			trig_in->trig_time = now;
-#if _DEBUG
-			info("trigger[%u] for job %u time",
-				trig_in->trig_id, trig_in->job_id);
-#endif
+			if (slurm_get_debug_flags() & DEBUG_FLAG_TRIGGERS) {
+				info("trigger[%u] for job %u time",
+					trig_in->trig_id, trig_in->job_id);
+			}
 			return;
 		}
 	}
@@ -721,10 +714,10 @@ static void _trigger_job_event(trig_mgr_info_t *trig_in, time_t now)
 		if (trigger_down_nodes_bitmap
 		&&  bit_overlap(trig_in->job_ptr->node_bitmap, 
 				trigger_down_nodes_bitmap)) {
-#if _DEBUG
-			info("trigger[%u] for job %u down",
-				trig_in->trig_id, trig_in->job_id);
-#endif
+			if (slurm_get_debug_flags() & DEBUG_FLAG_TRIGGERS) {
+				info("trigger[%u] for job %u down",
+					trig_in->trig_id, trig_in->job_id);
+			}
 			trig_in->state = 1;
 			trig_in->trig_time = now + 
 					(trig_in->trig_time - 0x8000);
@@ -736,10 +729,10 @@ static void _trigger_job_event(trig_mgr_info_t *trig_in, time_t now)
 		if (trigger_fail_nodes_bitmap
 		&&  bit_overlap(trig_in->job_ptr->node_bitmap, 
 				trigger_fail_nodes_bitmap)) {
-#if _DEBUG
-			info("trigger[%u] for job %u node fail",
-				trig_in->trig_id, trig_in->job_id);
-#endif
+			if (slurm_get_debug_flags() & DEBUG_FLAG_TRIGGERS) {
+				info("trigger[%u] for job %u node fail",
+					trig_in->trig_id, trig_in->job_id);
+			}
 			trig_in->state = 1;
 			trig_in->trig_time = now + 
 					(trig_in->trig_time - 0x8000);
@@ -754,10 +747,10 @@ static void _trigger_job_event(trig_mgr_info_t *trig_in, time_t now)
 			trig_in->state = 1;
 			trig_in->trig_time = now + 
 					(0x8000 - trig_in->trig_time);
-#if _DEBUG
-			info("trigger[%u] for job %u up",
-				trig_in->trig_id, trig_in->job_id);
-#endif
+			if (slurm_get_debug_flags() & DEBUG_FLAG_TRIGGERS) {
+				info("trigger[%u] for job %u up",
+					trig_in->trig_id, trig_in->job_id);
+			}
 			return;
 		}
 	}
@@ -769,9 +762,8 @@ static void _trigger_node_event(trig_mgr_info_t *trig_in, time_t now)
 	&&   trigger_block_err) {
 		trig_in->state = 1;
 		trig_in->trig_time = now + (trig_in->trig_time - 0x8000);
-#if _DEBUG
-		info("trigger[%u] for block_err", trig_in->trig_id);
-#endif
+		if (slurm_get_debug_flags() & DEBUG_FLAG_TRIGGERS)
+			info("trigger[%u] for block_err", trig_in->trig_id);
 		return;
 	}
 
@@ -795,10 +787,10 @@ static void _trigger_node_event(trig_mgr_info_t *trig_in, time_t now)
 		if (trig_in->state == 1) {
 			trig_in->trig_time = now + 
 					(trig_in->trig_time - 0x8000);
-#if _DEBUG
-			info("trigger[%u] for node %s down",
-				trig_in->trig_id, trig_in->res_id);
-#endif
+			if (slurm_get_debug_flags() & DEBUG_FLAG_TRIGGERS) {
+				info("trigger[%u] for node %s down",
+					trig_in->trig_id, trig_in->res_id);
+			}
 			return;
 		}
 	}
@@ -823,10 +815,10 @@ static void _trigger_node_event(trig_mgr_info_t *trig_in, time_t now)
 		if (trig_in->state == 1) {
 			trig_in->trig_time = now + 
 					(trig_in->trig_time - 0x8000);
-#if _DEBUG
-			info("trigger[%u] for node %s drained",
-				trig_in->trig_id, trig_in->res_id);
-#endif
+			if (slurm_get_debug_flags() & DEBUG_FLAG_TRIGGERS) {
+				info("trigger[%u] for node %s drained",
+					trig_in->trig_id, trig_in->res_id);
+			}
 			return;
 		}
 	}
@@ -851,10 +843,10 @@ static void _trigger_node_event(trig_mgr_info_t *trig_in, time_t now)
 		if (trig_in->state == 1) {
 			trig_in->trig_time = now + 
 					(trig_in->trig_time - 0x8000);
-#if _DEBUG
-			info("trigger[%u] for node %s fail",
-				trig_in->trig_id, trig_in->res_id);
-#endif
+			if (slurm_get_debug_flags() & DEBUG_FLAG_TRIGGERS) {
+				info("trigger[%u] for node %s fail",
+					trig_in->trig_id, trig_in->res_id);
+			}
 			return;
 		}
 	}
@@ -893,10 +885,10 @@ static void _trigger_node_event(trig_mgr_info_t *trig_in, time_t now)
 		bit_free(trigger_idle_node_bitmap);
 		if (trig_in->state == 1) {
 			trig_in->trig_time = now;
-#if _DEBUG
-			info("trigger[%u] for node %s idle",
-				trig_in->trig_id, trig_in->res_id);
-#endif
+			if (slurm_get_debug_flags() & DEBUG_FLAG_TRIGGERS) {
+				info("trigger[%u] for node %s idle",
+					trig_in->trig_id, trig_in->res_id);
+			}
 			return;
 		}
 	}
@@ -921,10 +913,10 @@ static void _trigger_node_event(trig_mgr_info_t *trig_in, time_t now)
 		if (trig_in->state == 1) {
 			trig_in->trig_time = now + 
 					(trig_in->trig_time - 0x8000);
-#if _DEBUG
-			info("trigger[%u] for node %s up",
-				trig_in->trig_id, trig_in->res_id);
-#endif
+			if (slurm_get_debug_flags() & DEBUG_FLAG_TRIGGERS) {
+				info("trigger[%u] for node %s up",
+					trig_in->trig_id, trig_in->res_id);
+			}
 			return;
 		}
 	}
@@ -935,9 +927,8 @@ static void _trigger_node_event(trig_mgr_info_t *trig_in, time_t now)
 		trig_in->trig_time = now + (trig_in->trig_time - 0x8000);
 		xfree(trig_in->res_id);
 		trig_in->res_id = xstrdup("reconfig");
-#if _DEBUG
-		info("trigger[%u] for reconfig", trig_in->trig_id);
-#endif
+		if (slurm_get_debug_flags() & DEBUG_FLAG_TRIGGERS)
+			info("trigger[%u] for reconfig", trig_in->trig_id);
 		return;
 	}
 }
@@ -1049,13 +1040,13 @@ extern void trigger_process(void)
 		}
 		if ((trig_in->state == 1) &&
 		    (trig_in->trig_time <= now)) {
-#if _DEBUG
-			info("launching program for trigger[%u]",
-				trig_in->trig_id);
-			info("  uid=%u gid=%u program=%s arg=%s", 
-				trig_in->user_id, trig_in->group_id,
-				trig_in->program, trig_in->res_id);
-#endif
+			if (slurm_get_debug_flags() & DEBUG_FLAG_TRIGGERS) {
+				info("launching program for trigger[%u]",
+					trig_in->trig_id);
+				info("  uid=%u gid=%u program=%s arg=%s", 
+					trig_in->user_id, trig_in->group_id,
+					trig_in->program, trig_in->res_id);
+			}
 			trig_in->state = 2;
 			trig_in->trig_time = now;
 			state_change = true;
@@ -1082,9 +1073,11 @@ extern void trigger_process(void)
 			}
 
 			if (trig_in->group_id == 0) {
-#if _DEBUG
-				info("purging trigger[%u]", trig_in->trig_id);
-#endif
+				if (slurm_get_debug_flags() & 
+				    DEBUG_FLAG_TRIGGERS) {
+					info("purging trigger[%u]", 
+					     trig_in->trig_id);
+				}
 				list_delete_item(trig_iter);
 				state_change = true;
 			}
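
The hunks above trade the compile-time "#if _DEBUG" guards for runtime checks against the DebugFlags value from slurm.conf, so trigger tracing can be toggled without rebuilding slurmctld. The recurring idiom, factored into a sketch (helper name hypothetical):

    static void _trig_trace(uint32_t trig_id, const char *event)
    {
        if ((slurm_get_debug_flags() & DEBUG_FLAG_TRIGGERS) == 0)
            return;     /* DebugFlags=triggers not set */
        info("trigger[%u] %s", trig_id, event);
    }
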
diff --git a/src/slurmctld/trigger_mgr.h b/src/slurmctld/trigger_mgr.h
index 69986d14346879ff788e655b88f4217014f6f5ba..f9a61cccd8114a644eddfebf6479ddf3b5bf33e5 100644
--- a/src/slurmctld/trigger_mgr.h
+++ b/src/slurmctld/trigger_mgr.h
@@ -5,10 +5,11 @@
  *  Copyright (C) 2008 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov> et. al.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/slurmd/Makefile.in b/src/slurmd/Makefile.in
index 6e86126449728fdde4d2ee29f56f5c303058ccd3..2af123cc55289c471524a23f1a20006faf606642 100644
--- a/src/slurmd/Makefile.in
+++ b/src/slurmd/Makefile.in
@@ -40,14 +40,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -89,6 +93,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
diff --git a/src/slurmd/common/proctrack.c b/src/slurmd/common/proctrack.c
index ec47637f38a0ece778fd8572a9b22156527a1ce6..796f4aec4c3dbd51037b7b9d89a70015952479d0 100644
--- a/src/slurmd/common/proctrack.c
+++ b/src/slurmd/common/proctrack.c
@@ -4,10 +4,11 @@
  *  Copyright (C) 2005 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/slurmd/common/proctrack.h b/src/slurmd/common/proctrack.h
index 35325770e7bc9b1e3854e9bdea40299e6c2faded..61ac54ab044ea46ba9b1554fb7e1e42ae59136c4 100644
--- a/src/slurmd/common/proctrack.h
+++ b/src/slurmd/common/proctrack.h
@@ -4,10 +4,11 @@
  *  Copyright (C) 2005-2006 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/slurmd/common/reverse_tree.h b/src/slurmd/common/reverse_tree.h
index a406db9a3b532277351f1934dbd982a139c64b89..7703c904436239a22d2e9e37011c14422030b251 100644
--- a/src/slurmd/common/reverse_tree.h
+++ b/src/slurmd/common/reverse_tree.h
@@ -5,10 +5,11 @@
  *  Copyright (C) 2006 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Christopher J. Morrone <morrone2@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/slurmd/common/run_script.c b/src/slurmd/common/run_script.c
index 7e848ff50905ef9772a3be7b37c55431528dcee1..05d139b3ed02116b76ea7c01a9c49f51d202261b 100644
--- a/src/slurmd/common/run_script.c
+++ b/src/slurmd/common/run_script.c
@@ -4,10 +4,11 @@
  *  Copyright (C) 2005 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Christopher Morrone <morrone2@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/slurmd/common/run_script.h b/src/slurmd/common/run_script.h
index 837d4dce6200f859c666cc736144645418ab3a33..dbab911af7bd2c9ce912f21f63cdf55aa150a618 100644
--- a/src/slurmd/common/run_script.h
+++ b/src/slurmd/common/run_script.h
@@ -4,10 +4,11 @@
  *  Copyright (C) 2005 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Christopher Morrone <morrone2@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/slurmd/common/set_oomadj.c b/src/slurmd/common/set_oomadj.c
new file mode 100644
index 0000000000000000000000000000000000000000..26d0a87cdc4ec21c46b61d59747c4725846db759
--- /dev/null
+++ b/src/slurmd/common/set_oomadj.c
@@ -0,0 +1,70 @@
+/*****************************************************************************\
+ *  set_oomadj.c - prevent slurmd/slurmstepd from being killed by the
+ *	kernel OOM killer
+ *****************************************************************************
+ *  Written by Hongjia Cao, National University of Defense Technology, China.
+ *  CODE-OCEC-09-009. All rights reserved.
+ *  
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
+ *  
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission 
+ *  to link the code of portions of this program with the OpenSSL library under 
+ *  certain conditions as described in each individual source file, and 
+ *  distribute linked combinations including the two. You must obey the GNU 
+ *  General Public License in all respects for all of the code used other than 
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this 
+ *  exception to your version of the file(s), but you are not obligated to do 
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in 
+ *  the program, then also delete it here.
+ *  
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *  
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#include <errno.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include "src/common/log.h"
+
+extern int set_oom_adj(int adj)
+{
+	int fd;
+	char oom_adj[16];
+
+	fd = open("/proc/self/oom_adj", O_WRONLY);
+	if (fd < 0) {
+		if (errno == ENOENT)
+			debug("failed to open /proc/self/oom_adj: %m");
+		else
+			error("failed to open /proc/self/oom_adj: %m");
+		return -1;
+	}
+	if (snprintf(oom_adj, 16, "%d", adj) >= 16) {
+		close(fd);	/* avoid leaking the open descriptor */
+		return -1;
+	}
+	while ((write(fd, oom_adj, strlen(oom_adj)) < 0) && (errno == EINTR))
+		;
+	close(fd);
+
+	return 0;
+}
+
+
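
A hedged usage sketch: a daemon calls set_oom_adj() with the OOM_DISABLE value from the companion header early in startup, so the kernel OOM killer targets user job steps rather than slurmd itself; the wrapper name is hypothetical:

    #include "src/common/log.h"
    #include "src/slurmd/common/set_oomadj.h"

    static void _protect_daemon(void)
    {
        if (set_oom_adj(OOM_DISABLE) < 0)
            debug("continuing without OOM killer protection");
    }
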
diff --git a/src/slurmd/common/set_oomadj.h b/src/slurmd/common/set_oomadj.h
new file mode 100644
index 0000000000000000000000000000000000000000..c725900b4a1ccd2a9e0a13bbce1bde38d16b003d
--- /dev/null
+++ b/src/slurmd/common/set_oomadj.h
@@ -0,0 +1,47 @@
+/*****************************************************************************\
+ *  set_oomadj.h - prevent slurmd/slurmstepd from being killed by the
+ *	kernel OOM killer
+ *****************************************************************************
+ *  Written by Hongjia Cao, National University of Defense Technology, China.
+ *  CODE-OCEC-09-009. All rights reserved.
+ *  
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
+ *  
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission 
+ *  to link the code of portions of this program with the OpenSSL library under 
+ *  certain conditions as described in each individual source file, and 
+ *  distribute linked combinations including the two. You must obey the GNU 
+ *  General Public License in all respects for all of the code used other than 
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this 
+ *  exception to your version of the file(s), but you are not obligated to do 
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in 
+ *  the program, then also delete it here.
+ *  
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *  
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#ifndef _SET_OOMADJ_H
+#define _SET_OOMADJ_H
+
+/* from linux/mm.h */
+#define OOM_DISABLE (-17)
+
+extern int set_oom_adj(int adj);
+
+#endif /* _SET_OOMADJ_H */
+
diff --git a/src/slurmd/common/setproctitle.c b/src/slurmd/common/setproctitle.c
index 646e9e5586018ccba1ccdceb1c18016f8d66f371..441ee6f3026a5e19c41eb690a72a1362cd233b81 100644
--- a/src/slurmd/common/setproctitle.c
+++ b/src/slurmd/common/setproctitle.c
@@ -1,14 +1,16 @@
 /*****************************************************************************\
  * src/slurmd/common/setproctitle.c - argv manipulation 
- * $Id: setproctitle.c 13672 2008-03-19 23:10:58Z jette $
+ * $Id: setproctitle.c 17276 2009-04-17 17:03:49Z jette $
  *****************************************************************************
- *  Copyright (C) 2002 The Regents of the University of California.
+ *  Copyright (C) 2002-2007 The Regents of the University of California.
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Mark Grondona <mgrondona@llnl.gov>.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -307,3 +309,18 @@ init_setproctitle(int argc, char *argv[])
 #endif /* PS_USE_CLOBBER_ARGV */
 }
 
+/* Free memory allocated by init_setproctitle.
+ * Used to verify that all allocated memory gets freed */
+void fini_setproctitle(void)
+{
+#if SETPROCTITLE_STRATEGY == PS_USE_CLOBBER_ARGV
+	int i;
+
+	for (i = 0; environ[i] != NULL; i++) {
+		free(environ[i]);
+	}
+	free(environ);
+	environ = (char **) NULL;
+#endif /* PS_USE_CLOBBER_ARGV */
+}
+
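
fini_setproctitle() only matters under the PS_USE_CLOBBER_ARGV strategy, where init_setproctitle() replaced environ with a malloc'd copy; pairing the two calls lets leak checkers see a clean exit. A minimal pairing sketch (the title string is hypothetical):

    #include "src/slurmd/common/setproctitle.h"

    int main(int argc, char *argv[])
    {
        init_setproctitle(argc, argv);
        setproctitle("%s", "slurmd: idle");  /* hypothetical title */
        /* daemon work happens here */
        fini_setproctitle();
        return 0;
    }
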
diff --git a/src/slurmd/common/setproctitle.h b/src/slurmd/common/setproctitle.h
index 30caef88696d148288f7f4353be91e0d1f3d8ea4..a9e20a7f2545481c19b3a7abeec3881b32a4b136 100644
--- a/src/slurmd/common/setproctitle.h
+++ b/src/slurmd/common/setproctitle.h
@@ -1,14 +1,15 @@
 /*****************************************************************************\
  * src/slurmd/common/setproctitle.h - Emulation of BSD setproctitle()
- * $Id: setproctitle.h 13672 2008-03-19 23:10:58Z jette $
+ * $Id: setproctitle.h 17276 2009-04-17 17:03:49Z jette $
  *****************************************************************************
  *  Copyright (C) 2002 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Mark Grondona <mgrondona@llnl.gov>.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -46,6 +47,7 @@
 #ifndef HAVE_SETPROCTITLE
 void setproctitle(const char *fmt, ...);
 void init_setproctitle(int argc, char *argv[]);
+void fini_setproctitle(void);
 #endif
 
 #endif /* _BSD_SETPROCTITLE_H */
diff --git a/src/slurmd/common/slurmstepd_init.c b/src/slurmd/common/slurmstepd_init.c
index 81cfd21beac02fbd8ccfd34cbebec3b2427fcd03..463241c4625fc02edf8709a48fa7c19954335a9b 100644
--- a/src/slurmd/common/slurmstepd_init.c
+++ b/src/slurmd/common/slurmstepd_init.c
@@ -4,10 +4,11 @@
  *  Copyright (C) 2005 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Danny Auble <da@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -55,7 +56,7 @@ extern void pack_slurmd_conf_lite(slurmd_conf_t *conf, Buf buffer)
 	pack32(conf->daemonize, buffer);
 	pack32((uint32_t)conf->slurm_user_id, buffer);
 	pack16(conf->use_pam, buffer);
-	pack16(conf->use_cpusets, buffer);
+	pack16(conf->task_plugin_param, buffer);
 }
 
 extern int unpack_slurmd_conf_lite_no_alloc(slurmd_conf_t *conf, Buf buffer)
@@ -80,7 +81,7 @@ extern int unpack_slurmd_conf_lite_no_alloc(slurmd_conf_t *conf, Buf buffer)
 	safe_unpack32(&uint32_tmp, buffer);
 	conf->slurm_user_id = (uid_t)uint32_tmp;
 	safe_unpack16(&conf->use_pam, buffer);
-	safe_unpack16(&conf->use_cpusets, buffer);
+	safe_unpack16(&conf->task_plugin_param, buffer);
 	return SLURM_SUCCESS;
 
 unpack_error:
diff --git a/src/slurmd/common/slurmstepd_init.h b/src/slurmd/common/slurmstepd_init.h
index 0499d17a0f26ebe47ac98456e010503ba4456818..046dd34f5aa768150b18e67371c4a05546c18542 100644
--- a/src/slurmd/common/slurmstepd_init.h
+++ b/src/slurmd/common/slurmstepd_init.h
@@ -4,10 +4,11 @@
  *  Copyright (C) 2005 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Danny Auble <da@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/slurmd/common/task_plugin.c b/src/slurmd/common/task_plugin.c
index 63ec388a62318ace707474d6c182dedcd32c6f3f..6fd9a09df1e96314d88b3beacd989d4eb0f752a0 100644
--- a/src/slurmd/common/task_plugin.c
+++ b/src/slurmd/common/task_plugin.c
@@ -1,14 +1,15 @@
 /*****************************************************************************\
- *  task_plugin.h - task launch plugin stub.
+ *  task_plugin.c - task launch plugin stub.
  *****************************************************************************
  *  Copyright (C) 2005-2007 The Regents of the University of California.
- *  Copyright (C) 2008 Lawrence Livermore National Security.
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -47,6 +48,8 @@
 #include "src/slurmd/slurmstepd/slurmstepd_job.h"
 
 typedef struct slurmd_task_ops {
+	int	(*slurmd_batch_request)		(uint32_t job_id, 
+						 batch_job_launch_msg_t *req);
 	int	(*slurmd_launch_request)	(uint32_t job_id, 
 						 launch_tasks_request_msg_t *req,
 						 uint32_t node_id);
@@ -81,6 +84,7 @@ _slurmd_task_get_ops(slurmd_task_context_t *c)
 	 * Must be synchronized with slurmd_task_ops_t above.
 	 */
 	static const char *syms[] = {
+		"task_slurmd_batch_request",
 		"task_slurmd_launch_request",
 		"task_slurmd_reserve_resources",
 		"task_slurmd_suspend_job",
@@ -232,6 +236,19 @@ extern int slurmd_task_fini(void)
 	return rc;
 }
 
+/*
+ * Slurmd has received a batch job launch request.
+ *
+ * RET - slurm error code
+ */
+extern int slurmd_batch_request(uint32_t job_id, batch_job_launch_msg_t *req)
+{
+	if (slurmd_task_init())
+		return SLURM_ERROR;
+
+	return (*(g_task_context->ops.slurmd_batch_request))(job_id, req);
+}
+
 /*
  * Slurmd has received a launch request.
  *
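
Because the syms[] table and slurmd_task_ops_t both gained task_slurmd_batch_request, every task plugin must now export that symbol or fail to load. A hedged no-op implementation that keeps an existing plugin loadable:

    extern int task_slurmd_batch_request(uint32_t job_id,
                                         batch_job_launch_msg_t *req)
    {
        /* nothing for this plugin to adjust at batch launch */
        return SLURM_SUCCESS;
    }
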
diff --git a/src/slurmd/common/task_plugin.h b/src/slurmd/common/task_plugin.h
index 121876569de2e577f7b4507ec9fd0ce4b82e6529..228fdf276a64cb13d338f0b610b99267c0973cbe 100644
--- a/src/slurmd/common/task_plugin.h
+++ b/src/slurmd/common/task_plugin.h
@@ -2,13 +2,14 @@
  *  task_plugin.h - Define plugin functions for task pre_launch and post_term.
  *****************************************************************************
  *  Copyright (C) 2005-2007 The Regents of the University of California.
- *  Copyright (C) 2008 Lawrence Livermore National Security.
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -61,6 +62,13 @@ extern int slurmd_task_fini(void);
  **************************************************************************
  */
 
+/*
+ * Slurmd has received a batch job launch request.
+ *
+ * RET - slurm error code
+ */
+extern int slurmd_batch_request(uint32_t job_id, batch_job_launch_msg_t *req);
+
 /*
  * Slurmd has received a launch request.
  *
diff --git a/src/slurmd/slurmd/Makefile.am b/src/slurmd/slurmd/Makefile.am
index 0e1a14502ab16b2f0239f78b8944e1817ba03f59..26c533f91789fa9c24c73e5eff5d8374bb44ad2c 100644
--- a/src/slurmd/slurmd/Makefile.am
+++ b/src/slurmd/slurmd/Makefile.am
@@ -30,6 +30,8 @@ SLURMD_SOURCES = \
 	$(top_builddir)/src/slurmd/common/run_script.h \
 	$(top_builddir)/src/slurmd/common/task_plugin.c \
 	$(top_builddir)/src/slurmd/common/task_plugin.h \
+	$(top_builddir)/src/slurmd/common/set_oomadj.c \
+	$(top_builddir)/src/slurmd/common/set_oomadj.h \
 	$(top_builddir)/src/slurmd/common/reverse_tree.h
 
 slurmd_SOURCES = $(SLURMD_SOURCES)
diff --git a/src/slurmd/slurmd/Makefile.in b/src/slurmd/slurmd/Makefile.in
index aac7cbffa933a29e22f9cdb305d35ff3c330edb8..9d171ba24b4b719c7491a7398586a71627e31c8d 100644
--- a/src/slurmd/slurmd/Makefile.in
+++ b/src/slurmd/slurmd/Makefile.in
@@ -45,14 +45,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -76,7 +80,7 @@ am__objects_1 = slurmd.$(OBJEXT) req.$(OBJEXT) get_mach_stat.$(OBJEXT) \
 	read_proc.$(OBJEXT) reverse_tree_math.$(OBJEXT) xcpu.$(OBJEXT) \
 	proctrack.$(OBJEXT) setproctitle.$(OBJEXT) \
 	slurmstepd_init.$(OBJEXT) run_script.$(OBJEXT) \
-	task_plugin.$(OBJEXT)
+	task_plugin.$(OBJEXT) set_oomadj.$(OBJEXT)
 am_slurmd_OBJECTS = $(am__objects_1)
 slurmd_OBJECTS = $(am_slurmd_OBJECTS)
 am__DEPENDENCIES_1 =
@@ -112,6 +116,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
@@ -295,6 +303,8 @@ SLURMD_SOURCES = \
 	$(top_builddir)/src/slurmd/common/run_script.h \
 	$(top_builddir)/src/slurmd/common/task_plugin.c \
 	$(top_builddir)/src/slurmd/common/task_plugin.h \
+	$(top_builddir)/src/slurmd/common/set_oomadj.c \
+	$(top_builddir)/src/slurmd/common/set_oomadj.h \
 	$(top_builddir)/src/slurmd/common/reverse_tree.h
 
 slurmd_SOURCES = $(SLURMD_SOURCES)
@@ -380,6 +390,7 @@ distclean-compile:
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/req.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/reverse_tree_math.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/run_script.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/set_oomadj.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/setproctitle.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/slurmd.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/slurmstepd_init.Po@am__quote@
@@ -477,6 +488,20 @@ task_plugin.obj: $(top_builddir)/src/slurmd/common/task_plugin.c
 @AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
 @am__fastdepCC_FALSE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o task_plugin.obj `if test -f '$(top_builddir)/src/slurmd/common/task_plugin.c'; then $(CYGPATH_W) '$(top_builddir)/src/slurmd/common/task_plugin.c'; else $(CYGPATH_W) '$(srcdir)/$(top_builddir)/src/slurmd/common/task_plugin.c'; fi`
 
+set_oomadj.o: $(top_builddir)/src/slurmd/common/set_oomadj.c
+@am__fastdepCC_TRUE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT set_oomadj.o -MD -MP -MF $(DEPDIR)/set_oomadj.Tpo -c -o set_oomadj.o `test -f '$(top_builddir)/src/slurmd/common/set_oomadj.c' || echo '$(srcdir)/'`$(top_builddir)/src/slurmd/common/set_oomadj.c
+@am__fastdepCC_TRUE@	mv -f $(DEPDIR)/set_oomadj.Tpo $(DEPDIR)/set_oomadj.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='$(top_builddir)/src/slurmd/common/set_oomadj.c' object='set_oomadj.o' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o set_oomadj.o `test -f '$(top_builddir)/src/slurmd/common/set_oomadj.c' || echo '$(srcdir)/'`$(top_builddir)/src/slurmd/common/set_oomadj.c
+
+set_oomadj.obj: $(top_builddir)/src/slurmd/common/set_oomadj.c
+@am__fastdepCC_TRUE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT set_oomadj.obj -MD -MP -MF $(DEPDIR)/set_oomadj.Tpo -c -o set_oomadj.obj `if test -f '$(top_builddir)/src/slurmd/common/set_oomadj.c'; then $(CYGPATH_W) '$(top_builddir)/src/slurmd/common/set_oomadj.c'; else $(CYGPATH_W) '$(srcdir)/$(top_builddir)/src/slurmd/common/set_oomadj.c'; fi`
+@am__fastdepCC_TRUE@	mv -f $(DEPDIR)/set_oomadj.Tpo $(DEPDIR)/set_oomadj.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='$(top_builddir)/src/slurmd/common/set_oomadj.c' object='set_oomadj.obj' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o set_oomadj.obj `if test -f '$(top_builddir)/src/slurmd/common/set_oomadj.c'; then $(CYGPATH_W) '$(top_builddir)/src/slurmd/common/set_oomadj.c'; else $(CYGPATH_W) '$(srcdir)/$(top_builddir)/src/slurmd/common/set_oomadj.c'; fi`
+
 mostlyclean-libtool:
 	-rm -f *.lo
 
diff --git a/src/slurmd/slurmd/get_mach_stat.c b/src/slurmd/slurmd/get_mach_stat.c
index a082be7a2ed2c0dc029856ef264a0b94833a91b1..c5c83f145a64d67b9c61242eb42ace34a8e6b906 100644
--- a/src/slurmd/slurmd/get_mach_stat.c
+++ b/src/slurmd/slurmd/get_mach_stat.c
@@ -10,10 +10,11 @@
  *  Copyright (C) 2002-2006 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/slurmd/slurmd/get_mach_stat.h b/src/slurmd/slurmd/get_mach_stat.h
index 586c7e5345a4a3e9dec80385395cf452de596764..1fcb78121066ea5043273f130691936579221219 100644
--- a/src/slurmd/slurmd/get_mach_stat.h
+++ b/src/slurmd/slurmd/get_mach_stat.h
@@ -4,10 +4,11 @@
  *  Copyright (C) 2002 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov> et. al.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/slurmd/slurmd/read_proc.c b/src/slurmd/slurmd/read_proc.c
index a0169fef59c633a6bb1bf94642064f88479aa20d..2d46a8c77eacb2fda9b1df6893b5425731b08705 100644
--- a/src/slurmd/slurmd/read_proc.c
+++ b/src/slurmd/slurmd/read_proc.c
@@ -6,10 +6,11 @@
  *  Copyright (C) 2002 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov> et. al.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/slurmd/slurmd/req.c b/src/slurmd/slurmd/req.c
index cadf2417ea7f11c6c14ce16814ffd7eaf1fcfe6a..ff8c8dc9255b9b6e053aad3aa5ad1bcd6d6715b8 100644
--- a/src/slurmd/slurmd/req.c
+++ b/src/slurmd/slurmd/req.c
@@ -1,14 +1,15 @@
 /*****************************************************************************\
  *  src/slurmd/slurmd/req.c - slurmd request handling
  *****************************************************************************
- *  Copyright (C) 2002-2006 The Regents of the University of California.
- *  Copyright (C) 2008 Lawrence Livermore National Security.
+ *  Copyright (C) 2002-2007 The Regents of the University of California.
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Mark Grondona <mgrondona@llnl.gov>.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -101,7 +102,7 @@ typedef struct {
 
 static int  _abort_job(uint32_t job_id);
 static int  _abort_step(uint32_t job_id, uint32_t step_id);
-static char ** _build_env(uint32_t jobid, uid_t uid, char *bg_part_id);
+static char ** _build_env(uint32_t jobid, uid_t uid, char *resv_id);
 static void _delay_rpc(int host_inx, int host_cnt, int usec_per_rpc);
 static void _destroy_env(char **env);
 static bool _slurm_authorized_user(uid_t uid);
@@ -132,8 +133,8 @@ static int  _rpc_health_check(slurm_msg_t *);
 static int  _rpc_step_complete(slurm_msg_t *msg);
 static int  _rpc_stat_jobacct(slurm_msg_t *msg);
 static int  _rpc_daemon_status(slurm_msg_t *msg);
-static int  _run_prolog(uint32_t jobid, uid_t uid, char *bg_part_id);
-static int  _run_epilog(uint32_t jobid, uid_t uid, char *bg_part_id);
+static int  _run_prolog(uint32_t jobid, uid_t uid, char *resv_id);
+static int  _run_epilog(uint32_t jobid, uid_t uid, char *resv_id);
 
 static bool _pause_for_job_completion(uint32_t jobid, char *nodes, 
 		int maxtime);
@@ -543,7 +544,7 @@ _forkexec_slurmstepd(slurmd_step_type_t type, void *req,
 		}
 		if (read(to_slurmd[0], &rc, sizeof(int)) != sizeof(int)) {
 			error("Error reading return code message "
-			      " from slurmstepd: %m");
+			      "from slurmstepd: %m");
 			rc = SLURM_FAILURE;
 		}
 
@@ -560,8 +561,14 @@ _forkexec_slurmstepd(slurmd_step_type_t type, void *req,
 		char slurm_stepd_path[MAXPATHLEN];
 		char *const argv[2] = { slurm_stepd_path, NULL};
 		int failed = 0;
-		snprintf(slurm_stepd_path, sizeof(slurm_stepd_path),
-			 "%s/sbin/slurmstepd", SLURM_PREFIX);
+		if (conf->stepd_loc) {
+			snprintf(slurm_stepd_path, sizeof(slurm_stepd_path),
+				 "%s", conf->stepd_loc);
+		} else {
+			snprintf(slurm_stepd_path, sizeof(slurm_stepd_path),
+				 "%s/sbin/slurmstepd", SLURM_PREFIX);
+		}
+
 		/*
 		 * Child forks and exits
 		 */
@@ -615,7 +622,7 @@ _forkexec_slurmstepd(slurmd_step_type_t type, void *req,
  * The job(step) credential is the only place to get a definitive
  * list of the nodes allocated to a job step.  We need to return
  * a hostset_t of the nodes. Validate the incoming RPC, updating 
- * job_mem and task_mem as needed.
+ * job_mem as needed.
  */
 static int
 _check_job_credential(launch_tasks_request_msg_t *req, uid_t uid,
@@ -631,6 +638,8 @@ _check_job_credential(launch_tasks_request_msg_t *req, uid_t uid,
 	uint32_t         jobid = req->job_id;
 	uint32_t         stepid = req->job_step_id;
 	int              tasks_to_launch = req->tasks_to_launch[node_id];
+	uint32_t         alloc_lps = 0;
+
 	/*
 	 * First call slurm_cred_verify() so that all valid
 	 * credentials are checked
@@ -684,56 +693,72 @@ _check_job_credential(launch_tasks_request_msg_t *req, uid_t uid,
 		goto fail;
 	}
 
-        if ((arg.alloc_lps_cnt > 0) && (tasks_to_launch > 0)) {
-                host_index = hostset_find(hset, conf->node_name);
-
-#if(0)
-		/* Left for debugging purposes */
-                if (host_index >= 0)
-                  info(" cons_res %u alloc_lps_cnt %u "
-			"task[%d] = %u = task_to_launch %d host %s ", 
-			arg.jobid, arg.alloc_lps_cnt, host_index, 
-			arg.alloc_lps[host_index], 
-			tasks_to_launch, conf->node_name);
-#endif
-
-                if (host_index < 0) { 
+        if ((arg.job_nhosts > 0) && (tasks_to_launch > 0)) {
+		uint32_t i, i_first_bit=0, i_last_bit=0;
+		host_index = hostset_find(hset, conf->node_name);
+		if ((host_index < 0) || (host_index >= arg.job_nhosts)) { 
                         error("job cr credential invalid host_index %d for "
 			      "job %u", host_index, arg.jobid);
                         goto fail; 
                 }
-		if (host_index > arg.alloc_lps_cnt)
-			error("host_index > alloc_lps_cnt in credential");
-                else if (arg.alloc_lps[host_index] == 0)
+		host_index++;	/* change from 0-origin to 1-origin */
+		for (i=0; host_index; i++) {
+			if (host_index > arg.sock_core_rep_count[i]) {
+				i_first_bit += arg.sockets_per_node[i] *
+					       arg.cores_per_socket[i] *
+					       arg.sock_core_rep_count[i];
+				host_index -= arg.sock_core_rep_count[i];
+			} else {
+				i_first_bit += arg.sockets_per_node[i] *
+					       arg.cores_per_socket[i] *
+					       (host_index - 1);
+				i_last_bit = i_first_bit +
+					     arg.sockets_per_node[i] *
+					     arg.cores_per_socket[i];
+				break;
+			}
+		}
+		/* Now count the allocated processors */
+		for (i = i_first_bit; i < i_last_bit; i++) {
+			if (bit_test(arg.core_bitmap, i))
+				alloc_lps++;
+		}
+                if (alloc_lps == 0) {
 			error("cons_res: zero processors allocated to step");
-                if (tasks_to_launch > arg.alloc_lps[host_index]) {
-			/* This is expected with the --overcommit option */
-			verbose("cons_res: More than one tasks per logical "
-				"processor (%d > %u) on host [%u.%u %ld %s] ",
-				tasks_to_launch, arg.alloc_lps[host_index], 
-				arg.jobid, arg.stepid, (long) arg.uid, 
-				arg.hostlist);
-			verbose("cons_res: Use task/affinity plug-in to bind "
-				"the tasks to the allocated resources");
-		}
-        }
+			alloc_lps = 1;
+		}
+		if (tasks_to_launch > alloc_lps) {
+			/* This is expected with the --overcommit option
+			 * or hyperthreads */
+			debug("cons_res: More than one tasks per logical "
+			      "processor (%d > %u) on host [%u.%u %ld %s] ",
+			      tasks_to_launch, alloc_lps, arg.jobid,
+			      arg.stepid, (long) arg.uid, arg.hostlist);
+		}
+		/* NOTE: alloc_lps is the count of allocated resources
+		 * (typically cores). Convert to CPU count as needed */
+		if (i_last_bit <= i_first_bit)
+			error("step credential has no CPUs selected");
+		else {
+			i = conf->conf_cpus / (i_last_bit - i_first_bit);
+			if (i > 1)
+				alloc_lps *= i;
+		}
+	} else
+		alloc_lps = 1;
 
 	/* Overwrite any memory limits in the RPC with contents of the 
 	 * memory limit within the credential. 
 	 * Reset the CPU count on this node to correct value. */
 	if (arg.job_mem & MEM_PER_CPU) {
 		req->job_mem = arg.job_mem & (~MEM_PER_CPU);
-		if ((host_index >= 0) && (host_index < arg.alloc_lps_cnt) &&
-		    (arg.alloc_lps[host_index] > 0))
-			req->job_mem *= arg.alloc_lps[host_index];
+		req->job_mem *= alloc_lps;
 	} else
 		req->job_mem = arg.job_mem;
-	req->task_mem = arg.task_mem;	/* Defunct */
-	if ((host_index >= 0) && (host_index < arg.alloc_lps_cnt))
-		req->cpus_allocated[node_id] = arg.alloc_lps[host_index];
+	req->cpus_allocated[node_id] = alloc_lps;
 #if 0
 	info("mem orig:%u cpus:%u limit:%u", 
-	     arg.job_mem, arg.alloc_lps[host_index], req->job_mem);
+	     arg.job_mem, alloc_lps, req->job_mem);
 #endif
 
 	*step_hset = hset;
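
The loop above maps this node's ordinal position within the job onto a bit
range in the credential's core_bitmap, using the run-length-compressed
(sockets_per_node, cores_per_socket, sock_core_rep_count) arrays.  A
self-contained sketch of the same arithmetic, with purely hypothetical
sample values, may make the indexing easier to follow:

/* Sketch of the core-bitmap index arithmetic above (not SLURM code;
 * all sample values are hypothetical). */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t sockets_per_node[]    = {2, 2};
	uint32_t cores_per_socket[]    = {4, 6};
	uint32_t sock_core_rep_count[] = {3, 5};	/* nodes per group */
	uint32_t host_index = 4;	/* 1-origin, as in req.c above */
	uint32_t i, i_first_bit = 0, i_last_bit = 0;

	for (i = 0; host_index; i++) {
		if (host_index > sock_core_rep_count[i]) {
			/* skip every bit of this whole group of nodes */
			i_first_bit += sockets_per_node[i] *
				       cores_per_socket[i] *
				       sock_core_rep_count[i];
			host_index -= sock_core_rep_count[i];
		} else {
			/* the node lies inside this group */
			i_first_bit += sockets_per_node[i] *
				       cores_per_socket[i] *
				       (host_index - 1);
			i_last_bit = i_first_bit +
				     sockets_per_node[i] *
				     cores_per_socket[i];
			break;
		}
	}
	/* node 4 is the first node of group 1: prints "bits [24, 36)" */
	printf("bits [%u, %u)\n", i_first_bit, i_last_bit);
	return 0;
}
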
@@ -949,11 +974,35 @@ _set_batch_job_limits(slurm_msg_t *msg)
 
 	if (slurm_cred_get_args(req->cred, &arg) != SLURM_SUCCESS)
 		return;
-
+		
 	if (arg.job_mem & MEM_PER_CPU) {
+		int i;
+		uint32_t alloc_lps = 0, last_bit = 0;   
+		if (arg.job_nhosts > 0) {
+			last_bit = arg.sockets_per_node[0] * 
+				   arg.cores_per_socket[0];
+			for (i=0; i<last_bit; i++) {
+				if (bit_test(arg.core_bitmap, i))
+					alloc_lps++;
+			}
+		}
+		if (alloc_lps == 0) {
+			error("_set_batch_job_limit: alloc_lps is zero");
+			alloc_lps = 1;
+		}
+
+		/* NOTE: alloc_lps is the count of allocated resources
+		 * (typically cores). Convert to CPU count as needed */
+		if (last_bit < 1)
+			error("Batch job credential allocates no CPUs");
+		else {
+			i = conf->conf_cpus / last_bit;
+			if (i > 1)
+				alloc_lps *= i;
+		}
+
 		req->job_mem = arg.job_mem & (~MEM_PER_CPU);
-		if (arg.alloc_lps_cnt > 1)
-			req->job_mem *= arg.alloc_lps_cnt;
+		req->job_mem *= alloc_lps;
 	} else
 		req->job_mem = arg.job_mem;
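
The branch above relies on the job_mem encoding: when the MEM_PER_CPU flag
(assumed here to be the high bit of the 32-bit field, per the slurm.h of
this vintage) is set, the remaining bits give MB per allocated CPU and are
scaled by the node's allocated CPU count; otherwise they give MB per node.
A minimal sketch of the decode, using a local stand-in for the real macro:

/* Sketch only; MEM_PER_CPU_FLAG is a hypothetical local stand-in for
 * the MEM_PER_CPU macro from slurm.h. */
#include <stdint.h>

#define MEM_PER_CPU_FLAG 0x80000000

static uint32_t node_mem_limit(uint32_t job_mem, uint32_t alloc_lps)
{
	if (job_mem & MEM_PER_CPU_FLAG)
		return (job_mem & ~MEM_PER_CPU_FLAG) * alloc_lps;
	return job_mem;		/* per-node limit, used as-is */
}
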
 
@@ -967,7 +1016,7 @@ _rpc_batch_job(slurm_msg_t *msg)
 	bool     first_job_run = true;
 	int      rc = SLURM_SUCCESS;
 	uid_t    req_uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
-	char    *bg_part_id = NULL;
+	char    *resv_id = NULL;
 	bool	 replied = false;
 	slurm_addr *cli = &msg->orig_addr;
 	
@@ -985,6 +1034,8 @@ _rpc_batch_job(slurm_msg_t *msg)
 		goto done;
 	}
 
+	slurmd_batch_request(req->job_id, req);	/* determine task affinity */
+
 	if ((req->step_id != SLURM_BATCH_SCRIPT) && (req->step_id != 0))
 		first_job_run = false;
 
@@ -1015,12 +1066,16 @@ _rpc_batch_job(slurm_msg_t *msg)
 		/* 
 	 	 * Run job prolog on this node
 	 	 */
+#ifdef HAVE_BG
 		select_g_get_jobinfo(req->select_jobinfo, 
-				     SELECT_DATA_BLOCK_ID, 
-				     &bg_part_id);
-
-		rc = _run_prolog(req->job_id, req->uid, bg_part_id);
-		xfree(bg_part_id);
+				     SELECT_DATA_BLOCK_ID, &resv_id);
+#endif
+#ifdef HAVE_CRAY_XT
+		select_g_get_jobinfo(req->select_jobinfo, 
+				     SELECT_DATA_RESV_ID, &resv_id);
+#endif
+		rc = _run_prolog(req->job_id, req->uid, resv_id);
+		xfree(resv_id);
 		if (rc) {
 			int term_sig, exit_status;
 			if (WIFSIGNALED(rc)) {
@@ -1356,12 +1411,18 @@ _rpc_ping(slurm_msg_t *msg)
 {
 	int        rc = SLURM_SUCCESS;
 	uid_t req_uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
+	static bool first_msg = true;
 
 	if (!_slurm_authorized_user(req_uid)) {
 		error("Security violation, ping RPC from uid %u",
 		      (unsigned int) req_uid);
+		if (first_msg) {
+			error("Do you have SlurmUser configured as uid %u?",
+			     (unsigned int) req_uid);
+		}
 		rc = ESLURM_USER_ID_MISSING;	/* or bad in this case */
 	}
+	first_msg = false;
 
 	/* Return result. If the reply can't be sent this indicates that
 	 * 1. The network is broken OR
@@ -1387,7 +1448,7 @@ _rpc_health_check(slurm_msg_t *msg)
 	uid_t req_uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
 
 	if (!_slurm_authorized_user(req_uid)) {
-		error("Security violation, ping RPC from uid %u",
+		error("Security violation, health check RPC from uid %u",
 		      (unsigned int) req_uid);
 		rc = ESLURM_USER_ID_MISSING;	/* or bad in this case */
 	}
@@ -1513,7 +1574,7 @@ _rpc_checkpoint_tasks(slurm_msg_t *msg)
 		goto done3;
 	}
 
-	rc = stepd_checkpoint(fd, req->signal, req->timestamp);
+	rc = stepd_checkpoint(fd, req->timestamp, req->image_dir);
 	if (rc == -1)
 		rc = ESLURMD_JOB_NOTRUNNING;
 
@@ -2565,7 +2626,8 @@ _rpc_suspend_job(slurm_msg_t *msg)
 		while ((stepd = list_next(i))) {
 			if (stepd->jobid != req->job_id) {
 				/* multiple jobs expected on shared nodes */
-				debug3("Step from other job: jobid=%u (this jobid=%u)",
+				debug3("Step from other job: jobid=%u "
+				       "(this jobid=%u)",
 				      stepd->jobid, req->job_id);
 				continue;
 			}
@@ -2625,7 +2687,7 @@ _rpc_abort_job(slurm_msg_t *msg)
 {
 	kill_job_msg_t *req    = msg->data;
 	uid_t           uid    = g_slurm_auth_get_uid(msg->auth_cred, NULL);
-	char           *bg_part_id = NULL;
+	char           *resv_id = NULL;
 
 	debug("_rpc_abort_job, uid = %d", uid);
 	/* 
@@ -2682,11 +2744,16 @@ _rpc_abort_job(slurm_msg_t *msg)
 	}
 
 	save_cred_state(conf->vctx);
-
+#ifdef HAVE_BG
 	select_g_get_jobinfo(req->select_jobinfo, SELECT_DATA_BLOCK_ID,
-		&bg_part_id);
-	_run_epilog(req->job_id, req->job_uid, bg_part_id);
-	xfree(bg_part_id);
+			     &resv_id);
+#endif
+#ifdef HAVE_CRAY_XT
+	select_g_get_jobinfo(req->select_jobinfo, SELECT_DATA_RESV_ID,
+			     &resv_id);
+#endif
+	_run_epilog(req->job_id, req->job_uid, resv_id);
+	xfree(resv_id);
 }
 
 static void 
@@ -2697,7 +2764,7 @@ _rpc_terminate_job(slurm_msg_t *msg)
 	uid_t           uid    = g_slurm_auth_get_uid(msg->auth_cred, NULL);
 	int             nsteps = 0;
 	int		delay;
-	char           *bg_part_id = NULL;
+	char           *resv_id = NULL;
 	uint16_t	base_job_state = req->job_state & (~JOB_COMPLETING);
 	slurm_ctl_conf_t *cf;
 
@@ -2824,10 +2891,16 @@ _rpc_terminate_job(slurm_msg_t *msg)
 
 	save_cred_state(conf->vctx);
 
+#ifdef HAVE_BG
 	select_g_get_jobinfo(req->select_jobinfo, SELECT_DATA_BLOCK_ID,
-		&bg_part_id);
-	rc = _run_epilog(req->job_id, req->job_uid, bg_part_id);
-	xfree(bg_part_id);
+			     &resv_id);
+#endif
+#ifdef HAVE_CRAY_XT
+	select_g_get_jobinfo(req->select_jobinfo, SELECT_DATA_RESV_ID,
+			     &resv_id);
+#endif
+	rc = _run_epilog(req->job_id, req->job_uid, resv_id);
+	xfree(resv_id);
 	
 	if (rc) {
 		int term_sig, exit_status;
@@ -3035,17 +3108,28 @@ _rpc_update_time(slurm_msg_t *msg)
 	slurm_send_rc_msg(msg, rc);
 }
 
-/* NOTE: xfree returned value */
+/* NOTE: call _destroy_env() to free returned value */
 static char **
-_build_env(uint32_t jobid, uid_t uid, char *bg_part_id)
+_build_env(uint32_t jobid, uid_t uid, char *resv_id)
 {
+	char *name;
 	char **env = xmalloc(sizeof(char *));
+
 	env[0]  = NULL;
+	setenvf(&env, "SLURM_JOB_ID", "%u", jobid);
+	setenvf(&env, "SLURM_JOB_UID",   "%u", uid);
+	name = uid_to_string(uid);
+	setenvf(&env, "SLURM_JOB_USER", "%s", name);
+	xfree(name);
 	setenvf(&env, "SLURM_JOBID", "%u", jobid);
 	setenvf(&env, "SLURM_UID",   "%u", uid);
-	if (bg_part_id) {
-		setenvf(&env, "MPIRUN_PARTITION",
-			"%s", bg_part_id);
+	if (resv_id) {
+#ifdef HAVE_BG
+		setenvf(&env, "MPIRUN_PARTITION", "%s", resv_id);
+#endif
+#ifdef HAVE_CRAY_XT
+		setenvf(&env, "BASIL_RESERVATION_ID", "%s", resv_id);
+#endif
 	}
 	return env;
 }
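
The corrected NOTE above directs callers to _destroy_env() rather than a
bare xfree(), because the returned vector owns every string in it.  The
destructor's body is not part of this hunk; it presumably amounts to
something like this sketch:

/* Sketch only -- the real _destroy_env() is not shown in this patch.
 * The vector from _build_env() is NULL-terminated, so each string is
 * freed before the array itself. */
#include "src/common/xmalloc.h"

static void
destroy_env_sketch(char **env)
{
	int i;

	if (env == NULL)
		return;
	for (i = 0; env[i] != NULL; i++)
		xfree(env[i]);
	xfree(env);
}
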
@@ -3066,11 +3150,13 @@ _destroy_env(char **env)
 }
 
 static int 
-_run_prolog(uint32_t jobid, uid_t uid, char *bg_part_id)
+_run_prolog(uint32_t jobid, uid_t uid, char *resv_id)
 {
 	int error_code;
 	char *my_prolog;
-	char **my_env = _build_env(jobid, uid, bg_part_id);
+	char **my_env = _build_env(jobid, uid, resv_id);
+	time_t start_time = time(NULL), diff_time;
+	static uint16_t msg_timeout = 0;
 
 	slurm_mutex_lock(&conf->config_mutex);
 	my_prolog = xstrdup(conf->prolog);
@@ -3080,15 +3166,23 @@ _run_prolog(uint32_t jobid, uid_t uid, char *bg_part_id)
 	xfree(my_prolog);
 	_destroy_env(my_env);
 
+	diff_time = difftime(time(NULL), start_time);
+	if (msg_timeout == 0)
+		msg_timeout = slurm_get_msg_timeout();
+	if (diff_time >= msg_timeout) {
+		error("prolog for job %u ran for %d seconds", 
+		      jobid, diff_time);
+	}
+
 	return error_code;
 }
 
 static int 
-_run_epilog(uint32_t jobid, uid_t uid, char *bg_part_id)
+_run_epilog(uint32_t jobid, uid_t uid, char *resv_id)
 {
 	int error_code;
 	char *my_epilog;
-	char **my_env = _build_env(jobid, uid, bg_part_id);
+	char **my_env = _build_env(jobid, uid, resv_id);
 
 	slurm_mutex_lock(&conf->config_mutex);
 	my_epilog = xstrdup(conf->epilog);
diff --git a/src/slurmd/slurmd/req.h b/src/slurmd/slurmd/req.h
index 211c90ac7978eca41d5307b54e4be0bc84b1c1f1..1fff983c72cd736684975d8b38b5384b980ea118 100644
--- a/src/slurmd/slurmd/req.h
+++ b/src/slurmd/slurmd/req.h
@@ -1,14 +1,15 @@
 /*****************************************************************************\
  * src/slurmd/slurmd/req.h - slurmd request handling
- * $Id: req.h 13672 2008-03-19 23:10:58Z jette $
+ * $Id: req.h 16616 2009-02-20 17:00:27Z jette $
  *****************************************************************************
  *  Copyright (C) 2002 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Mark Grondona <mgrondona@llnl.gov>.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/slurmd/slurmd/reverse_tree_math.c b/src/slurmd/slurmd/reverse_tree_math.c
index 9cb03b9f6d1c88df53e751c4d9fdbf24e77d980e..c7d627b97956746ac0b82b73ec061dd75ab886d3 100644
--- a/src/slurmd/slurmd/reverse_tree_math.c
+++ b/src/slurmd/slurmd/reverse_tree_math.c
@@ -5,10 +5,11 @@
  *  Copyright (C) 2006 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Christopher J. Morrone <morrone2@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/slurmd/slurmd/reverse_tree_math.h b/src/slurmd/slurmd/reverse_tree_math.h
index 6896db8ace8228cab860934c464f5599a53c61a8..b65e9537d350abc5b7ee8e15db8e9b51ae7dbd4a 100644
--- a/src/slurmd/slurmd/reverse_tree_math.h
+++ b/src/slurmd/slurmd/reverse_tree_math.h
@@ -5,10 +5,11 @@
  *  Copyright (C) 2006 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Christopher J. Morrone <morrone2@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/slurmd/slurmd/slurmd.c b/src/slurmd/slurmd/slurmd.c
index 600ace4bbbacc81f57a31795a97d21bb2af97516..63d450d52c4f9a890255ab5c0c1501ac6143e98c 100644
--- a/src/slurmd/slurmd/slurmd.c
+++ b/src/slurmd/slurmd/slurmd.c
@@ -1,16 +1,17 @@
 /*****************************************************************************\
  *  src/slurmd/slurmd/slurmd.c - main slurm node server daemon
- *  $Id: slurmd.c 17177 2009-04-07 18:09:43Z jette $
+ *  $Id: slurmd.c 17397 2009-05-04 16:07:42Z da $
  *****************************************************************************
  *  Copyright (C) 2002-2007 The Regents of the University of California.
- *  Copyright (C) 2008 Lawrence Livermore National Security.
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
  *  Portions Copyright (C) 2008 Vijay Ramasubramanian.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Mark Grondona <mgrondona@llnl.gov>.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -84,8 +85,9 @@
 #include "src/slurmd/common/setproctitle.h"
 #include "src/slurmd/common/proctrack.h"
 #include "src/slurmd/common/task_plugin.h"
+#include "src/slurmd/common/set_oomadj.h"
 
-#define GETOPT_ARGS	"L:Dvhcf:MN:V"
+#define GETOPT_ARGS	"cd:Df:hL:MN:vV"
 
 #ifndef MAXHOSTNAMELEN
 #  define MAXHOSTNAMELEN	64
@@ -152,6 +154,9 @@ main (int argc, char *argv[])
 {
 	int i, pidfd;
 	int blocked_signals[] = {SIGPIPE, 0};
+	char *oom_value;
+	uint32_t slurmd_uid = 0;
+	uint32_t curr_uid = 0;
 
 	/*
 	 * Make sure we have no extra open files which 
@@ -180,7 +185,30 @@ main (int argc, char *argv[])
 	_init_conf();
 	conf->argv = &argv;
 	conf->argc = &argc;
-
+	slurmd_uid = slurm_get_slurmd_user_id();
+	curr_uid = getuid();
+	if(curr_uid != slurmd_uid) {
+		struct passwd *pw = NULL;
+		char *slurmd_user = NULL;
+		char *curr_user = NULL;
+
+		/* getpwuid() returns a pointer to a static buffer, so
+		   the result of the first call must be xstrdup()'d or
+		   the second call will overwrite it.  The copy is a
+		   memory leak, but fatal() is called right after, so
+		   it isn't a problem.
+		*/
+		if ((pw=getpwuid(slurmd_uid)))
+			slurmd_user = xstrdup(pw->pw_name);	
+		if ((pw=getpwuid(curr_uid)))
+			curr_user = pw->pw_name;	
+
+		fatal("You are running slurmd as something "
+		      "other than user %s(%d).  If you want to "
+		      "run as this user add SlurmdUser=%s "
+		      "to the slurm.conf file.",
+		      slurmd_user, slurmd_uid, curr_user);
+	}
 	init_setproctitle(argc, argv);
 
 	/* NOTE: conf->logfile always NULL at this point */
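
The comment in the hunk above concedes that the xstrdup()'d user name is
leaked because getpwuid() reuses a static buffer across calls.  For
illustration only (this is not what slurmd does), the reentrant
getpwuid_r() avoids both the overwrite and the need to leak a copy:

/* Illustrative alternative, not the slurmd implementation: getpwuid_r()
 * fills a caller-supplied buffer, so two lookups cannot clobber each
 * other and the returned copy can be freed normally. */
#include <pwd.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>

static char *uid_to_name_copy(uid_t uid)
{
	struct passwd pw, *result = NULL;
	char buf[4096];

	if (getpwuid_r(uid, &pw, buf, sizeof(buf), &result) != 0 ||
	    result == NULL)
		return NULL;
	return strdup(pw.pw_name);	/* caller frees */
}
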
@@ -212,6 +240,12 @@ main (int argc, char *argv[])
 	info("slurmd version %s started", SLURM_VERSION);
 	debug3("finished daemonize");
 
+	if ((oom_value = getenv("SLURMD_OOM_ADJ"))) {
+		i = atoi(oom_value);
+		debug("Setting slurmd oom_adj to %d", i);
+		set_oom_adj(i);
+	}
+
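
The set_oomadj.c source itself is outside this excerpt; judging from the
call above, set_oom_adj() presumably writes the value through the Linux
/proc/self/oom_adj interface current at the time, where -17 (OOM_DISABLE)
exempts the process from the OOM killer entirely.  A minimal sketch under
that assumption:

/* Minimal sketch, assuming the /proc/self/oom_adj interface; the real
 * implementation lives in src/slurmd/common/set_oomadj.c (not shown).
 * Negative values make the OOM killer less likely to pick the process. */
#include <errno.h>
#include <stdio.h>

int set_oom_adj_sketch(int adj)
{
	FILE *fp = fopen("/proc/self/oom_adj", "w");

	if (fp == NULL)
		return (errno == ENOENT) ? 0 : -1;  /* kernel lacks the knob */
	fprintf(fp, "%d", adj);
	fclose(fp);
	return 0;
}

Starting the daemon with SLURMD_OOM_ADJ=-17 in its environment would then
shield slurmd itself while leaving job tasks eligible for OOM selection.
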
 	_kill_old_slurmd();
 
 	if (conf->mlock_pages) {
@@ -467,6 +501,7 @@ _fill_registration_msg(slurm_node_registration_status_msg_t *msg)
 	int  n;
 	char *arch, *os;
 	struct utsname buf;
+	static bool first_msg = true;
 
 	msg->node_name  = xstrdup (conf->node_name);
 	msg->cpus	 = conf->cpus;
@@ -476,10 +511,18 @@ _fill_registration_msg(slurm_node_registration_status_msg_t *msg)
 	msg->real_memory = conf->real_memory_size;
 	msg->tmp_disk    = conf->tmp_disk_space;
 
-	debug3("Procs=%u Sockets=%u Cores=%u Threads=%u Memory=%u TmpDisk=%u",
-	       msg->cpus, msg->sockets, msg->cores, msg->threads,
-	       msg->real_memory, msg->tmp_disk);
-
+	if (first_msg) {
+		first_msg = false;
+		info("Procs=%u Sockets=%u Cores=%u Threads=%u "
+		     "Memory=%u TmpDisk=%u",
+		     msg->cpus, msg->sockets, msg->cores, msg->threads,
+		     msg->real_memory, msg->tmp_disk);
+	} else {
+		debug3("Procs=%u Sockets=%u Cores=%u Threads=%u "
+		       "Memory=%u TmpDisk=%u",
+		       msg->cpus, msg->sockets, msg->cores, msg->threads,
+		       msg->real_memory, msg->tmp_disk);
+	}
 	uname(&buf);
 	if ((arch = getenv("SLURM_ARCH")))
 		msg->arch = xstrdup(arch);
@@ -605,6 +648,11 @@ _read_config()
 
 	_massage_pathname(&conf->logfile);
 
+	/* set node_addr if relevant */
+	if((conf->node_addr = slurm_conf_get_nodeaddr(conf->hostname)))
+		if (strcmp(conf->node_addr, conf->hostname) == 0)
+			xfree(conf->node_addr);	/* Sets to NULL */
+
 	conf->port = slurm_conf_get_port(conf->node_name);
 	slurm_conf_get_cpus_sct(conf->node_name,
 				&conf->conf_cpus,  &conf->conf_sockets,
@@ -659,9 +707,7 @@ _read_config()
 	if (cf->slurmctld_port == 0)
 		fatal("Unable to establish controller port");
 	conf->use_pam = cf->use_pam;
-
-	if (cf->task_plugin_param & TASK_PARAM_CPUSETS)
-		conf->use_cpusets = 1;
+	conf->task_plugin_param = cf->task_plugin_param;
 
 	slurm_mutex_unlock(&conf->config_mutex);
 	slurm_conf_unlock();
@@ -746,20 +792,24 @@ _print_conf()
 	debug3("Logfile     = `%s'",     cf->slurmd_logfile);
 	debug3("HealthCheck = `%s'",     conf->health_check_program);
 	debug3("NodeName    = %s",       conf->node_name);
+	debug3("NodeAddr    = %s",       conf->node_addr);
 	debug3("Port        = %u",       conf->port);
 	debug3("Prolog      = `%s'",     conf->prolog);
 	debug3("TmpFS       = `%s'",     conf->tmpfs);
 	debug3("Public Cert = `%s'",     conf->pubkey);
+	debug3("Slurmstepd  = `%s'",     conf->stepd_loc);
 	debug3("Spool Dir   = `%s'",     conf->spooldir);
 	debug3("Pid File    = `%s'",     conf->pidfile);
 	debug3("Slurm UID   = %u",       conf->slurm_user_id);
 	debug3("TaskProlog  = `%s'",     conf->task_prolog);
 	debug3("TaskEpilog  = `%s'",     conf->task_epilog);
-	debug3("Use CPUSETS = %u",       conf->use_cpusets);
+	debug3("TaskPluginParam = %u",   conf->task_plugin_param);
 	debug3("Use PAM     = %u",       conf->use_pam);
 	slurm_conf_unlock();
 }
 
+/* Initialize slurmd configuration table.
+ * Everything is already NULL/zero filled when called */
 static void
 _init_conf()
 {
@@ -771,33 +821,12 @@ _init_conf()
 		exit(1);
 	}
 	conf->hostname    = xstrdup(host);
-	conf->node_name   = NULL;
-	conf->sockets     = 0;
-	conf->cores       = 0;
-	conf->threads     = 0;
-	conf->block_map_size = 0;
-	conf->block_map   = NULL;
-	conf->block_map_inv = NULL;
-	conf->conffile    = NULL;
-	conf->epilog      = NULL;
-	conf->health_check_program = NULL;
-	conf->logfile     = NULL;
-	conf->pubkey      = NULL;
-	conf->prolog      = NULL;
-	conf->task_prolog = NULL;
-	conf->task_epilog = NULL;
-
-	conf->port        =  0;
 	conf->daemonize   =  1;
 	conf->lfd         = -1;
-	conf->cleanstart  =  0;
-	conf->mlock_pages =  0;
 	conf->log_opts    = lopts;
 	conf->debug_level = LOG_LEVEL_INFO;
 	conf->pidfile     = xstrdup(DEFAULT_SLURMD_PIDFILE);
 	conf->spooldir	  = xstrdup(DEFAULT_SPOOLDIR);
-	conf->use_pam	  =  0;
-	conf->use_cpusets =  0;
 
 	slurm_mutex_init(&conf->config_mutex);
 	return;
@@ -812,6 +841,7 @@ _destroy_conf()
 		xfree(conf->health_check_program);
 		xfree(conf->hostname);
 		xfree(conf->node_name);
+		xfree(conf->node_addr);
 		xfree(conf->conffile);
 		xfree(conf->prolog);
 		xfree(conf->epilog);
@@ -821,6 +851,7 @@ _destroy_conf()
 		xfree(conf->task_epilog);
 		xfree(conf->pidfile);
 		xfree(conf->spooldir);
+		xfree(conf->stepd_loc);
 		xfree(conf->tmpfs);
 		slurm_mutex_destroy(&conf->config_mutex);
 		slurm_cred_ctx_destroy(conf->vctx);
@@ -838,12 +869,15 @@ _process_cmdline(int ac, char **av)
 
 	while ((c = getopt(ac, av, GETOPT_ARGS)) > 0) {
 		switch (c) {
+		case 'c':
+			conf->cleanstart = 1;
+			break;
+		case 'd':
+			conf->stepd_loc = xstrdup(optarg);
+			break;
 		case 'D': 
 			conf->daemonize = 0;
 			break;
-		case 'v':
-			conf->debug_level++;
-			break;
 		case 'f':
 			conf->conffile = xstrdup(optarg);
 			break;
@@ -854,15 +888,15 @@ _process_cmdline(int ac, char **av)
 		case 'L':
 			conf->logfile = xstrdup(optarg);
 			break;
-		case 'c':
-			conf->cleanstart = 1;
-			break;
 		case 'M':
 			conf->mlock_pages = 1;
 			break;
 		case 'N':
 			conf->node_name = xstrdup(optarg);
 			break;
+		case 'v':
+			conf->debug_level++;
+			break;
 		case 'V':
 			printf("%s %s\n", PACKAGE, SLURM_VERSION);
 			exit(0);
@@ -879,10 +913,18 @@ _process_cmdline(int ac, char **av)
 static void
 _create_msg_socket()
 {
-	slurm_fd ld = slurm_init_msg_engine_port(conf->port);
+	char* node_addr;
+
+	slurm_fd ld = slurm_init_msg_engine_addrname_port(conf->node_addr,
+							  conf->port);
+	if (conf->node_addr == NULL)
+		node_addr = "*";
+	else
+		node_addr = conf->node_addr;
 
 	if (ld < 0) {
-		error("Unable to bind listen port (%d): %m", conf->port);
+		error("Unable to bind listen port (%s:%d): %m",
+		      node_addr, conf->port);
 		exit(1);
 	}
 
@@ -890,7 +932,8 @@ _create_msg_socket()
 
 	conf->lfd = ld;
 
-	debug3("succesfully opened slurm listen port %d", conf->port);
+	debug3("succesfully opened slurm listen port %s:%d",
+	       node_addr, conf->port);
 
 	return;
 }
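
With NodeAddr unset, node_addr stays NULL and the listener presumably binds
the wildcard address as before; with it set, only the configured address is
bound, which lets several slurmds share one physical host.  A sketch of
that bind behaviour with plain sockets, assuming addr is NULL or a
dotted-quad string:

/* Sketch (not SLURM code) of wildcard-vs-specific binding. */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

static int listen_on(const char *addr, int port)
{
	struct sockaddr_in sin;
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd < 0)
		return -1;
	memset(&sin, 0, sizeof(sin));
	sin.sin_family = AF_INET;
	sin.sin_port = htons(port);
	sin.sin_addr.s_addr = addr ? inet_addr(addr) : htonl(INADDR_ANY);
	if (bind(fd, (struct sockaddr *) &sin, sizeof(sin)) < 0 ||
	    listen(fd, 128) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}
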
@@ -984,8 +1027,13 @@ _slurmd_init()
 	fd_set_close_on_exec(devnull);
 
 	/* make sure we have slurmstepd installed */
-	snprintf(slurm_stepd_path, sizeof(slurm_stepd_path),
-		 "%s/sbin/slurmstepd", SLURM_PREFIX);
+	if (conf->stepd_loc) {
+		snprintf(slurm_stepd_path, sizeof(slurm_stepd_path),
+			 "%s", conf->stepd_loc);
+	} else {
+		snprintf(slurm_stepd_path, sizeof(slurm_stepd_path),
+			 "%s/sbin/slurmstepd", SLURM_PREFIX);
+	}
 	if (stat(slurm_stepd_path, &stat_buf)) {
 		fatal("Unable to find slurmstepd file at %s",
 			slurm_stepd_path);
@@ -1046,9 +1094,7 @@ cleanup:
  * Then exercise the slurmd functionality before executing
  * > scontrol shutdown
  *
- * There should be some definitely lost records from 
- * init_setproctitle (setproctitle.c), but it should otherwise account 
- * for all memory.
+ * All allocated memory should be freed
 \**************************************************************************/
 static int
 _slurmd_fini()
@@ -1061,6 +1107,7 @@ _slurmd_fini()
 	slurm_proctrack_fini();
 	slurm_auth_fini();
 	slurmd_req(NULL);	/* purge memory allocated by slurmd_req() */
+	fini_setproctitle();
 	return SLURM_SUCCESS;
 }
 
@@ -1141,6 +1188,7 @@ _usage()
 	fprintf(stderr, "\
 Usage: %s [OPTIONS]\n\
    -c          Force cleanup of slurmd shared memory.\n\
+   -d stepd    Pathname to the slurmstepd program.\n\
    -D          Run daemon in foreground.\n\
    -M          Use mlock() to lock slurmd pages into memory.\n\
    -h          Print this help message.\n\
diff --git a/src/slurmd/slurmd/slurmd.h b/src/slurmd/slurmd/slurmd.h
index 1723f9d4da722bbb0f52141ccedea6364c9b52a0..deda34360c61dc9b58595dc5c077e33d7050aa36 100644
--- a/src/slurmd/slurmd/slurmd.h
+++ b/src/slurmd/slurmd/slurmd.h
@@ -1,14 +1,16 @@
 /*****************************************************************************\
  * src/slurmd/slurmd/slurmd.h - header for slurmd
- * $Id: slurmd.h 13690 2008-03-21 18:17:38Z jette $
+ * $Id: slurmd.h 17386 2009-05-01 19:40:09Z jette $
  *****************************************************************************
- *  Copyright (C) 2002-2006 The Regents of the University of California.
+ *  Copyright (C) 2002-2007 The Regents of the University of California.
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Mark Grondona <mgrondona@llnl.gov>.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -94,7 +96,8 @@ typedef struct slurmd_config {
 	uint16_t      cr_type;           /* Consumable Resource Type:       *
 					 * CR_SOCKET, CR_CORE, CR_MEMORY,  *
 					 * CR_DEFAULT, etc.                */
-        char         *node_name;        /* node name                       */
+	char         *node_name;	/* node name                       */
+	char         *node_addr;	/* node's address                  */
 	char         *conffile;		/* config filename                 */
 	char         *logfile;		/* slurmd logfile, if any          */
 	char         *spooldir;		/* SlurmdSpoolDir	           */
@@ -104,6 +107,7 @@ typedef struct slurmd_config {
 	char         *pubkey;		/* location of job cred public key */
 	char         *epilog;		/* Path to Epilog script	   */
 	char         *prolog;		/* Path to prolog script           */
+	char         *stepd_loc;	/* Non-standard slurmstepd path    */
 	char         *task_prolog;	/* per-task prolog script          */
 	char         *task_epilog;	/* per-task epilog script          */
 	int           port;	        /* local slurmd port               */
@@ -121,7 +125,8 @@ typedef struct slurmd_config {
 	pthread_mutex_t config_mutex;	/* lock for slurmd_config access   */
 	uint16_t        job_acct_gather_freq;
 	uint16_t	use_pam;
-	uint16_t	use_cpusets;	/* Use cpusets, if available       */
+	uint16_t	task_plugin_param; /* TaskPluginParams, expressed
+					 * using cpu_bind_type_t flags */
 	uint16_t	propagate_prio;	/* PropagatePrioProcess flag       */
 } slurmd_conf_t;
 
diff --git a/src/slurmd/slurmd/xcpu.c b/src/slurmd/slurmd/xcpu.c
index 6bc5d330e2192cc5b201eb525d57b4c6243b64f2..698713d19ed6f0713b4879bd612212623e3e189d 100644
--- a/src/slurmd/slurmd/xcpu.c
+++ b/src/slurmd/slurmd/xcpu.c
@@ -4,10 +4,11 @@
  *  Copyright (C) 2006 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/slurmd/slurmd/xcpu.h b/src/slurmd/slurmd/xcpu.h
index b98ba58d7049c1314bd4e8cc1835aabbd480fac1..ec91503ec90648164166cef14ca30242d7cc996a 100644
--- a/src/slurmd/slurmd/xcpu.h
+++ b/src/slurmd/slurmd/xcpu.h
@@ -4,10 +4,11 @@
  *  Copyright (C) 2006 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/slurmd/slurmstepd/Makefile.am b/src/slurmd/slurmstepd/Makefile.am
index 4c090b9bbfa4eed879b01b91d98459566c9f668b..21f2e4a7033c3ab720c58953dbc2add4a5a1c649 100644
--- a/src/slurmd/slurmstepd/Makefile.am
+++ b/src/slurmd/slurmstepd/Makefile.am
@@ -35,6 +35,8 @@ slurmstepd_SOURCES = 	        	\
 	$(top_builddir)/src/slurmd/common/run_script.h \
 	$(top_builddir)/src/slurmd/common/task_plugin.c \
 	$(top_builddir)/src/slurmd/common/task_plugin.h \
+	$(top_builddir)/src/slurmd/common/set_oomadj.c \
+	$(top_builddir)/src/slurmd/common/set_oomadj.h \
 	$(top_builddir)/src/slurmd/common/reverse_tree.h
 
 if HAVE_AIX
diff --git a/src/slurmd/slurmstepd/Makefile.in b/src/slurmd/slurmstepd/Makefile.in
index da2e59b7c784c0d07b6bcaa7f0a3f91da8c0f574..916a6ab8e5f22e7867aceebf61e1cba4b70a2d8d 100644
--- a/src/slurmd/slurmstepd/Makefile.in
+++ b/src/slurmd/slurmstepd/Makefile.in
@@ -45,14 +45,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -78,7 +82,8 @@ am_slurmstepd_OBJECTS = slurmstepd.$(OBJEXT) mgr.$(OBJEXT) \
 	pam_ses.$(OBJEXT) req.$(OBJEXT) multi_prog.$(OBJEXT) \
 	step_terminate_monitor.$(OBJEXT) proctrack.$(OBJEXT) \
 	setproctitle.$(OBJEXT) slurmstepd_init.$(OBJEXT) \
-	run_script.$(OBJEXT) task_plugin.$(OBJEXT)
+	run_script.$(OBJEXT) task_plugin.$(OBJEXT) \
+	set_oomadj.$(OBJEXT)
 slurmstepd_OBJECTS = $(am_slurmstepd_OBJECTS)
 am__DEPENDENCIES_1 =
 slurmstepd_DEPENDENCIES = $(top_builddir)/src/common/libdaemonize.la \
@@ -114,6 +119,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
@@ -303,6 +312,8 @@ slurmstepd_SOURCES = \
 	$(top_builddir)/src/slurmd/common/run_script.h \
 	$(top_builddir)/src/slurmd/common/task_plugin.c \
 	$(top_builddir)/src/slurmd/common/task_plugin.h \
+	$(top_builddir)/src/slurmd/common/set_oomadj.c \
+	$(top_builddir)/src/slurmd/common/set_oomadj.h \
 	$(top_builddir)/src/slurmd/common/reverse_tree.h
 
 @HAVE_AIX_FALSE@slurmstepd_LDFLAGS = -export-dynamic $(CMD_LDFLAGS)
@@ -390,6 +401,7 @@ distclean-compile:
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/proctrack.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/req.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/run_script.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/set_oomadj.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/setproctitle.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/slurmstepd.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/slurmstepd_init.Po@am__quote@
@@ -490,6 +502,20 @@ task_plugin.obj: $(top_builddir)/src/slurmd/common/task_plugin.c
 @AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
 @am__fastdepCC_FALSE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o task_plugin.obj `if test -f '$(top_builddir)/src/slurmd/common/task_plugin.c'; then $(CYGPATH_W) '$(top_builddir)/src/slurmd/common/task_plugin.c'; else $(CYGPATH_W) '$(srcdir)/$(top_builddir)/src/slurmd/common/task_plugin.c'; fi`
 
+set_oomadj.o: $(top_builddir)/src/slurmd/common/set_oomadj.c
+@am__fastdepCC_TRUE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT set_oomadj.o -MD -MP -MF $(DEPDIR)/set_oomadj.Tpo -c -o set_oomadj.o `test -f '$(top_builddir)/src/slurmd/common/set_oomadj.c' || echo '$(srcdir)/'`$(top_builddir)/src/slurmd/common/set_oomadj.c
+@am__fastdepCC_TRUE@	mv -f $(DEPDIR)/set_oomadj.Tpo $(DEPDIR)/set_oomadj.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='$(top_builddir)/src/slurmd/common/set_oomadj.c' object='set_oomadj.o' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o set_oomadj.o `test -f '$(top_builddir)/src/slurmd/common/set_oomadj.c' || echo '$(srcdir)/'`$(top_builddir)/src/slurmd/common/set_oomadj.c
+
+set_oomadj.obj: $(top_builddir)/src/slurmd/common/set_oomadj.c
+@am__fastdepCC_TRUE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT set_oomadj.obj -MD -MP -MF $(DEPDIR)/set_oomadj.Tpo -c -o set_oomadj.obj `if test -f '$(top_builddir)/src/slurmd/common/set_oomadj.c'; then $(CYGPATH_W) '$(top_builddir)/src/slurmd/common/set_oomadj.c'; else $(CYGPATH_W) '$(srcdir)/$(top_builddir)/src/slurmd/common/set_oomadj.c'; fi`
+@am__fastdepCC_TRUE@	mv -f $(DEPDIR)/set_oomadj.Tpo $(DEPDIR)/set_oomadj.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='$(top_builddir)/src/slurmd/common/set_oomadj.c' object='set_oomadj.obj' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o set_oomadj.obj `if test -f '$(top_builddir)/src/slurmd/common/set_oomadj.c'; then $(CYGPATH_W) '$(top_builddir)/src/slurmd/common/set_oomadj.c'; else $(CYGPATH_W) '$(srcdir)/$(top_builddir)/src/slurmd/common/set_oomadj.c'; fi`
+
 mostlyclean-libtool:
 	-rm -f *.lo
 
diff --git a/src/slurmd/slurmstepd/fname.c b/src/slurmd/slurmstepd/fname.c
index 6c639a9701b191e4b2fca0fc2ef777949e695db4..13b1ca3695d45a08b161b737046f2c7d6d841bc9 100644
--- a/src/slurmd/slurmstepd/fname.c
+++ b/src/slurmd/slurmstepd/fname.c
@@ -5,10 +5,11 @@
  *  Copyright (C) 2002 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Mark Grondona <mgrondona@llnl.gov>.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/slurmd/slurmstepd/fname.h b/src/slurmd/slurmstepd/fname.h
index 156423b5750eb14adac2ccb777d52cb110fa0a5a..31c3e7ff1e9a5215f64b6da05ec6d0aa01a453a1 100644
--- a/src/slurmd/slurmstepd/fname.h
+++ b/src/slurmd/slurmstepd/fname.h
@@ -5,10 +5,11 @@
  *  Copyright (C) 2002 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Mark Grondona <mgrondona@llnl.gov>.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/slurmd/slurmstepd/io.c b/src/slurmd/slurmstepd/io.c
index bc01281306cdb801b6e2ab7f5382f56d751ac9a3..d4ff3c45ab11691c27d51b1daea12d5f909c1636 100644
--- a/src/slurmd/slurmstepd/io.c
+++ b/src/slurmd/slurmstepd/io.c
@@ -1,14 +1,15 @@
 /*****************************************************************************\
  * src/slurmd/slurmstepd/io.c - Standard I/O handling routines for slurmstepd
- * $Id: io.c 13672 2008-03-19 23:10:58Z jette $
+ * $Id: io.c 17803 2009-06-10 22:06:56Z da $
  *****************************************************************************
  *  Copyright (C) 2002 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Mark Grondona <mgrondona@llnl.gov>.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -79,6 +80,7 @@
 #include "src/common/xmalloc.h"
 #include "src/common/xsignal.h"
 #include "src/common/xstring.h"
+#include "src/common/write_labelled_message.h"
 
 #include "src/slurmd/slurmd/slurmd.h"
 #include "src/slurmd/slurmstepd/io.h"
@@ -118,8 +120,27 @@ struct client_io_info {
 	struct io_buf *out_msg;
 	int32_t out_remaining;
 	bool out_eof;
+
+	/* Used by clients that write only stdout or stderr, and/or write
+	   output from only one task; -1 means accept output from any task. */
+	int  ltaskid_stdout, ltaskid_stderr;
+	bool labelio;
+	int  label_width;
+
+	/* true if writing to a file, false if writing to a socket */
+	bool is_local_file;
 };
 
+
+static bool _local_file_writable(eio_obj_t *);
+static int  _local_file_write(eio_obj_t *, List);
+
+struct io_operations local_file_ops = {
+	writable:	&_local_file_writable,
+	handle_write:	&_local_file_write,
+};
+
+
 /**********************************************************************
  * Task write declarations
  **********************************************************************/
@@ -452,6 +473,90 @@ again:
 	return SLURM_SUCCESS;
 }
 
+
+static bool 
+_local_file_writable(eio_obj_t *obj)
+{
+	struct client_io_info *client = (struct client_io_info *) obj->arg;
+
+	xassert(client->magic == CLIENT_IO_MAGIC);
+
+	if (client->out_eof == true)
+		return false;
+
+	if (client->out_msg != NULL || !list_is_empty(client->msg_queue))
+		return true;
+
+	return false;
+}
+
+
+/*
+ * The slurmstepd writes I/O to a file, possibly adding a label.
+ */
+static int
+_local_file_write(eio_obj_t *obj, List objs)
+{
+	struct client_io_info *client = (struct client_io_info *) obj->arg;
+	void *buf;
+	int n;
+	struct slurm_io_header header;
+	Buf header_tmp_buf;
+
+	xassert(client->magic == CLIENT_IO_MAGIC);
+	/*
+	 * If we aren't already in the middle of sending a message, get the
+	 * next message from the queue.
+	 */
+	if (client->out_msg == NULL) {
+		client->out_msg = list_dequeue(client->msg_queue);
+		if (client->out_msg == NULL) {
+			return SLURM_SUCCESS;
+		}
+		client->out_remaining = client->out_msg->length - 
+					io_hdr_packed_size();
+	}
+
+	/* Wrap the message in a temporary buffer and unpack its header;
+	   this is done only to read the global task id. */
+	header_tmp_buf = create_buf(client->out_msg->data, 
+				    client->out_msg->length);
+	io_hdr_unpack(&header, header_tmp_buf);
+	header_tmp_buf->head = NULL;
+	free_buf(header_tmp_buf);
+
+	/* A zero-length message indicates the end of a stream from one
+	   of the tasks.  Just free the message and return. */
+	if (header.length == 0) {
+		_free_outgoing_msg(client->out_msg, client->job);
+		client->out_msg = NULL;
+		return SLURM_SUCCESS;
+	}
+
+	/* Write the message to the file. */
+	buf = client->out_msg->data + 
+		(client->out_msg->length - client->out_remaining);
+
+	n = write_labelled_message(obj->fd, buf, client->out_remaining, 
+				   header.gtaskid, client->labelio, 
+				   client->label_width);
+	if (n < 0) {
+		client->out_eof = true;
+		_free_all_outgoing_msgs(client->msg_queue, client->job);
+		return SLURM_ERROR;
+	}
+
+	client->out_remaining -= n;
+	if (client->out_remaining == 0) {
+		_free_outgoing_msg(client->out_msg, client->job);
+		client->out_msg = NULL;
+	}
+	return SLURM_SUCCESS;
+}
+
+
+
+
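
write_labelled_message() comes from src/common (see the new include above)
and its body is not part of this patch.  From the call site, its assumed
contract is: write up to len bytes from buf to fd, prefixing each line with
the global task id padded to label_width when labelling is enabled, and
return the number of bytes of buf consumed, or -1 on error.  A sketch under
those assumptions:

/* Sketch only; the real write_labelled_message() (in src/common) is
 * not shown here.  Writes buf to fd, optionally prefixing each line
 * with a fixed-width task label; returns bytes consumed or -1. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int write_labelled_sketch(int fd, const char *buf, int len,
				 int gtaskid, int labelio, int width)
{
	int done = 0;

	while (done < len) {
		const char *nl = memchr(buf + done, '\n', len - done);
		int n = nl ? (int) (nl - (buf + done)) + 1 : len - done;

		if (labelio) {
			char prefix[32];
			int plen = snprintf(prefix, sizeof(prefix),
					    "%*d: ", width, gtaskid);
			if (write(fd, prefix, plen) < 0)
				return -1;
		}
		if (write(fd, buf + done, n) < 0)
			return -1;
		done += n;
	}
	return done;
}
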
 /**********************************************************************
  * Task write functions
  **********************************************************************/
@@ -541,7 +646,7 @@ _task_write(eio_obj_t *obj, List objs)
 	}
 
 	/*
-	 * Write message to socket.
+	 * Write message to pipe.
 	 */
 	buf = in->msg->data + (in->msg->length - in->remaining);
 again:
@@ -798,22 +903,7 @@ _spawn_window_manager(slurmd_task_info_t *task, slurmd_job_t *job)
 static int
 _init_task_stdio_fds(slurmd_task_info_t *task, slurmd_job_t *job)
 {
-	slurm_ctl_conf_t *conf;
-	int file_flags;
-
-	/* set files for opening stdout/err */
-	if (job->open_mode == OPEN_MODE_APPEND)
-		file_flags = O_CREAT|O_WRONLY|O_APPEND;
-	else if (job->open_mode == OPEN_MODE_TRUNCATE)
-		file_flags = O_CREAT|O_WRONLY|O_APPEND|O_TRUNC;
-	else {
-		conf = slurm_conf_lock();
-		if (conf->job_file_append)
-			file_flags = O_CREAT|O_WRONLY|O_APPEND;
-		else
-			file_flags = O_CREAT|O_WRONLY|O_APPEND|O_TRUNC;
-		slurm_conf_unlock();
-	}
+	int file_flags = io_get_file_flags(job);
 
 	/*
 	 *  Initialize stdin
@@ -899,20 +989,17 @@ _init_task_stdio_fds(slurmd_task_info_t *task, slurmd_job_t *job)
 			fd_set_close_on_exec(task->stdout_fd);
 			task->from_stdout = -1;  /* not used */
 		}
-	} else if (task->ofname != NULL) {
+	} else if (task->ofname != NULL && 
+		   (!job->labelio || strcmp(task->ofname, "/dev/null")==0)) {
 #else
-	if (task->ofname != NULL) {
+	if (task->ofname != NULL && 
+	    (!job->labelio || strcmp(task->ofname, "/dev/null")==0) ) {
 #endif
 		/* open file on task's stdout */
 		debug5("  stdout file name = %s", task->ofname);
 		task->stdout_fd = open(task->ofname, file_flags, 0666);
 		if (task->stdout_fd == -1) {
-			error("Could not open stdout file: %m");
-			xfree(task->ofname);
-			task->ofname = fname_create(job, "slurm-%J.out", 0);
-			task->stdout_fd = open(task->ofname, file_flags, 0666);
-			if (task->stdout_fd == -1)
-				return SLURM_ERROR;
+			return SLURM_ERROR;
 		}
 		fd_set_close_on_exec(task->stdout_fd);
 		task->from_stdout = -1; /* not used */
@@ -957,20 +1044,17 @@ _init_task_stdio_fds(slurmd_task_info_t *task, slurmd_job_t *job)
 			fd_set_close_on_exec(task->stderr_fd);
 			task->from_stderr = -1;  /* not used */
 		}
-	} else if (task->efname != NULL) {
+	} else if (task->efname != NULL && 
+		   (!job->labelio || strcmp(task->efname, "/dev/null")==0)) {
 #else
-	if (task->efname != NULL) {
+	if (task->efname != NULL && 
+	    (!job->labelio || strcmp(task->efname, "/dev/null")==0) ) {
 #endif
 		/* open file on task's stderr */
 		debug5("  stderr file name = %s", task->efname);
 		task->stderr_fd = open(task->efname, file_flags, 0666);
 		if (task->stderr_fd == -1) {
-			error("Could not open stderr file: %m");
-			xfree(task->efname);
-			task->efname = fname_create(job, "slurm-%J.err", 0);
-			task->stderr_fd = open(task->efname, file_flags, 0666);
-			if (task->stderr_fd == -1)
-				return SLURM_ERROR;
+			return SLURM_ERROR;
 		}
 		fd_set_close_on_exec(task->stderr_fd);
 		task->from_stderr = -1; /* not used */
@@ -999,13 +1083,15 @@ _init_task_stdio_fds(slurmd_task_info_t *task, slurmd_job_t *job)
 int
 io_init_tasks_stdio(slurmd_job_t *job)
 {
-	int i;
+	int i, rc = SLURM_SUCCESS, tmprc;
 
 	for (i = 0; i < job->ntasks; i++) {
-		_init_task_stdio_fds(job->task[i], job);
+		tmprc = _init_task_stdio_fds(job->task[i], job);
+		if (tmprc != SLURM_SUCCESS)
+			rc = tmprc;
 	}
 
-	return 0;
+	return rc;
 }
 
 int
@@ -1070,14 +1156,25 @@ _route_msg_task_to_client(eio_obj_t *obj)
 		if (msg == NULL)
 			return;
 
-/* 		debug5("\"%s\"", msg->data + io_hdr_packed_size()); */
-
 		/* Add message to the msg_queue of all clients */
 		clients = list_iterator_create(out->job->clients);
 		while((eio = list_next(clients))) {
 			client = (struct client_io_info *)eio->arg;
 			if (client->out_eof == true)
 				continue;
+
+			/* Some clients only take certain I/O streams */
+			if (out->type==SLURM_IO_STDOUT) {
+				if (client->ltaskid_stdout != -1 && 
+				    client->ltaskid_stdout != out->ltaskid)
+					continue;
+			}
+			if (out->type==SLURM_IO_STDERR) {
+				if (client->ltaskid_stderr != -1 && 
+				    client->ltaskid_stderr != out->ltaskid)
+					continue;
+			}
+
 			debug5("======================== Enqueued message");
 			xassert(client->magic == CLIENT_IO_MAGIC);
 			if (list_enqueue(client->msg_queue, msg))
@@ -1192,11 +1289,39 @@ io_close_all(slurmd_job_t *job)
 	eio_signal_shutdown(job->eio);
 }
 
+void 
+io_close_local_fds(slurmd_job_t *job)
+{
+	ListIterator clients;
+	eio_obj_t *eio;
+	int rc;
+	struct client_io_info *client;
+
+	if (job == NULL || job->clients == NULL)
+		return;
+
+	clients = list_iterator_create(job->clients);
+	while((eio = list_next(clients))) {
+		client = (struct client_io_info *)eio->arg;
+		if (client->is_local_file) {
+			if (eio->fd >= 0) {
+				do {
+					rc = close(eio->fd);
+				} while (rc == -1 && errno == EINTR);
+				eio->fd = -1;
+			}
+		}
+	}
+}
+
+
+
 static void *
 _io_thr(void *arg)
 {
 	slurmd_job_t *job = (slurmd_job_t *) arg;
 	sigset_t set;
+	int rc;
 
 	/* A SIGHUP signal signals a reattach to the mgr thread.  We need
 	 * to block SIGHUP from being delivered to this thread so the mgr
@@ -1208,11 +1333,60 @@ _io_thr(void *arg)
 	pthread_sigmask(SIG_BLOCK, &set, NULL);
 
 	debug("IO handler started pid=%lu", (unsigned long) getpid());
-	eio_handle_mainloop(job->eio);
-	debug("IO handler exited");
+	rc = eio_handle_mainloop(job->eio);
+	debug("IO handler exited, rc=%d", rc);
 	return (void *)1;
 }
 
+/*
+ *  Add a client to the job's client list that will write stdout and/or
+ *  stderr from the slurmstepd.  The slurmstepd handles the write when
+ *  a file is created per node or per task, and the output needs to be
+ *  modified in some way, like labelling lines with the task number.
+ */
+int
+io_create_local_client(const char *filename, int file_flags, 
+		       slurmd_job_t *job, bool labelio,
+		       int stdout_tasks, int stderr_tasks)
+{
+	int fd = -1;
+	struct client_io_info *client;
+	eio_obj_t *obj;
+	int tmp;
+
+	fd = open(filename, file_flags, 0666);
+	if (fd == -1) {
+		return ESLURMD_IO_ERROR;
+	}
+	fd_set_close_on_exec(fd);
+
+	/* Now set up the eio object */
+	client = xmalloc(sizeof(struct client_io_info));
+#ifndef NDEBUG
+	client->magic = CLIENT_IO_MAGIC;
+#endif
+	client->job = job;
+	client->msg_queue = list_create(NULL); /* FIXME - destructor */
+
+	client->ltaskid_stdout = stdout_tasks;
+	client->ltaskid_stderr = stderr_tasks;
+	client->labelio = labelio;
+	client->is_local_file = true;
+
+	client->label_width = 1;
+	tmp = job->ntasks-1;
+	while ((tmp /= 10) > 0)
+		client->label_width++;
+
+
+	obj = eio_obj_create(fd, &local_file_ops, (void *)client);
+	list_append(job->clients, (void *)obj);
+	eio_new_initial_obj(job->eio, (void *)obj);
+	debug5("Now handling %d IO Client object(s)", list_count(job->clients));
+
+	return SLURM_SUCCESS;
+}
+
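/*
 * Illustrative sketch (not from the SLURM sources): the ltaskid_stdout
 * and ltaskid_stderr fields set above encode which tasks a client
 * accepts output from -- -1 accepts every task, a non-negative value
 * accepts only the matching local task id, and any other value (-2 by
 * convention) accepts none.  _route_msg_task_to_client() and
 * _send_eof_msg() apply this filter.  The types and names below are
 * simplified stand-ins, not the real slurmstepd structures.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_client {
	int ltaskid_stdout;	/* -1: all tasks, >= 0: one task, -2: none */
};

static bool accepts_stdout(const struct fake_client *c, int ltaskid)
{
	/* mirrors the "continue" test in _route_msg_task_to_client() */
	return (c->ltaskid_stdout == -1 || c->ltaskid_stdout == ltaskid);
}

int main(void)
{
	struct fake_client to_srun     = { -1 };	/* stdout of all tasks */
	struct fake_client per_task    = {  2 };	/* stdout of task 2 only */
	struct fake_client stderr_only = { -2 };	/* no stdout at all */

	printf("%d %d %d\n",
	       accepts_stdout(&to_srun, 0),		/* prints 1 */
	       accepts_stdout(&per_task, 2),		/* prints 1 */
	       accepts_stdout(&stderr_only, 0));	/* prints 0 */
	return 0;
}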
 /* 
  * Create the initial TCP connection back to a waiting client (e.g. srun).
  *
@@ -1224,7 +1398,8 @@ _io_thr(void *arg)
  * an IO stream.
  */
 int
-io_initial_client_connect(srun_info_t *srun, slurmd_job_t *job)
+io_initial_client_connect(srun_info_t *srun, slurmd_job_t *job, 
+			  int stdout_tasks, int stderr_tasks)
 {
 	int sock = -1;
 	struct client_io_info *client;
@@ -1267,6 +1442,12 @@ io_initial_client_connect(srun_info_t *srun, slurmd_job_t *job)
 	client->job = job;
 	client->msg_queue = list_create(NULL); /* FIXME - destructor */
 
+	client->ltaskid_stdout = stdout_tasks;
+	client->ltaskid_stderr = stderr_tasks;
+	client->labelio = false;
+	client->label_width = 0;
+	client->is_local_file = false;
+
 	obj = eio_obj_create(sock, &client_ops, (void *)client);
 	list_append(job->clients, (void *)obj);
 	eio_new_initial_obj(job->eio, (void *)obj);
@@ -1320,6 +1501,13 @@ io_client_connect(srun_info_t *srun, slurmd_job_t *job)
 #endif
 	client->job = job;
 	client->msg_queue = NULL; /* initialized in _client_writable */
+
+	client->ltaskid_stdout = -1;     /* accept from all tasks */
+	client->ltaskid_stderr = -1;     /* accept from all tasks */
+	client->labelio = false;
+	client->label_width = 0;
+	client->is_local_file = false;
+
 	/* client object adds itself to job->clients in _client_writable */
 
 	obj = eio_obj_create(sock, &client_ops, (void *)client);
@@ -1426,6 +1614,19 @@ _send_eof_msg(struct task_read_info *out)
 		client = (struct client_io_info *)eio->arg;
 		debug5("======================== Enqueued eof message");
 		xassert(client->magic == CLIENT_IO_MAGIC);
+
+		/* Some clients only take certain I/O streams */
+		if (out->type==SLURM_IO_STDOUT) {
+			if (client->ltaskid_stdout != -1 && 
+			    client->ltaskid_stdout != out->ltaskid)
+				continue;
+		}
+		if (out->type==SLURM_IO_STDERR) {
+			if (client->ltaskid_stderr != -1 && 
+			    client->ltaskid_stderr != out->ltaskid)
+				continue;
+		}
+
 		if (list_enqueue(client->msg_queue, msg))
 			msg->ref_count++;
 	}
@@ -1624,3 +1825,141 @@ user_managed_io_client_connect(int ntasks, srun_info_t *srun,
 	return SLURM_SUCCESS;
 }
 
+
+void
+io_find_filename_pattern( slurmd_job_t *job, 
+			  slurmd_filename_pattern_t *outpattern, 
+			  slurmd_filename_pattern_t *errpattern,
+			  bool *same_out_err_files )
+{
+	int ii, jj;
+	int of_num_null = 0, ef_num_null = 0;
+	int of_num_devnull = 0, ef_num_devnull = 0;
+	int of_lastnull = -1, ef_lastnull = -1;
+	bool of_all_same = true, ef_all_same = true;
+	bool of_all_unique = true, ef_all_unique = true;
+
+	*outpattern = SLURMD_UNKNOWN;
+	*errpattern = SLURMD_UNKNOWN;
+	*same_out_err_files = false;
+
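+	/* First pass: count NULL and "/dev/null" names among the tasks */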
+	for (ii = 0; ii < job->ntasks; ii++) {
+		if (job->task[ii]->ofname == NULL) {
+			of_num_null++;
+			of_lastnull = ii;
+		} else if (strcmp(job->task[ii]->ofname, "/dev/null")==0) {
+			of_num_devnull++;
+		}
+
+		if (job->task[ii]->efname == NULL) {
+			ef_num_null++;
+			ef_lastnull = ii;
+		} else if (strcmp(job->task[ii]->efname, "/dev/null")==0) {
+			ef_num_devnull++;
+		}
+	}
+	if (of_num_null == job->ntasks)
+		*outpattern = SLURMD_ALL_NULL;
+
+	if (ef_num_null == job->ntasks)
+		*errpattern = SLURMD_ALL_NULL;
+
+	if (of_num_null == 1 && of_num_devnull == job->ntasks-1)
+		*outpattern = SLURMD_ONE_NULL;
+
+	if (ef_num_null == 1 && ef_num_devnull == job->ntasks-1)
+		*errpattern = SLURMD_ONE_NULL;
+
+	if (*outpattern == SLURMD_ALL_NULL && *errpattern == SLURMD_ALL_NULL)
+		*same_out_err_files = true;
+
+	if (*outpattern == SLURMD_ONE_NULL && *errpattern == SLURMD_ONE_NULL &&
+	    of_lastnull == ef_lastnull)
+		*same_out_err_files = true;
+
+	if (*outpattern != SLURMD_UNKNOWN && *errpattern != SLURMD_UNKNOWN)
+		return;
+
+	for (ii = 1; ii < job->ntasks; ii++) {
+		if (!job->task[ii]->ofname || !job->task[0]->ofname ||
+		    strcmp(job->task[ii]->ofname, job->task[0]->ofname) != 0)
+			of_all_same = false;
+
+		if (!job->task[ii]->efname || !job->task[0]->efname ||
+		    strcmp(job->task[ii]->efname, job->task[0]->efname) != 0)
+			ef_all_same = false;
+	}
+
+	if (of_all_same && *outpattern == SLURMD_UNKNOWN)
+		*outpattern = SLURMD_ALL_SAME;
+
+	if (ef_all_same && *errpattern == SLURMD_UNKNOWN)
+		*errpattern = SLURMD_ALL_SAME;
+
+	if (job->task[0]->ofname && job->task[0]->efname &&
+	    strcmp(job->task[0]->ofname, job->task[0]->efname)==0)
+		*same_out_err_files = true;
+
+	if (*outpattern != SLURMD_UNKNOWN && *errpattern != SLURMD_UNKNOWN)
+		return;
+
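+	/* Final pass: pairwise check that every task writes a distinct file */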
+	for (ii = 0; ii < job->ntasks-1; ii++) {
+		for (jj = ii+1; jj < job->ntasks; jj++) {
+
+			if (!job->task[ii]->ofname || !job->task[jj]->ofname ||
+			    strcmp(job->task[ii]->ofname, 
+				   job->task[jj]->ofname) == 0)
+				of_all_unique = false;
+
+			if (!job->task[ii]->efname || !job->task[jj]->efname ||
+			    strcmp(job->task[ii]->efname, 
+				   job->task[jj]->efname) == 0)
+				ef_all_unique = false;
+		}
+	}
+
+	if (of_all_unique)
+		*outpattern = SLURMD_ALL_UNIQUE;
+
+	if (ef_all_unique)
+		*errpattern = SLURMD_ALL_UNIQUE;
+
+	if (of_all_unique && ef_all_unique) {
+		*same_out_err_files = true;
+		for (ii = 0; ii < job->ntasks; ii++) {
+			if (job->task[ii]->ofname && 
+			    job->task[ii]->efname &&
+			    strcmp(job->task[ii]->ofname,
+				   job->task[ii]->efname) != 0) {
+				*same_out_err_files = false;
+				break;
+			}
+		}
+	}
+}
+
+
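/*
 * Illustrative sketch (not from the SLURM sources): a worked example of
 * the classification io_find_filename_pattern() performs, reduced to a
 * two-task job.  Only the SLURMD_ALL_NULL, SLURMD_ALL_SAME and
 * SLURMD_ALL_UNIQUE cases are shown; the SLURMD_ONE_NULL "/dev/null"
 * case is omitted for brevity.
 */
#include <stdio.h>
#include <string.h>

static const char *classify2(const char *name0, const char *name1)
{
	if (!name0 && !name1)
		return "SLURMD_ALL_NULL";	/* all output goes to srun */
	if (name0 && name1 && strcmp(name0, name1) == 0)
		return "SLURMD_ALL_SAME";	/* one shared output file */
	if (name0 && name1)
		return "SLURMD_ALL_UNIQUE";	/* one file per task */
	return "SLURMD_UNKNOWN";
}

int main(void)
{
	printf("%s\n", classify2(NULL, NULL));		 /* SLURMD_ALL_NULL */
	printf("%s\n", classify2("job.out", "job.out")); /* SLURMD_ALL_SAME */
	printf("%s\n", classify2("out.0", "out.1"));	 /* SLURMD_ALL_UNIQUE */
	return 0;
}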
+int
+io_get_file_flags(slurmd_job_t *job)
+{
+	slurm_ctl_conf_t *conf;
+	int file_flags;
+
+	/* set files for opening stdout/err */
+	if (job->open_mode == OPEN_MODE_APPEND)
+		file_flags = O_CREAT|O_WRONLY|O_APPEND;
+	else if (job->open_mode == OPEN_MODE_TRUNCATE)
+		file_flags = O_CREAT|O_WRONLY|O_APPEND|O_TRUNC;
+	else {
+		conf = slurm_conf_lock();
+		if (conf->job_file_append)
+			file_flags = O_CREAT|O_WRONLY|O_APPEND;
+		else
+			file_flags = O_CREAT|O_WRONLY|O_APPEND|O_TRUNC;
+		slurm_conf_unlock();
+	}
+	return file_flags;
+}
+
+
+
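/*
 * Illustrative sketch (not from the SLURM sources): what the flag sets
 * returned by io_get_file_flags() do at open() time.  Note that the
 * truncate case keeps O_APPEND, so the file is emptied once at open and
 * every later write still appends -- useful when more than one writer
 * shares the file.  "job.out" is a hypothetical file name.
 */
#include <fcntl.h>
#include <stdio.h>

int main(void)
{
	int append_flags   = O_CREAT|O_WRONLY|O_APPEND;		/* OPEN_MODE_APPEND */
	int truncate_flags = O_CREAT|O_WRONLY|O_APPEND|O_TRUNC;	/* OPEN_MODE_TRUNCATE */

	/* e.g. open("job.out", truncate_flags, 0666) starts the file
	 * empty, then appends all subsequent writes */
	printf("append=%#x truncate=%#x\n", append_flags, truncate_flags);
	return 0;
}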
diff --git a/src/slurmd/slurmstepd/io.h b/src/slurmd/slurmstepd/io.h
index e1adb77f0dfd91b5526940214eccc2aa0d31dc6b..9bf9c2f476d4cd17e52a14dcd74ed0e94ef66114 100644
--- a/src/slurmd/slurmstepd/io.h
+++ b/src/slurmd/slurmstepd/io.h
@@ -1,14 +1,15 @@
 /*****************************************************************************\
  * src/slurmd/slurmstepd/io.h - slurmstepd standard IO routines
- * $Id: io.h 13672 2008-03-19 23:10:58Z jette $
+ * $Id: io.h 17803 2009-06-10 22:06:56Z da $
  *****************************************************************************
  *  Copyright (C) 2002 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Mark Grondona <mgrondona@llnl.gov>.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -56,6 +57,23 @@ struct io_buf {
 	void *data;
 };
 
+/* For each task's ofname and efname: are all of the names NULL, is one
+   NULL with the others "/dev/null", are all non-NULL and unique, or are
+   all non-NULL and identical? */
+typedef enum {
+	SLURMD_ALL_NULL,   /* output from all tasks goes to the client (srun) */
+	SLURMD_ONE_NULL,   /* output from one task goes to the client, output
+			      from other tasks is discarded */
+	SLURMD_ALL_UNIQUE, /* a separate output file per task.  Written from
+			      the tasks unless slurmd_job_t->labelio == true,
+			      in which case the slurmstepd does the write */
+	SLURMD_ALL_SAME,   /* all tasks write to the same file.  Written from
+			      the tasks unless slurmd_job_t->labelio == true,
+			      in which case the slurmstepd does the write */
+	SLURMD_UNKNOWN
+} slurmd_filename_pattern_t;
+
+
 struct io_buf *alloc_io_buf(void);
 void free_io_buf(struct io_buf *buf);
 
@@ -66,7 +84,8 @@ void free_io_buf(struct io_buf *buf);
  * yet started, we initialize the msg_queue as an empty list and
  * directly add the eio_obj_t to the eio handle with eio_new_initial_handle.
  */
-int io_initial_client_connect(srun_info_t *srun, slurmd_job_t *job);
+int io_initial_client_connect(srun_info_t *srun, slurmd_job_t *job, 
+			      int stdout_tasks, int stderr_tasks);
 
 /* 
  * Initiate a TCP connection back to a waiting client (e.g. srun).
@@ -76,6 +95,16 @@ int io_initial_client_connect(srun_info_t *srun, slurmd_job_t *job);
  */
 int io_client_connect(srun_info_t *srun, slurmd_job_t *job);
 
+
+/* 
+ * Open a local file and create an eio object for files written
+ * from the slurmstepd, probably with labelled output.
+ */
+int
+io_create_local_client(const char *filename, int file_flags, 
+		       slurmd_job_t *job, bool labelio,
+		       int stdout_tasks, int stderr_tasks);
+
 /*
  * Initialize each task's standard I/O file descriptors.  The file descriptors
  * may be files, or may be the end of a pipe which is handled by an eio_obj_t.
@@ -101,6 +130,24 @@ void io_close_task_fds(slurmd_job_t *job);
 
 void io_close_all(slurmd_job_t *job);
 
+void io_close_local_fds(slurmd_job_t *job);
+
+
+/* 
+ *  Look for a pattern in the stdout and stderr file names, and see
+ *  if stdout and stderr point to the same file(s).
+ *  See comments above for slurmd_filename_pattern_t.
+ */
+void io_find_filename_pattern(  slurmd_job_t *job, 
+				slurmd_filename_pattern_t *outpattern, 
+				slurmd_filename_pattern_t *errpattern,
+				bool *same_out_err_files );
+
+/* 
+ *  Get the flags to be used with the open call to create output files.
+ */
+int io_get_file_flags(slurmd_job_t *job);
+
 /*
  *  Initialize "user managed" IO, where each task has a single TCP
  *  socket end point shared on stdin, stdout, and stderr.
diff --git a/src/slurmd/slurmstepd/mgr.c b/src/slurmd/slurmstepd/mgr.c
index 3ad0e1d369b8588aee2dd1ecdb3757767dcc5dee..cd09599ba8f4c532b77d503b134fc3058135ffcb 100644
--- a/src/slurmd/slurmstepd/mgr.c
+++ b/src/slurmd/slurmstepd/mgr.c
@@ -1,15 +1,16 @@
 /*****************************************************************************\
  *  src/slurmd/slurmstepd/mgr.c - job manager functions for slurmstepd
- *  $Id: mgr.c 17040 2009-03-26 15:03:18Z jette $
+ *  $Id: mgr.c 17803 2009-06-10 22:06:56Z da $
  *****************************************************************************
  *  Copyright (C) 2002-2007 The Regents of the University of California.
- *  Copyright (C) 2008 Lawrence Livermore National Security.
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Mark Grondona <mgrondona@llnl.gov>.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -76,22 +77,23 @@
 
 #include <slurm/slurm_errno.h>
 
+#include "src/common/basil_resv_conf.h"
 #include "src/common/cbuf.h"
 #include "src/common/env.h"
+#include "src/common/fd.h"
+#include "src/common/forward.h"
 #include "src/common/hostlist.h"
 #include "src/common/log.h"
+#include "src/common/mpi.h"
 #include "src/common/node_select.h"
-#include "src/common/fd.h"
+#include "src/common/plugstack.h"
 #include "src/common/safeopen.h"
 #include "src/common/slurm_jobacct_gather.h"
 #include "src/common/switch.h"
+#include "src/common/util-net.h"
+#include "src/common/xmalloc.h"
 #include "src/common/xsignal.h"
 #include "src/common/xstring.h"
-#include "src/common/xmalloc.h"
-#include "src/common/util-net.h"
-#include "src/common/forward.h"
-#include "src/common/plugstack.h"
-#include "src/common/mpi.h"
 
 #include "src/slurmd/slurmd/slurmd.h"
 
@@ -100,6 +102,7 @@
 #include "src/slurmd/common/task_plugin.h"
 #include "src/slurmd/common/run_script.h"
 #include "src/slurmd/common/reverse_tree.h"
+#include "src/slurmd/common/set_oomadj.h"
 
 #include "src/slurmd/slurmstepd/slurmstepd.h"
 #include "src/slurmd/slurmstepd/mgr.h"
@@ -124,11 +127,11 @@ static int mgr_sigarray[] = {
 };
 
 struct priv_state {
-	uid_t           saved_uid;
-	gid_t           saved_gid;
-	gid_t *         gid_list;
-	int             ngids;
-	char            saved_cwd [4096];
+	uid_t	saved_uid;
+	gid_t	saved_gid;
+	gid_t *	gid_list;
+	int	ngids;
+	char	saved_cwd [4096];
 };
 
 step_complete_t step_complete = {
@@ -143,7 +146,7 @@ step_complete_t step_complete = {
 	true,
 	(bitstr_t *)NULL,
 	0,
-        NULL
+	NULL
 };
 
 typedef struct kill_thread {
@@ -161,20 +164,20 @@ typedef struct kill_thread {
  */
 static int  _access(const char *path, int modes, uid_t uid, gid_t gid);
 static void _send_launch_failure(launch_tasks_request_msg_t *, 
-                                 slurm_addr *, int);
+				 slurm_addr *, int);
 static int  _fork_all_tasks(slurmd_job_t *job);
 static int  _become_user(slurmd_job_t *job, struct priv_state *ps);
 static void  _set_prio_process (slurmd_job_t *job);
 static void _set_job_log_prefix(slurmd_job_t *job);
 static int  _setup_normal_io(slurmd_job_t *job);
 static int  _drop_privileges(slurmd_job_t *job, bool do_setuid,
-				struct priv_state *state);
+			     struct priv_state *state);
 static int  _reclaim_privileges(struct priv_state *state);
 static void _send_launch_resp(slurmd_job_t *job, int rc);
 static void _slurmd_job_log_init(slurmd_job_t *job);
 static void _wait_for_io(slurmd_job_t *job);
 static int  _send_exit_msg(slurmd_job_t *job, uint32_t *tid, int n, 
-		int status);
+			   int status);
 static void _wait_for_children_slurmstepd(slurmd_job_t *job);
 static int  _send_pending_exit_msgs(slurmd_job_t *job);
 static void _send_step_complete_msgs(slurmd_job_t *job);
@@ -356,7 +359,7 @@ _set_job_log_prefix(slurmd_job_t *job)
 static int
 _setup_normal_io(slurmd_job_t *job)
 {
-	int            rc   = 0;
+	int rc = 0, ii = 0;
 	struct priv_state sprivs;
 
 	debug2("Entering _setup_normal_io");
@@ -369,12 +372,8 @@ _setup_normal_io(slurmd_job_t *job)
 	if (_drop_privileges(job, true, &sprivs) < 0)
 		return ESLURMD_SET_UID_OR_GID_ERROR;
 
-	/* FIXME - need to check a return code for failures */
-	io_init_tasks_stdio(job);
-
-	if (_reclaim_privileges(&sprivs) < 0)
-		error("sete{u/g}id(%lu/%lu): %m", 
-		      (u_long) sprivs.saved_uid, (u_long) sprivs.saved_gid);
+	if (io_init_tasks_stdio(job) != SLURM_SUCCESS)
+		return ESLURMD_IO_ERROR;
 
 	/*
 	 * MUST create the initial client object before starting
@@ -382,15 +381,102 @@ _setup_normal_io(slurmd_job_t *job)
 	 */
 	if (!job->batch) {
 		srun_info_t *srun = list_peek(job->sruns);
+
+		/* Local id of the task whose output goes to srun: -1 means
+		   all tasks, any other value (-2 by convention) means no
+		   tasks */
+		int srun_stdout_tasks = -1;
+		int srun_stderr_tasks = -1;
+
 		xassert(srun != NULL);
-		rc = io_initial_client_connect(srun, job);
+
+		/* If I/O is labelled with the task number, and a separate
+		   file is written per node or per task, the I/O needs to be
+		   sent back to the stepd, have a label added, and be written
+		   by the stepd rather than sent back to srun or written
+		   directly from the node.  A task whose ofname or efname is
+		   NULL sends its data back to the client (srun). */
+
+		if (job->labelio) {
+			slurmd_filename_pattern_t outpattern, errpattern;
+			bool same = false;
+			int file_flags;
+
+			io_find_filename_pattern(job, &outpattern, &errpattern,
+						 &same);
+			file_flags = io_get_file_flags(job);
+
+			/* Make eio objects to write from the slurmstepd */
+			if (outpattern == SLURMD_ALL_UNIQUE) {
+				/* Open a separate file per task */
+				for (ii = 0; ii < job->ntasks; ii++) {
+					rc = io_create_local_client( 
+						job->task[ii]->ofname, 
+						file_flags, job, job->labelio,
+						job->task[ii]->id,
+						same ? job->task[ii]->id : -2);
+					if (rc != SLURM_SUCCESS)
+						return ESLURMD_IO_ERROR;
+				}
+				srun_stdout_tasks = -2;
+				if (same)
+					srun_stderr_tasks = -2;
+			} else if (outpattern == SLURMD_ALL_SAME) {
+				/* Open a file for all tasks */
+				rc = io_create_local_client( 
+					job->task[0]->ofname, 
+					file_flags, job, job->labelio,
+					-1, same ? -1 : -2);
+				if (rc != SLURM_SUCCESS)
+					return ESLURMD_IO_ERROR;
+
+				srun_stdout_tasks = -2;
+				if (same)
+					srun_stderr_tasks = -2;
+			}
+
+			if (!same) {
+				if (errpattern == SLURMD_ALL_UNIQUE) {
+					/* Open a separate file per task */
+					for (ii = 0; ii < job->ntasks; ii++) {
+						rc = io_create_local_client( 
+							job->task[ii]->efname, 
+							file_flags, job, 
+							job->labelio,
+							-2, job->task[ii]->id);
+						if (rc != SLURM_SUCCESS)
+							return ESLURMD_IO_ERROR;
+					}
+					srun_stderr_tasks = -2;
+				} else if (errpattern == SLURMD_ALL_SAME) {
+					/* Open a file for all tasks */
+					rc = io_create_local_client( 
+						job->task[0]->efname, 
+						file_flags, job, job->labelio,
+						-2, -1);
+					if (rc != SLURM_SUCCESS)
+						return ESLURMD_IO_ERROR;
+
+					srun_stderr_tasks = -2;
+				}
+			}
+		}
+
+		rc = io_initial_client_connect(srun, job, srun_stdout_tasks, 
+					       srun_stderr_tasks);
 		if (rc < 0) 
 			return ESLURMD_IO_ERROR;
 	}
 
-	if (!job->batch)
+	if (_reclaim_privileges(&sprivs) < 0) {
+		error("sete{u/g}id(%lu/%lu): %m",
+		      (u_long) sprivs.saved_uid, (u_long) sprivs.saved_gid);
+	}
+
+	if (!job->batch) {
 		if (io_thread_start(job) < 0)
 			return ESLURMD_IO_ERROR;
+	}
+
 	debug2("Leaving  _setup_normal_io");
 	return SLURM_SUCCESS;
 }
@@ -436,14 +522,14 @@ _send_exit_msg(slurmd_job_t *job, uint32_t *tid, int n, int status)
 
 	debug3("sending task exit msg for %d tasks", n);
 
-	msg.task_id_list = tid;
-	msg.num_tasks    = n;
-	msg.return_code  = status;
-	msg.job_id       = job->jobid;
-	msg.step_id      = job->stepid;
+	msg.task_id_list	= tid;
+	msg.num_tasks		= n;
+	msg.return_code		= status;
+	msg.job_id		= job->jobid;
+	msg.step_id		= job->stepid;
 	slurm_msg_t_init(&resp);
-	resp.data        = &msg;
-	resp.msg_type    = MESSAGE_TASK_EXIT;
+	resp.data		= &msg;
+	resp.msg_type		= MESSAGE_TASK_EXIT;
 	
 	/*
 	 *  XXX Hack for TCP timeouts on exit of large, synchronized
@@ -544,6 +630,8 @@ _one_step_complete_msg(slurmd_job_t *job, int first, int last)
 	static bool acct_sent = false;
 
 	debug2("_one_step_complete_msg: first=%d, last=%d", first, last);
+
+	memset(&msg, 0, sizeof(step_complete_msg_t));
 	msg.job_id = job->jobid;
 	msg.job_step_id = job->stepid;
 	msg.range_first = first;
@@ -697,6 +785,7 @@ job_manager(slurmd_job_t *job)
 {
 	int  rc = 0;
 	bool io_initialized = false;
+	char *ckpt_type = slurm_get_checkpoint_type();
 
 	debug3("Entered job_manager for %u.%u pid=%lu",
 	       job->jobid, job->stepid, (unsigned long) job->jmgr_pid);
@@ -706,6 +795,7 @@ job_manager(slurmd_job_t *job)
 	if (switch_init() != SLURM_SUCCESS
 	    || slurmd_task_init() != SLURM_SUCCESS
 	    || slurm_proctrack_init() != SLURM_SUCCESS
+	    || checkpoint_init(ckpt_type) != SLURM_SUCCESS
 	    || slurm_jobacct_gather_init() != SLURM_SUCCESS) {
 		rc = SLURM_PLUGIN_NAME_INVALID;
 		goto fail1;
@@ -736,10 +826,11 @@ job_manager(slurmd_job_t *job)
 	if (prctl(PR_SET_DUMPABLE, 1) < 0)
 		debug ("Unable to set dumpable to 1");
 #  endif /* PR_SET_DUMPABLE */
-#endif   /* !NDEBUG         */
+#endif   /* !NDEBUG	 */
 
 	if (rc) {
 		error("IO setup failed: %m");
+		rc = SLURM_SUCCESS;	/* drains node otherwise */
 		goto fail2;
 	} else {
 		io_initialized = true;
@@ -754,6 +845,14 @@ job_manager(slurmd_job_t *job)
 		goto fail2;
 	}
 
+	/* fork necessary threads for checkpoint */
+	if (checkpoint_stepd_prefork(job) != SLURM_SUCCESS) {
+		error("Failed checkpoint_stepd_prefork");
+		rc = SLURM_FAILURE;
+		io_close_task_fds(job);
+		goto fail2;
+	}
+	
 	/* calls pam_setup() and requires pam_finish() if successful */
 	if (_fork_all_tasks(job) < 0) {
 		debug("_fork_all_tasks failed");
@@ -820,11 +919,9 @@ job_manager(slurmd_job_t *job)
 	/*
 	 * Wait for io thread to complete (if there is one)
 	 */
-	if (!job->batch && !job->user_managed_io && io_initialized) {
-		eio_signal_shutdown(job->eio);
+	if (!job->batch && !job->user_managed_io && io_initialized) 
 		_wait_for_io(job);
-	}
-
+	
 	debug2("Before call to spank_fini()");
 	if (spank_fini (job)  < 0) {
 		error ("spank_fini failed\n");
@@ -847,9 +944,22 @@ job_manager(slurmd_job_t *job)
 		_send_step_complete_msgs(job);
 	}
 
+	xfree(ckpt_type);
 	return(rc);
 }
 
+static int
+_spank_task_privileged(slurmd_job_t *job, int taskid, struct priv_state *sp)
+{
+	if (_reclaim_privileges(sp) < 0)
+		return SLURM_ERROR;
+
+	if (spank_task_privileged (job, taskid) < 0)
+		return error("spank_task_privileged failed");
+
+	return(_drop_privileges (job, true, sp));
+}
+
 
 /* fork and exec N tasks
  */ 
@@ -863,6 +973,7 @@ _fork_all_tasks(slurmd_job_t *job)
 	int fdpair[2];
 	struct priv_state sprivs;
 	jobacct_id_t jobacct_id;
+	char *oom_value;
 
 	xassert(job != NULL);
 
@@ -871,6 +982,13 @@ _fork_all_tasks(slurmd_job_t *job)
 		return SLURM_ERROR;
 	}
 
+#ifdef HAVE_CRAY_XT
+	if (basil_resv_conf(job->resv_id, job->jobid)) {
+		error("could not confirm reservation");
+		return SLURM_ERROR;
+	}
+#endif
+
 	debug2("Before call to spank_init()");
 	if (spank_init (job) < 0) {
 		error ("Plugin stack initialization failed.\n");
@@ -908,6 +1026,12 @@ _fork_all_tasks(slurmd_job_t *job)
 		writefds[i] = fdpair[1];
 	}
 
+	set_oom_adj(0);	/* the tasks may be killed by OOM */
+	if (pre_setuid(job)) {
+		error("Failed task affinity setup");
+		return SLURM_ERROR;
+	}
+
 	/* Temporarily drop effective privileges, except for the euid.
 	 * We need to wait until after pam_setup() to drop euid.
 	 */
@@ -965,7 +1089,13 @@ _fork_all_tasks(slurmd_job_t *job)
 			if (conf->propagate_prio == 1)
 				_set_prio_process(job);
 
-			(void) pre_setuid(job);
+			/*
+			 *  Reclaim privileges and call any plugin hooks
+			 *   that may require elevated privs
+			 */
+			if (_spank_task_privileged(job, i, &sprivs) < 0)
+				exit(1);
+
  			if (_become_user(job, &sprivs) < 0) {
  				error("_become_user failed: %m");
 				/* child process, should not return */
@@ -1006,6 +1136,12 @@ _fork_all_tasks(slurmd_job_t *job)
 		/* Don't bother erroring out here */
 	}
 
+	if ((oom_value = getenv("SLURMSTEPD_OOM_ADJ"))) {
+		int i = atoi(oom_value);
+		debug("Setting slurmstepd oom_adj to %d", i);
+		set_oom_adj(i);
+	}
+
 	if (chdir (sprivs.saved_cwd) < 0) {
 		error ("Unable to return to working directory");
 	}
@@ -1024,11 +1160,11 @@ _fork_all_tasks(slurmd_job_t *job)
 				i, job->task[i]->pid, job->pgid);
 		}
 
-                if (slurm_container_add(job, job->task[i]->pid)
+		if (slurm_container_add(job, job->task[i]->pid)
 		    == SLURM_ERROR) {
-                        error("slurm_container_add: %m");
+			error("slurm_container_add: %m");
 			goto fail1;
-                }
+		}
 		jobacct_id.nodeid = job->nodeid;
 		jobacct_id.taskid = job->task[i]->gtid;
 		jobacct_gather_g_add_task(job->task[i]->pid, 
@@ -1121,6 +1257,35 @@ _send_pending_exit_msgs(slurmd_job_t *job)
 	return nsent;
 }
 
+static inline void
+_log_task_exit(unsigned long taskid, unsigned long pid, int status)
+{
+	/*
+	 *  Print a nice message to the log describing the task exit status.
+	 *
+	 *  The final else is there just in case there is ever an exit status
+	 *   that isn't WIFEXITED || WIFSIGNALED. We'll probably never reach
+	 *   that code, but it is better than dropping a potentially useful
+	 *   exit status.
+	 */
+	if (WIFEXITED(status))
+		verbose("task %lu (%lu) exited with exit code %d.",
+		        taskid, pid, WEXITSTATUS(status));
+	else if (WIFSIGNALED(status))
+		/* WCOREDUMP isn't available on AIX */
+		verbose("task %lu (%lu) exited. Killed by signal %d%s.",
+		        taskid, pid, WTERMSIG(status),
+#ifdef WCOREDUMP
+		        WCOREDUMP(status) ? " (core dumped)" : ""
+#else
+			""
+#endif
+			);
+	else
+		verbose("task %lu (%lu) exited with status 0x%04x.",
+		        taskid, pid, status);
+}
+
 /*
  * If waitflag is true, perform a blocking wait for a single process
  * and then return.
@@ -1179,9 +1344,8 @@ _wait_for_any_task(slurmd_job_t *job, bool waitflag)
 			}
 		}
 		if (t != NULL) {
-			verbose("task %lu (%lu) exited status 0x%04x %M",
-				(unsigned long)job->task[i]->gtid,
-				(unsigned long)pid, status);
+			_log_task_exit(job->task[i]->gtid, pid, status);
+
 			t->exited  = true;
 			t->estatus = status;
 			job->envtp->env = job->env;
@@ -1189,6 +1353,7 @@ _wait_for_any_task(slurmd_job_t *job, bool waitflag)
 			job->envtp->localid = job->task[i]->id;
 			
 			job->envtp->distribution = -1;
+			job->envtp->batch_flag = job->batch;
 			setup_env(job->envtp);
 			job->env = job->envtp->env;
 			if (job->task_epilog) {
@@ -1305,6 +1470,9 @@ _wait_for_io(slurmd_job_t *job)
 	} else
 		info("_wait_for_io: ioid==0");
 
+	/* Close any files for stdout/stderr opened by the stepd */
+	io_close_local_fds(job);
+
 	return;
 }
 
@@ -1431,13 +1599,13 @@ _send_launch_resp(slurmd_job_t *job, int rc)
 	debug("Sending launch resp rc=%d", rc);
 
 	slurm_msg_t_init(&resp_msg);
-        resp_msg.address      = srun->resp_addr;
-	resp_msg.data         = &resp;
-	resp_msg.msg_type     = RESPONSE_LAUNCH_TASKS;
+	resp_msg.address	= srun->resp_addr;
+	resp_msg.data		= &resp;
+	resp_msg.msg_type	= RESPONSE_LAUNCH_TASKS;
 	
-	resp.node_name        = xstrdup(job->node_name);
-	resp.return_code      = rc;
-	resp.count_of_pids    = job->ntasks;
+	resp.node_name		= xstrdup(job->node_name);
+	resp.return_code	= rc;
+	resp.count_of_pids	= job->ntasks;
 
 	resp.local_pids = xmalloc(job->ntasks * sizeof(*resp.local_pids));
 	resp.task_ids = xmalloc(job->ntasks * sizeof(*resp.task_ids));
@@ -1457,9 +1625,9 @@ _send_launch_resp(slurmd_job_t *job, int rc)
 static int
 _send_complete_batch_script_msg(slurmd_job_t *job, int err, int status)
 {
-	int                      rc, i;
-	slurm_msg_t              req_msg;
-	complete_batch_script_msg_t  req;
+	int		rc, i;
+	slurm_msg_t	req_msg;
+	complete_batch_script_msg_t req;
 
 	req.job_id	= job->jobid;
 	req.job_rc      = status;
diff --git a/src/slurmd/slurmstepd/mgr.h b/src/slurmd/slurmstepd/mgr.h
index 1f9a52b9a1c361f8f6be6692635d34f1e4aab93e..c1867a622d27f8e7905de7c6edb37a11121e0768 100644
--- a/src/slurmd/slurmstepd/mgr.h
+++ b/src/slurmd/slurmstepd/mgr.h
@@ -4,10 +4,11 @@
  *  Copyright (C) 2002 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Mark Grondona <mgrondona@llnl.gov>.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/slurmd/slurmstepd/multi_prog.c b/src/slurmd/slurmstepd/multi_prog.c
index 3dfd090f8e0ef138c1efba267f1d21b895ea4d94..4603d8f1c1288d3c0030404c41f4ee5558ac4690 100644
--- a/src/slurmd/slurmstepd/multi_prog.c
+++ b/src/slurmd/slurmstepd/multi_prog.c
@@ -11,10 +11,11 @@
  *  and
  *  Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>,
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/slurmd/slurmstepd/multi_prog.h b/src/slurmd/slurmstepd/multi_prog.h
index a36256b6f20463aaf87ccad3b456c1f229a60f9c..7444002a12505761e26a2e660a11165dc3a5bc10 100644
--- a/src/slurmd/slurmstepd/multi_prog.h
+++ b/src/slurmd/slurmstepd/multi_prog.h
@@ -4,10 +4,11 @@
  *  Copyright (C) 2006 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/slurmd/slurmstepd/pam_ses.c b/src/slurmd/slurmstepd/pam_ses.c
index c754492fe069ec2ae0b97b33287fba4e5d453bea..d4ab3c7adc810c672d1b395363722c06886cc604 100644
--- a/src/slurmd/slurmstepd/pam_ses.c
+++ b/src/slurmd/slurmstepd/pam_ses.c
@@ -5,10 +5,11 @@
  *  Copyright (C) 2006 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Donna Mecozzi <dmecozzi@llnl.gov>.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/slurmd/slurmstepd/pam_ses.h b/src/slurmd/slurmstepd/pam_ses.h
index f933cc27744ec4a4e2cca6eef29799e92a859c63..4a42db30918184a46dd859fab901b83f2309242d 100644
--- a/src/slurmd/slurmstepd/pam_ses.h
+++ b/src/slurmd/slurmstepd/pam_ses.h
@@ -6,10 +6,11 @@
  *  Copyright (C) 2006 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Donna Mecozzi <dmecozzi@llnl.gov>.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/slurmd/slurmstepd/pdebug.c b/src/slurmd/slurmstepd/pdebug.c
index c4a91e3f448d5e06127b494b278c3b5023e69e6a..0a47a3f0216464a6d9c001a451bc30c9dee81b4c 100644
--- a/src/slurmd/slurmstepd/pdebug.c
+++ b/src/slurmd/slurmstepd/pdebug.c
@@ -4,10 +4,11 @@
  *  Copyright (C) 2002 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Mark Grondona <mgrondona@llnl.gov>.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/slurmd/slurmstepd/pdebug.h b/src/slurmd/slurmstepd/pdebug.h
index 228dcfb0293585b03fb02c7abe0dec6f6f9c7465..6cfbd30d6da745c608eae8db01456130c728671c 100644
--- a/src/slurmd/slurmstepd/pdebug.h
+++ b/src/slurmd/slurmstepd/pdebug.h
@@ -4,10 +4,11 @@
  *  Copyright (C) 2002 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Mark Grondona <mgrondona@llnl.gov>.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/slurmd/slurmstepd/req.c b/src/slurmd/slurmstepd/req.c
index 4b28183a4586bf52f969b0aef8b3d8d51e39ebef..445ac05889aa241d18895a3b19bbf307472b468f 100644
--- a/src/slurmd/slurmstepd/req.c
+++ b/src/slurmd/slurmstepd/req.c
@@ -2,13 +2,14 @@
  *  src/slurmd/slurmstepd/req.c - slurmstepd domain socket request handling
  *****************************************************************************
  *  Copyright (C) 2005-2007 The Regents of the University of California.
- *  Copyright (C) 2008 Lawrence Livermore National Security.
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Christopher Morrone <morrone2@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -798,27 +799,34 @@ rwfail:
 static int
 _handle_checkpoint_tasks(int fd, slurmd_job_t *job, uid_t uid)
 {
-	static time_t last_timestamp = 0;
 	int rc = SLURM_SUCCESS;
-	int signal;
 	time_t timestamp;
+	int len;
+	char *image_dir = NULL;
 
 	debug3("_handle_checkpoint_tasks for job %u.%u",
 	       job->jobid, job->stepid);
 
-	safe_read(fd, &signal, sizeof(int));
 	safe_read(fd, &timestamp, sizeof(time_t));
+	safe_read(fd, &len, sizeof(int));
+	if (len) {
+		image_dir = xmalloc (len);
+		safe_read(fd, image_dir, len); /* '\0' terminated */
+	}
 
 	debug3("  uid = %d", uid);
 	if (uid != job->uid && !_slurm_authorized_user(uid)) {
-		debug("checkpoint req from uid %ld for job %u.%u owned by uid %ld",
+		debug("checkpoint req from uid %ld for job %u.%u "
+		      "owned by uid %ld",
 		      (long)uid, job->jobid, job->stepid, (long)job->uid);
 		rc = EPERM;
 		goto done;
 	}
 
-	if (timestamp == last_timestamp) {
-		debug("duplicate checkpoint req for job %u.%u, timestamp %ld. discarded.",
+	if (job->ckpt_timestamp &&
+	    timestamp == job->ckpt_timestamp) {
+		debug("duplicate checkpoint req for job %u.%u, "
+		      "timestamp %ld. discarded.",
 		      job->jobid, job->stepid, (long)timestamp);
 		rc = ESLURM_ALREADY_DONE; /* EINPROGRESS? */
 		goto done;
@@ -844,17 +852,24 @@ _handle_checkpoint_tasks(int fd, slurmd_job_t *job, uid_t uid)
                goto done;
        }
 
-       /* TODO: send timestamp with signal */
-       if (killpg(job->pgid, signal) == -1) {
-               rc = -1;        /* Most probable ESRCH, resulting in ESLURMD_JOB_NOTRUNNING */
-               verbose("Error sending signal %d to %u.%u, pgid %d, errno: %d: %s",
-                       signal, job->jobid, job->stepid, job->pgid,
-                       errno, slurm_strerror(rc));
+       /* set timestamp in case another request comes */
+       job->ckpt_timestamp = timestamp;
+
+       /* TODO: do we need job->ckpt_dir any more, except for checkpoint/xlch? */
+/*	if (! image_dir) { */
+/*		image_dir = xstrdup(job->ckpt_dir); */
+/*	} */
+       
+       /* call the plugin to send the request */
+       if (checkpoint_signal_tasks(job, image_dir) != SLURM_SUCCESS) {
+               rc = -1;
+               verbose("Error sending checkpoint request to %u.%u: %s",
+                     job->jobid, job->stepid, slurm_strerror(rc));
        } else {
-               last_timestamp = timestamp;
-               verbose("Sent signal %d to %u.%u, pgid %d",
-                       signal, job->jobid, job->stepid, job->pgid);
+               verbose("Sent checkpoint request to %u.%u",
+                       job->jobid, job->stepid);
        }
+
        pthread_mutex_unlock(&suspend_mutex);
 
 done:
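/*
 * Illustrative sketch (not from the SLURM sources): the safe_read()
 * calls in _handle_checkpoint_tasks() above imply this wire layout on
 * the stepd's domain socket -- a time_t timestamp, an int length, then
 * a '\0'-terminated image directory when the length is non-zero.  A
 * sender would mirror that order.  write_all() and send_ckpt_req() are
 * hypothetical stand-ins, not the real slurmd-side helpers.
 */
#include <errno.h>
#include <string.h>
#include <time.h>
#include <unistd.h>

static int write_all(int fd, const void *buf, size_t n)
{
	const char *p = buf;

	while (n > 0) {
		ssize_t w = write(fd, p, n);
		if (w < 0) {
			if (errno == EINTR)
				continue;	/* retry interrupted writes */
			return -1;
		}
		p += w;
		n -= (size_t)w;
	}
	return 0;
}

static int send_ckpt_req(int fd, time_t timestamp, const char *image_dir)
{
	/* '\0' terminator is included in the length, matching the reader */
	int len = image_dir ? (int)strlen(image_dir) + 1 : 0;

	if (write_all(fd, &timestamp, sizeof(time_t)) < 0)
		return -1;
	if (write_all(fd, &len, sizeof(int)) < 0)
		return -1;
	if (len && write_all(fd, image_dir, (size_t)len) < 0)
		return -1;
	return 0;
}

int main(void)
{
	/* demo only: emit one request (binary) on stdout */
	return send_ckpt_req(STDOUT_FILENO, time(NULL), "/tmp/ckpt.d") ? 1 : 0;
}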
diff --git a/src/slurmd/slurmstepd/req.h b/src/slurmd/slurmstepd/req.h
index cc63c6fdbad9bcb3462049a10e2e260f8e457832..3f528b31184ea02d8efeb9ae154ea833f80a42f2 100644
--- a/src/slurmd/slurmstepd/req.h
+++ b/src/slurmd/slurmstepd/req.h
@@ -1,14 +1,15 @@
 /*****************************************************************************\
  *  src/slurmd/slurmstepd/req.h - slurmstepd request handling
- *  $Id: req.h 13672 2008-03-19 23:10:58Z jette $
+ *  $Id: req.h 16616 2009-02-20 17:00:27Z jette $
  *****************************************************************************
  *  Copyright (C) 2005 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Christopher Morrone <morrone2@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/slurmd/slurmstepd/slurmstepd.c b/src/slurmd/slurmstepd/slurmstepd.c
index 3142bd71c179e6d7a635c5980253c5b448f5c742..10737eebd646ed4ce1e0bcfc0f30d89891ecc8e6 100644
--- a/src/slurmd/slurmstepd/slurmstepd.c
+++ b/src/slurmd/slurmstepd/slurmstepd.c
@@ -1,15 +1,17 @@
 /*****************************************************************************\
  *  src/slurmd/slurmstepd/slurmstepd.c - SLURM job-step manager.
- *  $Id: slurmstepd.c 17040 2009-03-26 15:03:18Z jette $
+ *  $Id: slurmstepd.c 17056 2009-03-26 23:35:52Z dbremer $
  *****************************************************************************
- *  Copyright (C) 2002-2006 The Regents of the University of California.
+ *  Copyright (C) 2002-2007 The Regents of the University of California.
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Danny Auble <da@llnl.gov> 
  *  and Christopher Morrone <morrone2@llnl.gov>.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -98,12 +100,13 @@ main (int argc, char *argv[])
 		_dump_user_env();
 		exit(0);
 	}
-
 	xsignal_block(slurmstepd_blocked_signals);
 	conf = xmalloc(sizeof(*conf));
 	conf->argv = &argv;
 	conf->argc = &argc;
 	init_setproctitle(argc, argv);
+
+	/* Receive job parameters from the slurmd */
 	_init_from_slurmd(STDIN_FILENO, argv, &cli, &self, &msg,
 			  &ngids, &gids);
 
@@ -112,12 +115,17 @@ main (int argc, char *argv[])
 	 * on STDERR_FILENO for us. */
 	dup2(STDERR_FILENO, STDIN_FILENO);
 
+	/* Create the slurmd_job_t, mostly from info in a 
+	   launch_tasks_request_msg_t or a batch_job_launch_msg_t */
 	job = _step_setup(cli, self, msg);
 	job->ngids = ngids;
 	job->gids = gids;
 
+	/* fork handlers cause mutexes on some global data structures 
+	   to be re-initialized after the fork. */
 	list_install_fork_handlers();
 	slurm_conf_install_fork_handlers();
+
 	/* sets job->msg_handle and job->msgid */
 	if (msg_thr_create(job) == SLURM_ERROR) {
 		_send_fail_to_slurmd(STDOUT_FILENO);
@@ -132,7 +140,9 @@ main (int argc, char *argv[])
 	 * on STDERR_FILENO for us. */
 	dup2(STDERR_FILENO, STDOUT_FILENO);
 
-	rc = job_manager(job); /* blocks until step is complete */
+	/* This does most of the stdio setup, then launches all the tasks,
+	   and blocks until the step is complete */
+	rc = job_manager(job); 
 
 	/* signal the message thread to shutdown, and wait for it */
 	eio_signal_shutdown(job->msg_handle);
diff --git a/src/slurmd/slurmstepd/slurmstepd.h b/src/slurmd/slurmstepd/slurmstepd.h
index d053aeb3c1815c9fe8345be64710c390fbb0443c..1aaf100bf971d18aa927a401e8b3348448ec77b0 100644
--- a/src/slurmd/slurmstepd/slurmstepd.h
+++ b/src/slurmd/slurmstepd/slurmstepd.h
@@ -1,14 +1,15 @@
 /*****************************************************************************\
  * src/slurmd/slurmstepd/slurmstepd.h - slurmstepd general header file
- * $Id: slurmstepd.h 13672 2008-03-19 23:10:58Z jette $
+ * $Id: slurmstepd.h 16867 2009-03-12 16:35:42Z jette $
  *****************************************************************************
  *  Copyright (C) 2005 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Christopher J. Morrone <morrone2@llnl.gov>.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -63,4 +64,6 @@ typedef struct {
 
 extern step_complete_t step_complete;
 
+extern slurmd_conf_t *conf;
+
 #endif /* !_SLURMSTEPD_H */
diff --git a/src/slurmd/slurmstepd/slurmstepd_job.c b/src/slurmd/slurmstepd/slurmstepd_job.c
index 14de1ec02e26ef47320ec367c844dd96270c7883..d55e06191b4f0a77f6bc538f69d6edf6d5644e39 100644
--- a/src/slurmd/slurmstepd/slurmstepd_job.c
+++ b/src/slurmd/slurmstepd/slurmstepd_job.c
@@ -1,14 +1,16 @@
 /*****************************************************************************\
- * src/slurmd/slurmstepd/slurmstepd_job.c - slurmd_job_t routines
- * $Id: slurmstepd_job.c 15043 2008-09-09 23:45:19Z jette $
+ *  src/slurmd/slurmstepd/slurmstepd_job.c - slurmd_job_t routines
+ *  $Id: slurmstepd_job.c 16867 2009-03-12 16:35:42Z jette $
  *****************************************************************************
- *  Copyright (C) 2002 The Regents of the University of California.
+ *  Copyright (C) 2002-2007 The Regents of the University of California.
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Mark Grondona <mgrondona@llnl.gov>.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -44,24 +46,25 @@
 #  include <string.h>
 #endif
 
+#include <grp.h>
 #include <signal.h>
 #include <sys/types.h>
-#include <grp.h>
 
-#include "src/common/xmalloc.h"
-#include "src/common/xassert.h"
-#include "src/common/xstring.h"
+#include "src/common/eio.h"
 #include "src/common/fd.h"
 #include "src/common/log.h"
-#include "src/common/eio.h"
+#include "src/common/node_select.h"
 #include "src/common/slurm_jobacct_gather.h"
 #include "src/common/slurm_protocol_api.h"
+#include "src/common/xassert.h"
+#include "src/common/xmalloc.h"
+#include "src/common/xstring.h"
 
 #include "src/slurmd/slurmd/slurmd.h"
-#include "src/slurmd/slurmstepd/slurmstepd_job.h"
 #include "src/slurmd/slurmstepd/io.h"
 #include "src/slurmd/slurmstepd/fname.h"
 #include "src/slurmd/slurmstepd/multi_prog.h"
+#include "src/slurmd/slurmstepd/slurmstepd_job.h"
 
 static char ** _array_copy(int n, char **src);
 static void _array_free(char ***array);
@@ -196,25 +199,20 @@ job_create(launch_tasks_request_msg_t *msg)
 	job->stepid	= msg->job_step_id;
 
 	job->job_mem	= msg->job_mem;
-	job->task_mem	= msg->task_mem;
 	if (job->job_mem)
 		jobacct_common_set_mem_limit(job->jobid, job->job_mem);
-	else if (job->task_mem && job->ntasks) {
-		jobacct_common_set_mem_limit(job->jobid, 
-					     (job->task_mem * job->ntasks));
-	}
 	
 	job->uid	= (uid_t) msg->uid;
 	job->gid	= (gid_t) msg->gid;
 	job->cwd	= xstrdup(msg->cwd);
 	job->task_dist	= msg->task_dist;
-	job->plane_size	= msg->plane_size;
 	
 	job->cpu_bind_type = msg->cpu_bind_type;
 	job->cpu_bind = xstrdup(msg->cpu_bind);
 	job->mem_bind_type = msg->mem_bind_type;
 	job->mem_bind = xstrdup(msg->mem_bind);
-	job->ckpt_path = xstrdup(msg->ckpt_path);
+	job->ckpt_dir = xstrdup(msg->ckpt_dir);
+	job->restart_dir = xstrdup(msg->restart_dir);
 	job->cpus_per_task = msg->cpus_per_task;
 
 	job->env     = _array_copy(msg->envc, msg->env);
@@ -237,13 +235,11 @@ job_create(launch_tasks_request_msg_t *msg)
 	job->envtp->nodeid = -1;
 
 	job->envtp->distribution = 0;
-	job->envtp->plane_size = 0;
-
 	job->envtp->cpu_bind_type = 0;
 	job->envtp->cpu_bind = NULL;
 	job->envtp->mem_bind_type = 0;
 	job->envtp->mem_bind = NULL;
-	job->envtp->ckpt_path = NULL;
+	job->envtp->ckpt_dir = NULL;
 	
 	memcpy(&resp_addr, &msg->orig_addr, sizeof(slurm_addr));
 	slurm_set_addr(&resp_addr,
@@ -260,6 +256,7 @@ job_create(launch_tasks_request_msg_t *msg)
 	srun = srun_info_create(msg->cred, &resp_addr, &io_addr);
 
 	job->buffered_stdio = msg->buffered_stdio;
+	job->labelio = msg->labelio;
 
 	job->task_prolog = xstrdup(msg->task_prolog);
 	job->task_epilog = xstrdup(msg->task_epilog);
@@ -350,6 +347,9 @@ job_batch_job_create(batch_job_launch_msg_t *msg)
 	job->gid     = (gid_t) msg->gid;
 	job->cwd     = xstrdup(msg->work_dir);
 
+	job->ckpt_dir = xstrdup(msg->ckpt_dir);
+	job->restart_dir = xstrdup(msg->restart_dir);
+
 	job->env     = _array_copy(msg->envc, msg->environment);
 	job->eio     = eio_handle_create();
 	job->sruns   = list_create((ListDelF) _srun_info_destructor);
@@ -361,13 +361,13 @@ job_batch_job_create(batch_job_launch_msg_t *msg)
 	job->envtp->nodeid = -1;
 
 	job->envtp->distribution = 0;
-	job->envtp->plane_size = 0;
-
-	job->envtp->cpu_bind_type = 0;
-	job->envtp->cpu_bind = NULL;
+	job->cpu_bind_type = msg->cpu_bind_type;
+	job->cpu_bind = xstrdup(msg->cpu_bind);
 	job->envtp->mem_bind_type = 0;
 	job->envtp->mem_bind = NULL;
-	job->envtp->ckpt_path = NULL;
+	job->envtp->ckpt_dir = NULL;
+	job->envtp->restart_cnt = msg->restart_cnt;
+
 	job->cpus_per_task = msg->cpus_per_node[0];
 
 	srun = srun_info_create(NULL, NULL, NULL);
@@ -402,6 +402,11 @@ job_batch_job_create(batch_job_launch_msg_t *msg)
 	job->task[0]->argc = job->argc;
 	job->task[0]->argv = job->argv;
 
+#ifdef HAVE_CRAY_XT
+	select_g_get_jobinfo(msg->select_jobinfo, SELECT_DATA_RESV_ID,
+			     &job->resv_id);
+#endif
+
 	return job;
 }
 
diff --git a/src/slurmd/slurmstepd/slurmstepd_job.h b/src/slurmd/slurmstepd/slurmstepd_job.h
index 2bdbb46bd49ece8c16c63ab47d486c989b05a3a9..37f9b9f34a938e0e584ea33d7fe0656b9fdc56bc 100644
--- a/src/slurmd/slurmstepd/slurmstepd_job.h
+++ b/src/slurmd/slurmstepd/slurmstepd_job.h
@@ -1,14 +1,16 @@
 /*****************************************************************************\
  *  src/slurmd/slurmstepd/slurmstepd_job.h  slurmd_job_t definition
- *  $Id: slurmstepd_job.h 14702 2008-08-05 22:18:13Z jette $
+ *  $Id: slurmstepd_job.h 17056 2009-03-26 23:35:52Z dbremer $
  *****************************************************************************
- *  Copyright (C) 2002-2006 The Regents of the University of California.
+ *  Copyright (C) 2002-2007 The Regents of the University of California.
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Mark Grondona <mgrondona@llnl.gov>.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -119,14 +121,12 @@ typedef struct slurmd_job {
 	uint32_t       cpus_per_task;	/* number of cpus desired per task  */
 	uint32_t       debug;  /* debug level for job slurmd                */
 	uint32_t       job_mem;  /* MB of memory reserved for the job       */
-	uint32_t       task_mem; /* MB of memory reserved for each task     */ 
 	uint16_t       cpus;   /* number of cpus to use for this job        */
 	uint16_t       argc;   /* number of commandline arguments           */
 	char         **env;    /* job environment                           */
 	char         **argv;   /* job argument vector                       */
 	char          *cwd;    /* path to current working directory         */
-       	task_dist_states_t task_dist;/* -m distribution                     */
-        uint32_t       plane_size; /* -m plane=plane_size                   */
+	task_dist_states_t task_dist;/* -m distribution                     */
 	char          *node_name; /* node name of node running job
 				   * needed for front-end systems           */
 	cpu_bind_type_t cpu_bind_type; /* --cpu_bind=                       */
@@ -150,8 +150,8 @@ typedef struct slurmd_job {
 	eio_handle_t  *eio;
 	List 	       sruns; /* List of srun_info_t pointers               */
 	List           clients; /* List of struct client_io_info pointers   */
-	List stdout_eio_objs;
-	List stderr_eio_objs;
+	List stdout_eio_objs; /* List of objs that gather stdout from tasks */
+	List stderr_eio_objs; /* List of objs that gather stderr from tasks */
 	List free_incoming;   /* List of free struct io_buf * for incoming
 			       * traffic. "incoming" means traffic from srun
 			       * to the tasks.
@@ -164,8 +164,8 @@ typedef struct slurmd_job {
 			       * including free_incoming buffers and
 			       * buffers in use.
 			       */
-	int outgoing_count;   /* Count of total incoming message buffers
-			       * including free_incoming buffers and
+	int outgoing_count;   /* Count of total outgoing message buffers
+			       * including free_outgoing buffers and
 			       * buffers in use.
 			       */
 
@@ -176,6 +176,7 @@ typedef struct slurmd_job {
 	uint8_t	buffered_stdio; /* stdio buffering flag, 1 for line-buffering,
 				 * 0 for no buffering
 				 */
+	uint8_t labelio;	/* 1 for labelling output with the task id */
 
 	pthread_t      ioid;  /* pthread id of IO thread                    */
 	pthread_t      msgid; /* pthread id of message thread               */
@@ -193,9 +194,13 @@ typedef struct slurmd_job {
 	char          *batchdir;
 	jobacctinfo_t *jobacct;
 	uint8_t        open_mode;	/* stdout/err append or truncate */
-	uint8_t        pty;		/* set if creating pseudo tty       */
+	uint8_t        pty;		/* set if creating pseudo tty	*/
 	job_options_t  options;
-	char          *ckpt_path;
+	char          *ckpt_dir;
+	time_t         ckpt_timestamp;
+	char          *restart_dir;	/* restart from context */
+	char          *resv_id;		/* Cray/BASIL reservation ID	*/
+	uint16_t       restart_cnt;	/* batch job restart count	*/
 } slurmd_job_t;
 
 
diff --git a/src/slurmd/slurmstepd/step_terminate_monitor.c b/src/slurmd/slurmstepd/step_terminate_monitor.c
index 3438374f82389da12397cf924de2317c7ab71dd6..6e46fb75a96468ffd9935ed23d887fabf091080a 100644
--- a/src/slurmd/slurmstepd/step_terminate_monitor.c
+++ b/src/slurmd/slurmstepd/step_terminate_monitor.c
@@ -5,10 +5,11 @@
  *  Copyright (C) 2007 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Christopher J. Morrone <morrone2@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/slurmd/slurmstepd/step_terminate_monitor.h b/src/slurmd/slurmstepd/step_terminate_monitor.h
index ee2caf4be952cf6576f65dfddd5b5c07efabb7d5..fcda666ae5f2c8d3c9ad6d11de0ca35acad881bb 100644
--- a/src/slurmd/slurmstepd/step_terminate_monitor.h
+++ b/src/slurmd/slurmstepd/step_terminate_monitor.h
@@ -5,10 +5,11 @@
  *  Copyright (C) 2007 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Christopher J. Morrone <morrone2@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/slurmd/slurmstepd/task.c b/src/slurmd/slurmstepd/task.c
index dc0a6f1e389bc8f22d54caf3dde837024a1ade77..8493d7322d092737ce52db0e52a9f34fc7545e43 100644
--- a/src/slurmd/slurmstepd/task.c
+++ b/src/slurmd/slurmstepd/task.c
@@ -2,13 +2,14 @@
  *  slurmd/slurmstepd/task.c - task launching functions for slurmstepd
  *****************************************************************************
  *  Copyright (C) 2002-2007 The Regents of the University of California.
- *  Copyright (C) 2008 Lawrence Livermore National Security.
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Mark A. Grondona <mgrondona@llnl.gov>.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -76,23 +77,24 @@
 
 #include <slurm/slurm_errno.h>
 
+#include "src/common/checkpoint.h"
 #include "src/common/env.h"
 #include "src/common/fd.h"
 #include "src/common/log.h"
+#include "src/common/mpi.h"
+#include "src/common/plugstack.h"
+#include "src/slurmd/common/proctrack.h"
 #include "src/common/switch.h"
+#include "src/slurmd/common/task_plugin.h"
 #include "src/common/xsignal.h"
 #include "src/common/xstring.h"
-#include "src/common/mpi.h"
 #include "src/common/xmalloc.h"
-#include "src/common/plugstack.h"
 
 #include "src/slurmd/slurmd/slurmd.h"
-#include "src/slurmd/common/proctrack.h"
-#include "src/slurmd/common/task_plugin.h"
-#include "src/slurmd/slurmstepd/task.h"
-#include "src/slurmd/slurmstepd/ulimits.h"
 #include "src/slurmd/slurmstepd/io.h"
 #include "src/slurmd/slurmstepd/pdebug.h"
+#include "src/slurmd/slurmstepd/task.h"
+#include "src/slurmd/slurmstepd/ulimits.h"
 
 /*
  * Static prototype definitions.
@@ -345,11 +347,11 @@ exec_task(slurmd_job_t *job, int i, int waitfd)
 	if (i == 0)
 		_make_tmpdir(job);
 
-        /*
+	/*
 	 * Stall exec until all tasks have joined the same process group
 	 */
-        if ((rc = read (waitfd, &c, sizeof (c))) != 1) {
-	        error ("_exec_task read failed, fd = %d, rc=%d: %m", waitfd, rc);
+	if ((rc = read (waitfd, &c, sizeof (c))) != 1) {
+		error ("_exec_task read failed, fd = %d, rc=%d: %m", waitfd, rc);
 		log_fini();
 		exit(1);
 	}
@@ -370,13 +372,13 @@ exec_task(slurmd_job_t *job, int i, int waitfd)
 	job->envtp->localid = task->id;
 	job->envtp->task_pid = getpid();
 	job->envtp->distribution = job->task_dist;
-	job->envtp->plane_size   = job->plane_size;
 	job->envtp->cpu_bind = xstrdup(job->cpu_bind);
 	job->envtp->cpu_bind_type = job->cpu_bind_type;
 	job->envtp->mem_bind = xstrdup(job->mem_bind);
 	job->envtp->mem_bind_type = job->mem_bind_type;
 	job->envtp->distribution = -1;
-	job->envtp->ckpt_path = xstrdup(job->ckpt_path);
+	job->envtp->ckpt_dir = xstrdup(job->ckpt_dir);
+	job->envtp->batch_flag = job->batch;
 	setup_env(job->envtp);
 	setenvf(&job->envtp->env, "SLURMD_NODENAME", "%s", conf->node_name);
 	job->env = job->envtp->env;
@@ -426,7 +428,11 @@ exec_task(slurmd_job_t *job, int i, int waitfd)
 	}
 
 	/* task plugin hook */
-	pre_launch(job);
+	if (pre_launch(job)) {
+		error ("Failed task affinity setup");
+		exit (1);
+	}
+
 	if (conf->task_prolog) {
 		char *my_prolog;
 		slurm_mutex_lock(&conf->config_mutex);
@@ -449,6 +455,14 @@ exec_task(slurmd_job_t *job, int i, int waitfd)
 		job->env[0] = (char *)NULL;
 	}
 
+	if (job->restart_dir) {
+		info("restart from %s", job->restart_dir);
+		/* no return on success */
+		checkpoint_restart_task(job, job->restart_dir, task->gtid);
+		error("Restart task failed: %m");
+		exit(errno);
+	}
+
 	if (task->argv[0] == NULL) {
 		error("No executable program specified for this task");
 		exit(2);
diff --git a/src/slurmd/slurmstepd/task.h b/src/slurmd/slurmstepd/task.h
index 9daa80dfac1c14b020c087973aa83323f7a37648..94fca74020000410c27b5cc3fc84b8be6daeb5b9 100644
--- a/src/slurmd/slurmstepd/task.h
+++ b/src/slurmd/slurmstepd/task.h
@@ -1,14 +1,15 @@
 /*****************************************************************************\
  * src/slurmd/slurmstepd/task.h - task launching functions for slurmstepd
- * $Id: task.h 13672 2008-03-19 23:10:58Z jette $
+ * $Id: task.h 16616 2009-02-20 17:00:27Z jette $
  *****************************************************************************
  *  Copyright (C) 2002 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Mark Grondona <mgrondona@llnl.gov>.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/slurmd/slurmstepd/ulimits.c b/src/slurmd/slurmstepd/ulimits.c
index a46d0c605bfcad9f828eb5fa1068e7cf36c24cce..66b14ce2c7c27155c2ebfbdf22814dad3ba0a1db 100644
--- a/src/slurmd/slurmstepd/ulimits.c
+++ b/src/slurmd/slurmstepd/ulimits.c
@@ -1,14 +1,15 @@
 /*****************************************************************************\
  * src/slurmd/slurmstepd/ulimits.c - set user limits for job
- * $Id: ulimits.c 15505 2008-10-27 17:39:44Z jette $
+ * $Id: ulimits.c 16616 2009-02-20 17:00:27Z jette $
  *****************************************************************************
  *  Copyright (C) 2002 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Mark Grondona <mgrondona@llnl.gov>.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -83,8 +84,8 @@ int set_user_limits(slurmd_job_t *job)
 		_set_limit( job->env, rli );
 
 	/* Set soft and hard memory and data size limit for this process, 
-	 * try to handle job and task limit (all spawned processes) in slurmd */
-	task_mem_bytes  = job->task_mem;	/* MB */
+	 * handle job limit (for all spawned processes) in slurmd */
+	task_mem_bytes  = job->job_mem;	/* MB */
 	task_mem_bytes *= (1024 * 1024);
 #ifdef RLIMIT_AS
 	if ((task_mem_bytes) && (getrlimit(RLIMIT_AS, &r) == 0) &&
@@ -92,9 +93,9 @@ int set_user_limits(slurmd_job_t *job)
 		r.rlim_max =  r.rlim_cur = task_mem_bytes;
 		if (setrlimit(RLIMIT_AS, &r)) {
 			/* Indicates that limit has already been exceeded */
-			fatal("setrlimit(RLIMIT_AS, %u MB): %m", job->task_mem);
+			fatal("setrlimit(RLIMIT_AS, %u MB): %m", job->job_mem);
 		} else
-			info("Set task_mem(%u MB)", job->task_mem);
+			info("Set task_mem(%u MB)", job->job_mem);
 #if 0
 		getrlimit(RLIMIT_AS, &r);
 		info("task memory limits: %u %u", r.rlim_cur, r.rlim_max);
@@ -107,9 +108,9 @@ int set_user_limits(slurmd_job_t *job)
 		r.rlim_max =  r.rlim_cur = task_mem_bytes;
 		if (setrlimit(RLIMIT_DATA, &r)) {
 			/* Indicates that limit has already been exceeded */
-			fatal("setrlimit(RLIMIT_DATA, %u MB): %m", job->task_mem);
+			fatal("setrlimit(RLIMIT_DATA, %u MB): %m", job->job_mem);
 		} else
-			info("Set task_data(%u MB)", job->task_mem);
+			info("Set task_data(%u MB)", job->job_mem);
 #if 0
 		getrlimit(RLIMIT_DATA, &r);
 		info("task DATA limits: %u %u", r.rlim_cur, r.rlim_max);
diff --git a/src/slurmd/slurmstepd/ulimits.h b/src/slurmd/slurmstepd/ulimits.h
index 6a1656b605294d909a91915f24814d82012cd019..1d35faaa7aa9b5fc0bc0adef36a5f22303d920dd 100644
--- a/src/slurmd/slurmstepd/ulimits.h
+++ b/src/slurmd/slurmstepd/ulimits.h
@@ -5,10 +5,11 @@
  *  Copyright (C) 2002 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Mark Grondona <mgrondona@llnl.gov>.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/slurmdbd/Makefile.am b/src/slurmdbd/Makefile.am
index e2f99b404ffe6c4d0cc8e16db2f2efb806fc4534..76308e09457417689c95abb7327ffcad71e7251a 100644
--- a/src/slurmdbd/Makefile.am
+++ b/src/slurmdbd/Makefile.am
@@ -16,6 +16,8 @@ slurmdbd_LDADD = 					\
 slurmdbd_SOURCES = 		\
 	agent.c			\
 	agent.h			\
+	backup.c		\
+	backup.h		\
 	proc_req.c		\
 	proc_req.h		\
 	read_config.c		\
diff --git a/src/slurmdbd/Makefile.in b/src/slurmdbd/Makefile.in
index a8dd62a790c8fac175c28704d4e47e979e193da6..af33d72fcbc10e822b106569575333e6f7533209 100644
--- a/src/slurmdbd/Makefile.in
+++ b/src/slurmdbd/Makefile.in
@@ -45,14 +45,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -72,8 +76,9 @@ CONFIG_CLEAN_FILES =
 am__installdirs = "$(DESTDIR)$(sbindir)"
 sbinPROGRAMS_INSTALL = $(INSTALL_PROGRAM)
 PROGRAMS = $(sbin_PROGRAMS)
-am_slurmdbd_OBJECTS = agent.$(OBJEXT) proc_req.$(OBJEXT) \
-	read_config.$(OBJEXT) rpc_mgr.$(OBJEXT) slurmdbd.$(OBJEXT)
+am_slurmdbd_OBJECTS = agent.$(OBJEXT) backup.$(OBJEXT) \
+	proc_req.$(OBJEXT) read_config.$(OBJEXT) rpc_mgr.$(OBJEXT) \
+	slurmdbd.$(OBJEXT)
 slurmdbd_OBJECTS = $(am_slurmdbd_OBJECTS)
 slurmdbd_DEPENDENCIES = $(top_builddir)/src/common/libdaemonize.la \
 	$(top_builddir)/src/api/libslurm.o
@@ -107,6 +112,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
@@ -276,6 +285,8 @@ slurmdbd_LDADD = \
 slurmdbd_SOURCES = \
 	agent.c			\
 	agent.h			\
+	backup.c		\
+	backup.h		\
 	proc_req.c		\
 	proc_req.h		\
 	read_config.c		\
@@ -358,6 +369,7 @@ distclean-compile:
 	-rm -f *.tab.c
 
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/agent.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/backup.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/proc_req.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/read_config.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/rpc_mgr.Po@am__quote@
diff --git a/src/slurmdbd/agent.c b/src/slurmdbd/agent.c
index 006160d498d64e52782fac824bd0179223397b64..3ce1111d56a938c95b3894fed7fb554c37937c2d 100644
--- a/src/slurmdbd/agent.c
+++ b/src/slurmdbd/agent.c
@@ -5,10 +5,11 @@
  *  Copyright (C) 2008 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/slurmdbd/agent.h b/src/slurmdbd/agent.h
index 70c133abf33262b10826567d5d7cb1c623fcd15d..c6c3941287f21e9490c20268206ff530ccff25e1 100644
--- a/src/slurmdbd/agent.h
+++ b/src/slurmdbd/agent.h
@@ -4,10 +4,11 @@
  *  Copyright (C) 2008 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/slurmdbd/backup.c b/src/slurmdbd/backup.c
new file mode 100644
index 0000000000000000000000000000000000000000..1aac5b1c51e984d959f7afe581c5de9d2056c40f
--- /dev/null
+++ b/src/slurmdbd/backup.c
@@ -0,0 +1,129 @@
+/*****************************************************************************\
+ *  backup.c - backup slurm dbd
+ *****************************************************************************
+ *  Copyright (C) 2009  Lawrence Livermore National Security.
+ *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+ *  Written by Danny Auble <da@llnl.gov>
+ *  CODE-OCEC-09-009. All rights reserved.
+ *  
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
+ *  
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission 
+ *  to link the code of portions of this program with the OpenSSL library under 
+ *  certain conditions as described in each individual source file, and 
+ *  distribute linked combinations including the two. You must obey the GNU 
+ *  General Public License in all respects for all of the code used other than 
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this 
+ *  exception to your version of the file(s), but you are not obligated to do 
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in 
+ *  the program, then also delete it here.
+ *  
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *  
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#include <sys/poll.h>
+
+#include "src/common/xmalloc.h"
+#include "src/common/slurm_protocol_defs.h"
+#include "src/common/fd.h"
+#include "src/common/log.h"
+#include "src/common/slurmdbd_defs.h"
+
+#include "src/slurmdbd/backup.h"
+
+bool primary_resumed = false;
+bool backup = false;
+bool have_control = false;
+
+static slurm_fd  slurmdbd_fd         = -1;
+
+/* Open a connection to the Slurm DBD and set slurmdbd_fd */
+static void _open_slurmdbd_fd(slurm_addr dbd_addr)
+{
+	if(dbd_addr.sin_port == 0) {
+		error("sin_port == 0 in the slurmdbd backup");
+		return;
+	}
+
+	slurmdbd_fd = slurm_open_msg_conn(&dbd_addr);
+
+	if (slurmdbd_fd >= 0)
+		fd_set_nonblocking(slurmdbd_fd);
+}
+
+/* Close the SlurmDbd connection */
+static void _close_slurmdbd_fd(void)
+{
+	if (slurmdbd_fd >= 0) {
+		close(slurmdbd_fd);
+		slurmdbd_fd = -1;
+	}
+}
+
+/* Reopen the Slurm DBD connection due to some error */
+static void _reopen_slurmdbd_fd(slurm_addr dbd_addr)
+{
+	_close_slurmdbd_fd();
+	_open_slurmdbd_fd(dbd_addr);
+}
+
+/* run_backup - this is the backup dbd; it should run in standby
+ *	mode, assuming control when the primary dbd stops responding */
+extern void run_backup(void)
+{
+	slurm_addr dbd_addr;
+
+	primary_resumed = false;
+
+	/* get a connection */
+	slurm_set_addr(&dbd_addr, slurmdbd_conf->dbd_port,
+		       slurmdbd_conf->dbd_host);
+
+	if (dbd_addr.sin_port == 0)
+		error("Unable to locate SlurmDBD host %s:%u", 
+		      slurmdbd_conf->dbd_host, slurmdbd_conf->dbd_port);
+	else 
+		_open_slurmdbd_fd(dbd_addr);
+
+	/* repeatedly ping Primary */
+	while (!shutdown_time) {
+		bool writeable = fd_writeable(slurmdbd_fd);
+		//info("%d %d", have_control, writeable);
+
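+		/* A writeable socket means the primary dbd is serving.
+		 * If we hold control and the primary is back, yield to
+		 * it; if we lack control and the primary has become
+		 * unreachable, take over. */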
+		if (have_control && writeable) {
+			info("Primary has come back");
+			primary_resumed = true;
+			shutdown_threads();
+			have_control = false;
+			break;
+		} else if(!have_control && !writeable) {
+			have_control = true;
+			info("Taking Control");
+			break;
+		}
+
+		sleep(1);
+		if(!writeable)
+			_reopen_slurmdbd_fd(dbd_addr);
+	}
+
+	_close_slurmdbd_fd();
+
+	return;
+}
diff --git a/src/slurmdbd/backup.h b/src/slurmdbd/backup.h
new file mode 100644
index 0000000000000000000000000000000000000000..9980f3e969506d58a4a34c4d527a872b3867fde5
--- /dev/null
+++ b/src/slurmdbd/backup.h
@@ -0,0 +1,55 @@
+/*****************************************************************************\
+ *  backup.h - backup slurm dbd
+ *****************************************************************************
+ *  Copyright (C) 2009  Lawrence Livermore National Security.
+ *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+ *  Written by Danny Auble <da@llnl.gov>
+ *  CODE-OCEC-09-009. All rights reserved.
+ *  
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
+ *  
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission 
+ *  to link the code of portions of this program with the OpenSSL library under 
+ *  certain conditions as described in each individual source file, and 
+ *  distribute linked combinations including the two. You must obey the GNU 
+ *  General Public License in all respects for all of the code used other than 
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this 
+ *  exception to your version of the file(s), but you are not obligated to do 
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in 
+ *  the program, then also delete it here.
+ *  
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *  
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#ifndef _DBDBACKUP_H
+#define _DBDBACKUP_H
+
+#include "src/slurmdbd/read_config.h"
+#include "src/slurmdbd/rpc_mgr.h"
+#include "src/slurmdbd/slurmdbd.h"
+
+extern bool primary_resumed;
+extern bool backup;
+extern bool have_control;
+
+/* run_backup - this is the backup dbd; it should run in standby
+ *	mode, assuming control when the primary dbd stops responding */
+extern void run_backup(void);
+
+#endif
diff --git a/src/slurmdbd/proc_req.c b/src/slurmdbd/proc_req.c
index 4fba0133438caf32ad1416d7d5bea63a19c658ba..a052d53970038c188d8d273db7f90d07c90c6407 100644
--- a/src/slurmdbd/proc_req.c
+++ b/src/slurmdbd/proc_req.c
@@ -4,10 +4,11 @@
  *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -63,6 +64,8 @@ static int   _add_users(slurmdbd_conn_t *slurmdbd_conn,
 			Buf in_buffer, Buf *out_buffer, uint32_t *uid);
 static int   _add_wckeys(slurmdbd_conn_t *slurmdbd_conn,
 			 Buf in_buffer, Buf *out_buffer, uint32_t *uid);
+static int   _add_reservation(slurmdbd_conn_t *slurmdbd_conn,
+			      Buf in_buffer, Buf *out_buffer, uint32_t *uid);
 static int   _archive_dump(slurmdbd_conn_t *slurmdbd_conn,
 			   Buf in_buffer, Buf *out_buffer, uint32_t *uid);
 static int   _archive_load(slurmdbd_conn_t *slurmdbd_conn,
@@ -75,6 +78,8 @@ static int   _get_assocs(slurmdbd_conn_t *slurmdbd_conn,
 			 Buf in_buffer, Buf *out_buffer, uint32_t *uid);
 static int   _get_clusters(slurmdbd_conn_t *slurmdbd_conn,
 			   Buf in_buffer, Buf *out_buffer, uint32_t *uid);
+static int   _get_config(slurmdbd_conn_t *slurmdbd_conn,
+			 Buf in_buffer, Buf *out_buffer, uint32_t *uid);
 static int   _get_jobs(slurmdbd_conn_t *slurmdbd_conn,
 		       Buf in_buffer, Buf *out_buffer, uint32_t *uid);
 static int   _get_jobs_cond(slurmdbd_conn_t *slurmdbd_conn,
@@ -89,6 +94,8 @@ static int   _get_users(slurmdbd_conn_t *slurmdbd_conn,
 			Buf in_buffer, Buf *out_buffer, uint32_t *uid);
 static int   _get_wckeys(slurmdbd_conn_t *slurmdbd_conn,
 			 Buf in_buffer, Buf *out_buffer, uint32_t *uid);
+static int   _get_reservations(slurmdbd_conn_t *slurmdbd_conn,
+			       Buf in_buffer, Buf *out_buffer, uint32_t *uid);
 static int   _flush_jobs(slurmdbd_conn_t *slurmdbd_conn,
 			 Buf in_buffer, Buf *out_buffer, uint32_t *uid);
 static int   _init_conn(slurmdbd_conn_t *slurmdbd_conn, 
@@ -113,6 +120,8 @@ static int   _modify_users(slurmdbd_conn_t *slurmdbd_conn,
 			   Buf in_buffer, Buf *out_buffer, uint32_t *uid);
 static int   _modify_wckeys(slurmdbd_conn_t *slurmdbd_conn,
 			    Buf in_buffer, Buf *out_buffer, uint32_t *uid);
+static int   _modify_reservation(slurmdbd_conn_t *slurmdbd_conn,
+				 Buf in_buffer, Buf *out_buffer, uint32_t *uid);
 static int   _node_state(slurmdbd_conn_t *slurmdbd_conn,
 			 Buf in_buffer, Buf *out_buffer, uint32_t *uid);
 static char *_node_state_string(uint16_t node_state);
@@ -133,14 +142,14 @@ static int   _remove_users(slurmdbd_conn_t *slurmdbd_conn,
 			   Buf in_buffer, Buf *out_buffer, uint32_t *uid);
 static int   _remove_wckeys(slurmdbd_conn_t *slurmdbd_conn,
 			    Buf in_buffer, Buf *out_buffer, uint32_t *uid);
+static int   _remove_reservation(slurmdbd_conn_t *slurmdbd_conn,
+				 Buf in_buffer, Buf *out_buffer, uint32_t *uid);
 static int   _roll_usage(slurmdbd_conn_t *slurmdbd_conn,
 			 Buf in_buffer, Buf *out_buffer, uint32_t *uid);
 static int   _step_complete(slurmdbd_conn_t *slurmdbd_conn,
 			    Buf in_buffer, Buf *out_buffer, uint32_t *uid);
 static int   _step_start(slurmdbd_conn_t *slurmdbd_conn,
 			 Buf in_buffer, Buf *out_buffer, uint32_t *uid);
-static int   _update_shares_used(slurmdbd_conn_t *slurmdbd_conn,
-				 Buf in_buffer, Buf *out_buffer, uint32_t *uid);
 
 /* Process an incoming RPC
  * slurmdbd_conn IN/OUT - in will that the newsockfd set before
@@ -200,6 +209,10 @@ proc_req(slurmdbd_conn_t *slurmdbd_conn,
 			rc = _add_wckeys(slurmdbd_conn, 
 					 in_buffer, out_buffer, uid);
 			break;
+		case DBD_ADD_RESV:
+			rc = _add_reservation(slurmdbd_conn,
+					      in_buffer, out_buffer, uid);
+			break;
 		case DBD_ARCHIVE_DUMP:
 			rc = _archive_dump(slurmdbd_conn, 
 					   in_buffer, out_buffer, uid);
@@ -229,6 +242,10 @@ proc_req(slurmdbd_conn_t *slurmdbd_conn,
 			rc = _get_clusters(slurmdbd_conn,
 					   in_buffer, out_buffer, uid);
 			break;
+		case DBD_GET_CONFIG:
+			rc = _get_config(slurmdbd_conn,
+					 in_buffer, out_buffer, uid);
+			break;
 		case DBD_GET_JOBS:
 			rc = _get_jobs(slurmdbd_conn,
 				       in_buffer, out_buffer, uid);
@@ -249,6 +266,10 @@ proc_req(slurmdbd_conn_t *slurmdbd_conn,
 			rc = _get_wckeys(slurmdbd_conn, 
 					 in_buffer, out_buffer, uid);
 			break;
+		case DBD_GET_RESVS:
+			rc = _get_reservations(slurmdbd_conn, 
+					       in_buffer, out_buffer, uid);
+			break;
 		case DBD_GET_USERS:
 			rc = _get_users(slurmdbd_conn,
 					in_buffer, out_buffer, uid);
@@ -307,7 +328,11 @@ proc_req(slurmdbd_conn_t *slurmdbd_conn,
 			break;
 		case DBD_MODIFY_WCKEYS:
 			rc = _modify_wckeys(slurmdbd_conn, 
-					 in_buffer, out_buffer, uid);
+					    in_buffer, out_buffer, uid);
+			break;
+		case DBD_MODIFY_RESV:
+			rc = _modify_reservation(slurmdbd_conn,
+						 in_buffer, out_buffer, uid);
 			break;
 		case DBD_NODE_STATE:
 			rc = _node_state(slurmdbd_conn,
@@ -345,6 +370,10 @@ proc_req(slurmdbd_conn_t *slurmdbd_conn,
 			rc = _remove_wckeys(slurmdbd_conn, 
 					    in_buffer, out_buffer, uid);
 			break;
+		case DBD_REMOVE_RESV:
+			rc = _remove_reservation(slurmdbd_conn,
+						 in_buffer, out_buffer, uid);
+			break;
 		case DBD_ROLL_USAGE:
 			rc = _roll_usage(slurmdbd_conn, 
 					 in_buffer, out_buffer, uid);
@@ -357,10 +386,6 @@ proc_req(slurmdbd_conn_t *slurmdbd_conn,
 			rc = _step_start(slurmdbd_conn,
 					 in_buffer, out_buffer, uid);
 			break;
-		case DBD_UPDATE_SHARES_USED:
-			rc = _update_shares_used(slurmdbd_conn,
-						 in_buffer, out_buffer, uid);
-			break;
 		default:
 			comment = "Invalid RPC";
 			error("%s msg_type=%d", comment, msg_type);
@@ -384,6 +409,23 @@ unpack_error:
 	return SLURM_ERROR;
 }
 
+/* replace \" with \` return is the same as what is given */
+static char * _replace_double_quotes(char *option)
+{
+	int i=0;
+
+	if(!option)
+		return NULL;
+
+	while(option[i]) {
+		if(option[i] == '\"')
+			option[i] = '`';
+		i++;
+	}
+	return option;
+}
+
 static int _add_accounts(slurmdbd_conn_t *slurmdbd_conn,
 			 Buf in_buffer, Buf *out_buffer, uint32_t *uid)
 {
@@ -399,7 +441,8 @@ static int _add_accounts(slurmdbd_conn_t *slurmdbd_conn,
 
 		memset(&user, 0, sizeof(acct_user_rec_t));
 		user.uid = *uid;
-		if(assoc_mgr_fill_in_user(slurmdbd_conn->db_conn, &user, 1)
+		if(assoc_mgr_fill_in_user(
+			   slurmdbd_conn->db_conn, &user, 1, NULL)
 		   != SLURM_SUCCESS) {
 			comment = "Your user has not been added to the accounting system yet.";
 			error("%s", comment);
@@ -466,7 +509,8 @@ static int _add_account_coords(slurmdbd_conn_t *slurmdbd_conn,
 
 		memset(&user, 0, sizeof(acct_user_rec_t));
 		user.uid = *uid;
-		if(assoc_mgr_fill_in_user(slurmdbd_conn->db_conn, &user, 1) 
+		if(assoc_mgr_fill_in_user(
+			   slurmdbd_conn->db_conn, &user, 1, NULL) 
 		   != SLURM_SUCCESS) {
 			comment = "Your user has not been added to the accounting system yet.";
 			error("%s", comment);
@@ -542,7 +586,8 @@ static int _add_assocs(slurmdbd_conn_t *slurmdbd_conn,
 
 		memset(&user, 0, sizeof(acct_user_rec_t));
 		user.uid = *uid;
-		if(assoc_mgr_fill_in_user(slurmdbd_conn->db_conn, &user, 1)
+		if(assoc_mgr_fill_in_user(
+			   slurmdbd_conn->db_conn, &user, 1, NULL)
 		   != SLURM_SUCCESS) {
 			comment = "Your user has not been added to the accounting system yet.";
 			error("%s", comment);
@@ -683,7 +728,8 @@ static int _add_users(slurmdbd_conn_t *slurmdbd_conn,
 
 		memset(&user, 0, sizeof(acct_user_rec_t));
 		user.uid = *uid;
-		if(assoc_mgr_fill_in_user(slurmdbd_conn->db_conn, &user, 1) 
+		if(assoc_mgr_fill_in_user(
+			   slurmdbd_conn->db_conn, &user, 1, NULL) 
 		   != SLURM_SUCCESS) {
 			comment = "Your user has not been added to the accounting system yet.";
 			error("%s", comment);
@@ -760,6 +806,39 @@ end_it:
 	return rc;
 }
 
+static int _add_reservation(slurmdbd_conn_t *slurmdbd_conn,
+			     Buf in_buffer, Buf *out_buffer, uint32_t *uid)
+{
+	int rc = SLURM_SUCCESS;
+	dbd_rec_msg_t *rec_msg = NULL;
+	char *comment = NULL;
+
+	if ((*uid != slurmdbd_conf->slurm_user_id && *uid != 0)) {
+		comment = "DBD_ADD_RESV message from invalid uid";
+		error("DBD_ADD_RESV message from invalid uid %u", *uid);
+		rc = ESLURM_ACCESS_DENIED;
+		goto end_it;
+	}
+	if (slurmdbd_unpack_rec_msg(slurmdbd_conn->rpc_version, DBD_ADD_RESV,
+				    &rec_msg, in_buffer) != SLURM_SUCCESS) {
+		comment = "Failed to unpack DBD_ADD_RESV message";
+		error("%s", comment);
+		rc = SLURM_ERROR;
+		goto end_it;
+	}
+	debug2("DBD_ADD_RESV: called");
+
+	rc = acct_storage_g_add_reservation(slurmdbd_conn->db_conn,
+					     rec_msg->rec);
+
+end_it:
+	slurmdbd_free_rec_msg(slurmdbd_conn->rpc_version,
+			      DBD_ADD_RESV, rec_msg);
+	*out_buffer = make_dbd_rc_msg(slurmdbd_conn->rpc_version, 
+				      rc, comment, DBD_ADD_RESV);
+	return rc;
+}
+
 static int _archive_dump(slurmdbd_conn_t *slurmdbd_conn,
 			 Buf in_buffer, Buf *out_buffer, uint32_t *uid)
 {
@@ -790,6 +869,8 @@ static int _archive_dump(slurmdbd_conn_t *slurmdbd_conn,
 	/* set up some defaults */
 	if(!arch_cond->archive_dir)
 		arch_cond->archive_dir = xstrdup(slurmdbd_conf->archive_dir);
+	if(arch_cond->archive_events == (uint16_t)NO_VAL)
+		arch_cond->archive_events = slurmdbd_conf->archive_events;
 	if(arch_cond->archive_jobs == (uint16_t)NO_VAL)
 		arch_cond->archive_jobs = slurmdbd_conf->archive_jobs;
 	if(!arch_cond->archive_script)
@@ -797,10 +878,16 @@ static int _archive_dump(slurmdbd_conn_t *slurmdbd_conn,
 			xstrdup(slurmdbd_conf->archive_script);
 	if(arch_cond->archive_steps == (uint16_t)NO_VAL)
 		arch_cond->archive_steps = slurmdbd_conf->archive_steps;
-	if(arch_cond->job_purge == (uint16_t)NO_VAL)
-		arch_cond->job_purge = slurmdbd_conf->job_purge;
-	if(arch_cond->step_purge == (uint16_t)NO_VAL)
-		arch_cond->step_purge = slurmdbd_conf->step_purge;
+	if(arch_cond->archive_suspend == (uint16_t)NO_VAL)
+		arch_cond->archive_suspend = slurmdbd_conf->archive_suspend;
+	if(arch_cond->purge_event == (uint16_t)NO_VAL)
+		arch_cond->purge_event = slurmdbd_conf->purge_event;
+	if(arch_cond->purge_job == (uint16_t)NO_VAL)
+		arch_cond->purge_job = slurmdbd_conf->purge_job;
+	if(arch_cond->purge_step == (uint16_t)NO_VAL)
+		arch_cond->purge_step = slurmdbd_conf->purge_step;
+	if(arch_cond->purge_suspend == (uint16_t)NO_VAL)
+		arch_cond->purge_suspend = slurmdbd_conf->purge_suspend;
 
 	rc = jobacct_storage_g_archive(slurmdbd_conn->db_conn, arch_cond);
 	if(rc != SLURM_SUCCESS) {
@@ -844,10 +931,12 @@ static int _archive_load(slurmdbd_conn_t *slurmdbd_conn,
 	}
 	
 	rc = jobacct_storage_g_archive_load(slurmdbd_conn->db_conn, arch_rec);
+
 	if(rc == ENOENT) 
 		comment = "No archive file given to recover.";
 	else if(rc != SLURM_SUCCESS)
 		comment = "Error with request.";
+
 end_it:
 	destroy_acct_archive_rec(arch_rec);
 	*out_buffer = make_dbd_rc_msg(slurmdbd_conn->rpc_version, 
@@ -883,6 +972,7 @@ static int _cluster_procs(slurmdbd_conn_t *slurmdbd_conn,
 	rc = clusteracct_storage_g_cluster_procs(
 		slurmdbd_conn->db_conn,
 		cluster_procs_msg->cluster_name,
+		cluster_procs_msg->cluster_nodes,
 		cluster_procs_msg->proc_count,
 		cluster_procs_msg->event_time);
 end_it:
@@ -1004,6 +1094,25 @@ static int _get_clusters(slurmdbd_conn_t *slurmdbd_conn,
 	return SLURM_SUCCESS;
 }
 
+static int _get_config(slurmdbd_conn_t *slurmdbd_conn, 
+			 Buf in_buffer, Buf *out_buffer, uint32_t *uid)
+{
+	dbd_list_msg_t list_msg = { NULL };
+
+	debug2("DBD_GET_CONFIG: called");
+	/* No message body to unpack */
+
+	list_msg.my_list = dump_config();
+	*out_buffer = init_buf(1024);
+	pack16((uint16_t) DBD_GOT_CONFIG, *out_buffer);
+	slurmdbd_pack_list_msg(slurmdbd_conn->rpc_version, 
+			       DBD_GOT_CONFIG, &list_msg, *out_buffer);
+	if(list_msg.my_list)
+		list_destroy(list_msg.my_list);
+
+	return SLURM_SUCCESS;
+}
+
 static int _get_jobs(slurmdbd_conn_t *slurmdbd_conn, 
 		     Buf in_buffer, Buf *out_buffer, uint32_t *uid)
 {
@@ -1342,6 +1451,44 @@ static int _get_wckeys(slurmdbd_conn_t *slurmdbd_conn,
 	return SLURM_SUCCESS;
 }
 
+static int _get_reservations(slurmdbd_conn_t *slurmdbd_conn, 
+			     Buf in_buffer, Buf *out_buffer, uint32_t *uid)
+{
+	dbd_cond_msg_t *get_msg = NULL;
+	dbd_list_msg_t list_msg;
+	char *comment = NULL;
+
+	debug2("DBD_GET_RESVS: called");
+
+	if (slurmdbd_unpack_cond_msg(slurmdbd_conn->rpc_version, 
+				     DBD_GET_RESVS, &get_msg, in_buffer) !=
+	    SLURM_SUCCESS) {
+		comment = "Failed to unpack DBD_GET_RESVS message";
+		error("%s", comment);
+		*out_buffer = make_dbd_rc_msg(slurmdbd_conn->rpc_version, 
+					      SLURM_ERROR, comment,
+					      DBD_GET_RESVS);
+		return SLURM_ERROR;
+	}
+
+	list_msg.my_list = acct_storage_g_get_reservations(
+		slurmdbd_conn->db_conn, *uid, get_msg->cond);
+	slurmdbd_free_cond_msg(slurmdbd_conn->rpc_version, 
+			       DBD_GET_RESVS, get_msg);
+
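+	/* On access denied, return an empty list rather than an error */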
+	if(errno == ESLURM_ACCESS_DENIED && !list_msg.my_list)
+		list_msg.my_list = list_create(NULL);
+
+	*out_buffer = init_buf(1024);
+	pack16((uint16_t) DBD_GOT_RESVS, *out_buffer);
+	slurmdbd_pack_list_msg(slurmdbd_conn->rpc_version, 
+			       DBD_GOT_RESVS, &list_msg, *out_buffer);
+	if(list_msg.my_list)
+		list_destroy(list_msg.my_list);
+
+	return SLURM_SUCCESS;
+}
+
 static int _flush_jobs(slurmdbd_conn_t *slurmdbd_conn,
 		       Buf in_buffer, Buf *out_buffer, uint32_t *uid)
 {
@@ -1497,6 +1644,9 @@ static int  _job_complete(slurmdbd_conn_t *slurmdbd_conn,
 
 	if(rc && errno == 740) /* meaning data is already there */
 		rc = SLURM_SUCCESS;
+
+	/* just in case this gets set we need to clear it */
+	xfree(job.wckey);
 end_it:
 	slurmdbd_free_job_complete_msg(slurmdbd_conn->rpc_version, 
 				       job_comp_msg);
@@ -1509,7 +1659,7 @@ static int  _job_start(slurmdbd_conn_t *slurmdbd_conn,
 		       Buf in_buffer, Buf *out_buffer, uint32_t *uid)
 {
 	dbd_job_start_msg_t *job_start_msg = NULL;
-	dbd_job_start_rc_msg_t job_start_rc_msg;
+	dbd_id_rc_msg_t id_rc_msg;
 	struct job_record job;
 	struct job_details details;
 	char *comment = NULL;
@@ -1534,10 +1684,11 @@ static int  _job_start(slurmdbd_conn_t *slurmdbd_conn,
 	}
 	memset(&job, 0, sizeof(struct job_record));
 	memset(&details, 0, sizeof(struct job_details));
-	memset(&job_start_rc_msg, 0, sizeof(dbd_job_start_rc_msg_t));
+	memset(&id_rc_msg, 0, sizeof(dbd_id_rc_msg_t));
 
 	job.total_procs = job_start_msg->alloc_cpus;
-	job.account = job_start_msg->account;
+	job.node_cnt = job_start_msg->alloc_nodes;
+	job.account = _replace_double_quotes(job_start_msg->account);
 	job.assoc_id = job_start_msg->assoc_id;
 	job.comment = job_start_msg->block_id;
 	job.db_index = job_start_msg->db_index;
@@ -1546,12 +1697,16 @@ static int  _job_start(slurmdbd_conn_t *slurmdbd_conn,
 	job.group_id = job_start_msg->gid;
 	job.job_id = job_start_msg->job_id;
 	job.job_state = job_start_msg->job_state;
-	job.name = job_start_msg->name;
+	job.name = _replace_double_quotes(job_start_msg->name);
 	job.nodes = job_start_msg->nodes;
+	job.network = job_start_msg->node_inx;
 	job.partition = job_start_msg->partition;
 	job.num_procs = job_start_msg->req_cpus;
+	job.resv_id = job_start_msg->resv_id;
 	job.priority = job_start_msg->priority;
 	job.start_time = job_start_msg->start_time;
+	job.time_limit = job_start_msg->timelimit;
+	job.wckey = _replace_double_quotes(job_start_msg->wckey);
 	details.submit_time = job_start_msg->submit_time;
 
 	job.details = &details;
@@ -1564,16 +1719,20 @@ static int  _job_start(slurmdbd_conn_t *slurmdbd_conn,
 		debug2("DBD_JOB_START: ELIGIBLE CALL ID:%u NAME:%s", 
 		       job_start_msg->job_id, job_start_msg->name);
 	}
-	job_start_rc_msg.return_code = jobacct_storage_g_job_start(
+	id_rc_msg.return_code = jobacct_storage_g_job_start(
 		slurmdbd_conn->db_conn, job_start_msg->cluster, &job);
-	job_start_rc_msg.db_index = job.db_index;
+	id_rc_msg.id = job.db_index;
+
+	/* just in case job.wckey was set because we didn't send one */
+	if(!job_start_msg->wckey)
+		xfree(job.wckey);
 
 	slurmdbd_free_job_start_msg(slurmdbd_conn->rpc_version, 
 				    job_start_msg);
 	*out_buffer = init_buf(1024);
-	pack16((uint16_t) DBD_JOB_START_RC, *out_buffer);
-	slurmdbd_pack_job_start_rc_msg(slurmdbd_conn->rpc_version, 
-				       &job_start_rc_msg, *out_buffer);
+	pack16((uint16_t) DBD_ID_RC, *out_buffer);
+	slurmdbd_pack_id_rc_msg(slurmdbd_conn->rpc_version,
+				&id_rc_msg, *out_buffer);
 	return SLURM_SUCCESS;
 }
 
@@ -1620,6 +1779,9 @@ static int  _job_suspend(slurmdbd_conn_t *slurmdbd_conn,
 
 	if(rc && errno == 740) /* meaning data is already there */
 		rc = SLURM_SUCCESS;
+
+	/* just in case this gets set we need to clear it */
+	xfree(job.wckey);
 end_it:
 	slurmdbd_free_job_suspend_msg(slurmdbd_conn->rpc_version, 
 				      job_suspend_msg);
@@ -2078,6 +2240,39 @@ static int   _modify_wckeys(slurmdbd_conn_t *slurmdbd_conn,
 	return rc;
 }
 
+static int _modify_reservation(slurmdbd_conn_t *slurmdbd_conn,
+			     Buf in_buffer, Buf *out_buffer, uint32_t *uid)
+{
+	int rc = SLURM_SUCCESS;
+	dbd_rec_msg_t *rec_msg = NULL;
+	char *comment = NULL;
+
+	if ((*uid != slurmdbd_conf->slurm_user_id && *uid != 0)) {
+		comment = "DBD_MODIFY_RESV message from invalid uid";
+		error("DBD_MODIFY_RESV message from invalid uid %u", *uid);
+		rc = ESLURM_ACCESS_DENIED;
+		goto end_it;
+	}
+	if (slurmdbd_unpack_rec_msg(slurmdbd_conn->rpc_version, DBD_MODIFY_RESV,
+				    &rec_msg, in_buffer) != SLURM_SUCCESS) {
+		comment = "Failed to unpack DBD_MODIFY_RESV message";
+		error("%s", comment);
+		rc = SLURM_ERROR;
+		goto end_it;
+	}
+	debug2("DBD_MODIFY_RESV: called");
+
+	rc = acct_storage_g_modify_reservation(slurmdbd_conn->db_conn,
+					       rec_msg->rec);
+
+end_it:
+	slurmdbd_free_rec_msg(slurmdbd_conn->rpc_version,
+			      DBD_MODIFY_RESV, rec_msg);
+	*out_buffer = make_dbd_rc_msg(slurmdbd_conn->rpc_version, 
+				      rc, comment, DBD_MODIFY_RESV);
+	return rc;
+}
+
 static int _node_state(slurmdbd_conn_t *slurmdbd_conn,
 		       Buf in_buffer, Buf *out_buffer, uint32_t *uid)
 {
@@ -2086,7 +2281,6 @@ static int _node_state(slurmdbd_conn_t *slurmdbd_conn,
 	int rc = SLURM_SUCCESS;
 	char *comment = NULL;
 
-	memset(&node_ptr, 0, sizeof(struct node_record));
 
 	if (*uid != slurmdbd_conf->slurm_user_id) {
 		comment = "DBD_NODE_STATE message from invalid uid";
@@ -2103,38 +2297,37 @@ static int _node_state(slurmdbd_conn_t *slurmdbd_conn,
 		goto end_it;
 	}
 
-	if(node_state_msg->new_state == DBD_NODE_STATE_UP)
+	memset(&node_ptr, 0, sizeof(struct node_record));
+	node_ptr.name = node_state_msg->hostlist;
+	node_ptr.cpus = node_state_msg->cpu_count;
+	node_ptr.node_state = node_state_msg->state;
+
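+	/* fast_schedule 0 directs the accounting code to use the cpu
+	 * count reported in the message, not any configured value */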
+	slurmctld_conf.fast_schedule = 0;
+
+	if(node_state_msg->new_state == DBD_NODE_STATE_UP) {
 		debug3("DBD_NODE_STATE: NODE:%s STATE:%s REASON:%s TIME:%u", 
 		       node_state_msg->hostlist,
 		       _node_state_string(node_state_msg->new_state),
 		       node_state_msg->reason, 
 		       node_state_msg->event_time);
-	else
+		rc = clusteracct_storage_g_node_up(
+			slurmdbd_conn->db_conn,
+			node_state_msg->cluster_name,
+			&node_ptr,
+			node_state_msg->event_time);
+	} else {
 		debug2("DBD_NODE_STATE: NODE:%s STATE:%s REASON:%s TIME:%u", 
 		       node_state_msg->hostlist,
 		       _node_state_string(node_state_msg->new_state),
 		       node_state_msg->reason, 
 		       node_state_msg->event_time);
-	node_ptr.name = node_state_msg->hostlist;
-	node_ptr.cpus = node_state_msg->cpu_count;
-
-	slurmctld_conf.fast_schedule = 0;
-
-	if(node_state_msg->new_state == DBD_NODE_STATE_DOWN)
 		rc = clusteracct_storage_g_node_down(
 			slurmdbd_conn->db_conn,
 			node_state_msg->cluster_name,
 			&node_ptr,
 			node_state_msg->event_time,
 			node_state_msg->reason);
-	else
-		rc = clusteracct_storage_g_node_up(slurmdbd_conn->db_conn,
-						   node_state_msg->cluster_name,
-						   &node_ptr,
-						   node_state_msg->event_time);
-	
-	if(rc && errno == 740) /* meaning data is already there */
-		rc = SLURM_SUCCESS;
+	}
 
 end_it:
 	slurmdbd_free_node_state_msg(slurmdbd_conn->rpc_version, 
@@ -2707,6 +2900,39 @@ static int   _remove_wckeys(slurmdbd_conn_t *slurmdbd_conn,
 	return rc;
 }
 
+static int _remove_reservation(slurmdbd_conn_t *slurmdbd_conn,
+			     Buf in_buffer, Buf *out_buffer, uint32_t *uid)
+{
+	int rc = SLURM_SUCCESS;
+	dbd_rec_msg_t *rec_msg = NULL;
+	char *comment = NULL;
+
+	if ((*uid != slurmdbd_conf->slurm_user_id && *uid != 0)) {
+		comment = "DBD_REMOVE_RESV message from invalid uid";
+		error("DBD_REMOVE_RESV message from invalid uid %u", *uid);
+		rc = ESLURM_ACCESS_DENIED;
+		goto end_it;
+	}
+	if (slurmdbd_unpack_rec_msg(slurmdbd_conn->rpc_version, DBD_REMOVE_RESV,
+				    &rec_msg, in_buffer) != SLURM_SUCCESS) {
+		comment = "Failed to unpack DBD_REMOVE_RESV message";
+		error("%s", comment);
+		rc = SLURM_ERROR;
+		goto end_it;
+	}
+	debug2("DBD_REMOVE_RESV: called");
+
+	rc = acct_storage_g_remove_reservation(slurmdbd_conn->db_conn,
+					     rec_msg->rec);
+
+end_it:
+	slurmdbd_free_rec_msg(slurmdbd_conn->rpc_version,
+			      DBD_REMOVE_RESV, rec_msg);
+	*out_buffer = make_dbd_rc_msg(slurmdbd_conn->rpc_version, 
+				      rc, comment, DBD_REMOVE_RESV);
+	return rc;
+}
+
 static int   _roll_usage(slurmdbd_conn_t *slurmdbd_conn,
 			 Buf in_buffer, Buf *out_buffer, uint32_t *uid)
 {
@@ -2734,7 +2960,9 @@ static int   _roll_usage(slurmdbd_conn_t *slurmdbd_conn,
 		goto end_it;
 	}
 
-	rc = acct_storage_g_roll_usage(slurmdbd_conn->db_conn, get_msg->start);
+	rc = acct_storage_g_roll_usage(slurmdbd_conn->db_conn,
+				       get_msg->start, get_msg->end,
+				       get_msg->archive_data);
 
 end_it:
 	slurmdbd_free_roll_usage_msg(slurmdbd_conn->rpc_version, 
@@ -2787,7 +3015,8 @@ static int  _step_complete(slurmdbd_conn_t *slurmdbd_conn,
 	job.start_time = step_comp_msg->start_time;
 	details.submit_time = step_comp_msg->job_submit_time;
 	step.step_id = step_comp_msg->step_id;
-	job.total_procs = step_comp_msg->total_procs;
+	step.cpu_count = step_comp_msg->total_procs;
+	details.num_tasks = step_comp_msg->total_tasks;
 
 	job.details = &details;
 	step.job_ptr = &job;
@@ -2796,7 +3025,8 @@ static int  _step_complete(slurmdbd_conn_t *slurmdbd_conn,
 
 	if(rc && errno == 740) /* meaning data is already there */
 		rc = SLURM_SUCCESS;
-
+	/* just in case this gets set we need to clear it */
+	xfree(job.wckey);
 end_it:
 	slurmdbd_free_step_complete_msg(slurmdbd_conn->rpc_version, 
 					step_comp_msg);
@@ -2812,6 +3042,7 @@ static int  _step_start(slurmdbd_conn_t *slurmdbd_conn,
 	struct step_record step;
 	struct job_record job;
 	struct job_details details;
+	slurm_step_layout_t layout;
 	int rc = SLURM_SUCCESS;
 	char *comment = NULL;
 
@@ -2837,24 +3068,34 @@ static int  _step_start(slurmdbd_conn_t *slurmdbd_conn,
 	memset(&step, 0, sizeof(struct step_record));
 	memset(&job, 0, sizeof(struct job_record));
 	memset(&details, 0, sizeof(struct job_details));
+	memset(&layout, 0, sizeof(slurm_step_layout_t));
 
 	job.assoc_id = step_start_msg->assoc_id;
 	job.db_index = step_start_msg->db_index;
 	job.job_id = step_start_msg->job_id;
 	step.name = step_start_msg->name;
 	job.nodes = step_start_msg->nodes;
+	step.network = step_start_msg->node_inx;
 	step.start_time = step_start_msg->start_time;
 	details.submit_time = step_start_msg->job_submit_time;
 	step.step_id = step_start_msg->step_id;
-	job.total_procs = step_start_msg->total_procs;
+	step.cpu_count = step_start_msg->total_procs;
+	details.num_tasks = step_start_msg->total_tasks;
+
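+	/* Build a minimal step layout so the node count and task
+	 * distribution reach the storage plugin */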
+	layout.node_cnt = step_start_msg->node_cnt;
+	layout.task_dist = step_start_msg->task_dist;
 
 	job.details = &details;
 	step.job_ptr = &job;
+	step.step_layout = &layout;
 
 	rc = jobacct_storage_g_step_start(slurmdbd_conn->db_conn, &step);
 
 	if(rc && errno == 740) /* meaning data is already there */
 		rc = SLURM_SUCCESS;
+
+	/* just in case this gets set we need to clear it */
+	xfree(job.wckey);
 end_it:
 	slurmdbd_free_step_start_msg(slurmdbd_conn->rpc_version, 
 				     step_start_msg);
@@ -2863,49 +3104,3 @@ end_it:
 	return rc;
 }
 
-static int  _update_shares_used(slurmdbd_conn_t *slurmdbd_conn,
-				Buf in_buffer, Buf *out_buffer, uint32_t *uid)
-{
-	int rc = SLURM_SUCCESS;
-	dbd_list_msg_t *used_shares_msg = NULL;
-	char *comment = NULL;
-
-	if (*uid != slurmdbd_conf->slurm_user_id) {
-		comment = "DBD_UPDATE_SHARES_USED message from invalid uid";
-		error("%s %u", comment, *uid);
-		rc = ESLURM_ACCESS_DENIED;
-		goto end_it;
-	}
-	debug2("DBD_UPDATE_SHARES_USED");
-	if (slurmdbd_unpack_list_msg(slurmdbd_conn->rpc_version, 
-				     DBD_UPDATE_SHARES_USED, &used_shares_msg, 
-				     in_buffer) != SLURM_SUCCESS) {
-		comment = "Failed to unpack DBD_UPDATE_SHARES_USED message";
-		error("%s", comment);
-		rc = SLURM_ERROR;
-		goto end_it;
-	} else {
-#if 0
-		/* This was only added to verify the logic. 
-		 * It is not useful for production use */
-		ListIterator itr = NULL;
-		shares_used_object_t *usage;
-		itr = list_iterator_create(used_shares_msg->my_list);
-		while((usage = list_next(itr))) {
-			debug2("assoc_id:%u shares_used:%u", 
-			       usage->assoc_id, usage->shares_used);
-		}
-		list_iterator_destroy(itr);
-#endif
-	}
-
-	rc = acct_storage_g_update_shares_used(slurmdbd_conn->db_conn, 
-					       used_shares_msg->my_list);
-
-end_it:
-	slurmdbd_free_list_msg(slurmdbd_conn->rpc_version, 
-			       used_shares_msg);
-	*out_buffer = make_dbd_rc_msg(slurmdbd_conn->rpc_version, 
-				      rc, comment, DBD_UPDATE_SHARES_USED);
-	return rc;
-}
diff --git a/src/slurmdbd/proc_req.h b/src/slurmdbd/proc_req.h
index faca952d04957e20d3a3971eaf62dd0cfc069de2..82c831b758d1ef55b7d4de59338c14f7c4d770bb 100644
--- a/src/slurmdbd/proc_req.h
+++ b/src/slurmdbd/proc_req.h
@@ -4,10 +4,11 @@
  *  Copyright (C) 2008 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/slurmdbd/read_config.c b/src/slurmdbd/read_config.c
index aaa1c213dfe9670111cc2aeece30ddff2651b86a..e5819561f68ec9931f97ca52ecc6492d668679e0 100644
--- a/src/slurmdbd/read_config.c
+++ b/src/slurmdbd/read_config.c
@@ -2,13 +2,14 @@
  *  read_config.c - functions for reading slurmdbd.conf
  *****************************************************************************
  *  Copyright (C) 2003-2007 The Regents of the University of California.
- *  Copyright (C) 2008 Lawrence Livermore National Security.
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -39,15 +40,19 @@
 #include <pwd.h>
 #include <stdlib.h>
 #include <string.h>
+#include <time.h>
 #include <sys/stat.h>
 #include <sys/types.h>
 #include <unistd.h>
 #include <slurm/slurm_errno.h>
 
-#include "src/common/macros.h"
 #include "src/common/log.h"
+#include "src/common/list.h"
+#include "src/common/macros.h"
 #include "src/common/parse_config.h"
+#include "src/common/parse_time.h"
 #include "src/common/read_config.h"
+#include "src/common/slurm_accounting_storage.h"
 #include "src/common/uid.h"
 #include "src/common/xmalloc.h"
 #include "src/common/xstring.h"
@@ -61,6 +66,8 @@ pthread_mutex_t conf_mutex = PTHREAD_MUTEX_INITIALIZER;
 static void _clear_slurmdbd_conf(void);
 static char * _get_conf_path(void);
 
+static time_t boot_time;
+
 /*
  * free_slurmdbd_conf - free storage associated with the global variable 
  *	slurmdbd_conf
@@ -77,24 +84,30 @@ static void _clear_slurmdbd_conf(void)
 {
 	if (slurmdbd_conf) {
 		xfree(slurmdbd_conf->archive_dir);
+		slurmdbd_conf->archive_events = 0;
 		slurmdbd_conf->archive_jobs = 0;
 		xfree(slurmdbd_conf->archive_script);
 		slurmdbd_conf->archive_steps = 0;
+		slurmdbd_conf->archive_suspend = 0;
 		xfree(slurmdbd_conf->auth_info);
 		xfree(slurmdbd_conf->auth_type);
 		xfree(slurmdbd_conf->dbd_addr);
+		xfree(slurmdbd_conf->dbd_backup);
 		xfree(slurmdbd_conf->dbd_host);
 		slurmdbd_conf->dbd_port = 0;
 		slurmdbd_conf->debug_level = 0;
 		xfree(slurmdbd_conf->default_qos);
-		slurmdbd_conf->job_purge = 0;
 		xfree(slurmdbd_conf->log_file);
 		xfree(slurmdbd_conf->pid_file);
 		xfree(slurmdbd_conf->plugindir);
 		slurmdbd_conf->private_data = 0;
+		slurmdbd_conf->purge_event = 0;
+		slurmdbd_conf->purge_job = 0;
+		slurmdbd_conf->purge_step = 0;
+		slurmdbd_conf->purge_suspend = 0;
 		slurmdbd_conf->slurm_user_id = NO_VAL;
 		xfree(slurmdbd_conf->slurm_user_name);
-		slurmdbd_conf->step_purge = 0;
+		xfree(slurmdbd_conf->storage_backup_host);
 		xfree(slurmdbd_conf->storage_host);
 		xfree(slurmdbd_conf->storage_loc);
 		xfree(slurmdbd_conf->storage_pass);
@@ -115,12 +128,15 @@ extern int read_slurmdbd_conf(void)
 {
 	s_p_options_t options[] = {
 		{"ArchiveDir", S_P_STRING},
+		{"ArchiveEvents", S_P_BOOLEAN},
 		{"ArchiveJobs", S_P_BOOLEAN},
 		{"ArchiveScript", S_P_STRING},
 		{"ArchiveSteps", S_P_BOOLEAN},
+		{"ArchiveSuspend", S_P_BOOLEAN},
 		{"AuthInfo", S_P_STRING},
 		{"AuthType", S_P_STRING},
 		{"DbdAddr", S_P_STRING},
+		{"DbdBackupHost", S_P_STRING},
 		{"DbdHost", S_P_STRING},
 		{"DbdPort", S_P_UINT16},
 		{"DebugLevel", S_P_UINT16},
@@ -131,8 +147,13 @@ extern int read_slurmdbd_conf(void)
 		{"PidFile", S_P_STRING},
 		{"PluginDir", S_P_STRING},
 		{"PrivateData", S_P_STRING},
+		{"PurgeEventMonths", S_P_UINT16},
+		{"PurgeJobMonths", S_P_UINT16},
+		{"PurgeStepMonths", S_P_UINT16},
+		{"PurgeSuspendMonths", S_P_UINT16},
 		{"SlurmUser", S_P_STRING},
 		{"StepPurge", S_P_UINT16},
+		{"StorageBackupHost", S_P_STRING},
 		{"StorageHost", S_P_STRING},
 		{"StorageLoc", S_P_STRING},
 		{"StoragePass", S_P_STRING},
@@ -148,8 +169,10 @@ extern int read_slurmdbd_conf(void)
 
 	/* Set initial values */
 	slurm_mutex_lock(&conf_mutex);
-	if (slurmdbd_conf == NULL)
+	if (slurmdbd_conf == NULL) {
 		slurmdbd_conf = xmalloc(sizeof(slurm_dbd_conf_t));
+		boot_time = time(NULL);
+	}
 	slurmdbd_conf->debug_level = LOG_LEVEL_INFO;
 	_clear_slurmdbd_conf();
 
@@ -163,13 +186,15 @@ extern int read_slurmdbd_conf(void)
 		tbl = s_p_hashtbl_create(options);
 		if (s_p_parse_file(tbl, conf_path) == SLURM_ERROR) {
 			fatal("Could not open/read/parse slurmdbd.conf file %s",
-		 	     conf_path);
+			      conf_path);
 		}
 
 		if(!s_p_get_string(&slurmdbd_conf->archive_dir, "ArchiveDir",
 				   tbl))
 			slurmdbd_conf->archive_dir =
 				xstrdup(DEFAULT_SLURMDBD_ARCHIVE_DIR);
+		s_p_get_boolean((bool *)&slurmdbd_conf->archive_events,
+				"ArchiveEvents", tbl);
 		s_p_get_boolean((bool *)&slurmdbd_conf->archive_jobs,
 				"ArchiveJobs", tbl);
 		s_p_get_string(&slurmdbd_conf->archive_script, "ArchiveScript",
@@ -178,12 +203,14 @@ extern int read_slurmdbd_conf(void)
 				"ArchiveSteps", tbl);
 		s_p_get_string(&slurmdbd_conf->auth_info, "AuthInfo", tbl);
 		s_p_get_string(&slurmdbd_conf->auth_type, "AuthType", tbl);
+		s_p_get_string(&slurmdbd_conf->dbd_backup,
+			       "DbdBackupHost", tbl);
 		s_p_get_string(&slurmdbd_conf->dbd_host, "DbdHost", tbl);
 		s_p_get_string(&slurmdbd_conf->dbd_addr, "DbdAddr", tbl);
 		s_p_get_uint16(&slurmdbd_conf->dbd_port, "DbdPort", tbl);
 		s_p_get_uint16(&slurmdbd_conf->debug_level, "DebugLevel", tbl);
 		s_p_get_string(&slurmdbd_conf->default_qos, "DefaultQOS", tbl);
-		s_p_get_uint16(&slurmdbd_conf->job_purge, "JobPurge", tbl);
+		s_p_get_uint16(&slurmdbd_conf->purge_job, "JobPurge", tbl);
 		s_p_get_string(&slurmdbd_conf->log_file, "LogFile", tbl);
 		if (!s_p_get_uint16(&slurmdbd_conf->msg_timeout,
 				    "MessageTimeout", tbl))
@@ -195,6 +222,9 @@ extern int read_slurmdbd_conf(void)
 		s_p_get_string(&slurmdbd_conf->pid_file, "PidFile", tbl);
 		s_p_get_string(&slurmdbd_conf->plugindir, "PluginDir", tbl);
 		if (s_p_get_string(&temp_str, "PrivateData", tbl)) {
+			if (strstr(temp_str, "account"))
+				slurmdbd_conf->private_data 
+					|= PRIVATE_DATA_ACCOUNTS;
 			if (strstr(temp_str, "job"))
 				slurmdbd_conf->private_data 
 					|= PRIVATE_DATA_JOBS;
@@ -204,37 +234,48 @@ extern int read_slurmdbd_conf(void)
 			if (strstr(temp_str, "partition"))
 				slurmdbd_conf->private_data 
 					|= PRIVATE_DATA_PARTITIONS;
+			if (strstr(temp_str, "reservation"))
+				slurmdbd_conf->private_data
+					|= PRIVATE_DATA_RESERVATIONS;
 			if (strstr(temp_str, "usage"))
 				slurmdbd_conf->private_data
 					|= PRIVATE_DATA_USAGE;
-			if (strstr(temp_str, "users"))
+			if (strstr(temp_str, "user"))
 				slurmdbd_conf->private_data 
 					|= PRIVATE_DATA_USERS;
-			if (strstr(temp_str, "accounts"))
-				slurmdbd_conf->private_data 
-					|= PRIVATE_DATA_ACCOUNTS;
 			if (strstr(temp_str, "all"))
 				slurmdbd_conf->private_data = 0xffff;
 			xfree(temp_str);
 		}
 
+		s_p_get_uint16(&slurmdbd_conf->purge_event,
+			       "PurgeEventMonths", tbl);
+		s_p_get_uint16(&slurmdbd_conf->purge_job,
+			       "PurgeJobMonths", tbl);
+		s_p_get_uint16(&slurmdbd_conf->purge_step,
+			       "PurgeStepMonths", tbl);
+		s_p_get_uint16(&slurmdbd_conf->purge_suspend,
+			       "PurgeSuspendMonths", tbl);
+
 		s_p_get_string(&slurmdbd_conf->slurm_user_name, "SlurmUser",
 			       tbl);
-		s_p_get_uint16(&slurmdbd_conf->step_purge, "StepPurge", tbl);
+		s_p_get_uint16(&slurmdbd_conf->purge_step, "StepPurge", tbl);
 
+		s_p_get_string(&slurmdbd_conf->storage_backup_host,
+			       "StorageBackupHost", tbl);
 		s_p_get_string(&slurmdbd_conf->storage_host,
-				"StorageHost", tbl);
+			       "StorageHost", tbl);
 		s_p_get_string(&slurmdbd_conf->storage_loc,
-				"StorageLoc", tbl);
+			       "StorageLoc", tbl);
 		s_p_get_string(&slurmdbd_conf->storage_pass,
-				"StoragePass", tbl);
+			       "StoragePass", tbl);
 		s_p_get_uint16(&slurmdbd_conf->storage_port,
 			       "StoragePort", tbl);
 		s_p_get_string(&slurmdbd_conf->storage_type,
 			       "StorageType", tbl);
 		s_p_get_string(&slurmdbd_conf->storage_user,
-				"StorageUser", tbl);
-
+			       "StorageUser", tbl);
+
 		if(!s_p_get_boolean((bool *)&slurmdbd_conf->track_wckey, 
 				    "TrackWCKey", tbl))
 			slurmdbd_conf->track_wckey = false;
@@ -269,9 +310,38 @@ extern int read_slurmdbd_conf(void)
 		slurmdbd_conf->slurm_user_name = xstrdup("root");
 		slurmdbd_conf->slurm_user_id = 0;
 	}
+
 	if (slurmdbd_conf->storage_type == NULL)
 		fatal("StorageType must be specified");
+
+	if (!slurmdbd_conf->storage_host)
+		slurmdbd_conf->storage_host = xstrdup(DEFAULT_STORAGE_HOST);
+
+	if (!slurmdbd_conf->storage_user) 		
+		slurmdbd_conf->storage_user = xstrdup(getlogin());
 	
+	if(!strcmp(slurmdbd_conf->storage_type, 
+			  "accounting_storage/mysql")) {
+		if(!slurmdbd_conf->storage_port)
+			slurmdbd_conf->storage_port = DEFAULT_MYSQL_PORT;
+		if(!slurmdbd_conf->storage_loc)
+			slurmdbd_conf->storage_loc =
+				xstrdup(DEFAULT_ACCOUNTING_DB);
+	} else if(!strcmp(slurmdbd_conf->storage_type,
+			  "accounting_storage/pgsql")) {
+		if(!slurmdbd_conf->storage_port)
+			slurmdbd_conf->storage_port = DEFAULT_PGSQL_PORT;
+		if(!slurmdbd_conf->storage_loc)
+			slurmdbd_conf->storage_loc =
+				xstrdup(DEFAULT_ACCOUNTING_DB);
+	} else {
+		if(!slurmdbd_conf->storage_port)
+			slurmdbd_conf->storage_port = DEFAULT_STORAGE_PORT;
+		if(!slurmdbd_conf->storage_loc)
+			slurmdbd_conf->storage_loc =
+				xstrdup(DEFAULT_STORAGE_LOC);
+	}
+
 	if (slurmdbd_conf->archive_dir) {
 		if(stat(slurmdbd_conf->archive_dir, &buf) < 0) 
 			fatal("Failed to stat the archive directory %s: %m",
@@ -309,21 +379,20 @@ extern void log_config(void)
 	char tmp_str[128];
 
 	debug2("ArchiveDir        = %s", slurmdbd_conf->archive_dir);
+	debug2("ArchiveEvents     = %u", slurmdbd_conf->archive_events);
+	debug2("ArchiveJobs       = %u", slurmdbd_conf->archive_jobs);
 	debug2("ArchiveScript     = %s", slurmdbd_conf->archive_script);
+	debug2("ArchiveSteps      = %u", slurmdbd_conf->archive_steps);
+	debug2("ArchiveSuspend    = %u", slurmdbd_conf->archive_suspend);
 	debug2("AuthInfo          = %s", slurmdbd_conf->auth_info);
 	debug2("AuthType          = %s", slurmdbd_conf->auth_type);
 	debug2("DbdAddr           = %s", slurmdbd_conf->dbd_addr);
+	debug2("DbdBackupHost     = %s", slurmdbd_conf->dbd_backup);
 	debug2("DbdHost           = %s", slurmdbd_conf->dbd_host);
 	debug2("DbdPort           = %u", slurmdbd_conf->dbd_port);
 	debug2("DebugLevel        = %u", slurmdbd_conf->debug_level);
 	debug2("DefaultQOS        = %s", slurmdbd_conf->default_qos);
 
-	if(slurmdbd_conf->job_purge)
-		debug2("JobPurge          = %u months",
-		       slurmdbd_conf->job_purge);
-	else
-		debug2("JobPurge          = NONE");
-		
 	debug2("LogFile           = %s", slurmdbd_conf->log_file);
 	debug2("MessageTimeout    = %u", slurmdbd_conf->msg_timeout);
 	debug2("PidFile           = %s", slurmdbd_conf->pid_file);
@@ -333,21 +402,42 @@ extern void log_config(void)
 			    tmp_str, sizeof(tmp_str));
 
 	debug2("PrivateData       = %s", tmp_str);
-	debug2("SlurmUser         = %s(%u)", 
-		slurmdbd_conf->slurm_user_name, slurmdbd_conf->slurm_user_id);
 
-	if(slurmdbd_conf->step_purge)
-		debug2("StepPurge         = %u months", 
-		       slurmdbd_conf->step_purge); 
+	if(slurmdbd_conf->purge_job)
+		debug2("PurgeJobMonths    = %u months",
+		       slurmdbd_conf->purge_job);
 	else
-		debug2("StepPurge         = NONE"); 
+		debug2("PurgeJobMonths    = NONE");
 		
+	if(slurmdbd_conf->purge_event)
+		debug2("PurgeEventMonths  = %u months",
+		       slurmdbd_conf->purge_event);
+	else
+		debug2("PurgeEventMonths  = NONE");
+		
+	if(slurmdbd_conf->purge_step)
+		debug2("PurgeStepMonths   = %u months",
+		       slurmdbd_conf->purge_step);
+	else
+		debug2("PurgeStepMonths   = NONE");
+		
+	if(slurmdbd_conf->purge_suspend)
+		debug2("PurgeSuspendMonths= %u months",
+		       slurmdbd_conf->purge_suspend);
+	else
+		debug2("PurgeSuspendMonths= NONE");
+		
+	debug2("SlurmUser         = %s(%u)", 
+	       slurmdbd_conf->slurm_user_name, slurmdbd_conf->slurm_user_id);
+
+	debug2("StorageBackupHost = %s", slurmdbd_conf->storage_backup_host);
 	debug2("StorageHost       = %s", slurmdbd_conf->storage_host);
 	debug2("StorageLoc        = %s", slurmdbd_conf->storage_loc);
 	debug2("StoragePass       = %s", slurmdbd_conf->storage_pass);
 	debug2("StoragePort       = %u", slurmdbd_conf->storage_port);
 	debug2("StorageType       = %s", slurmdbd_conf->storage_type);
 	debug2("StorageUser       = %s", slurmdbd_conf->storage_user);
+
 	debug2("TrackWCKey        = %u", slurmdbd_conf->track_wckey);
 }
 
@@ -397,3 +487,222 @@ static char * _get_conf_path(void)
 
 	return path;
 }
+
+/* Dump the configuration in name,value pairs for output to 
+ *	"sacctmgr show config", caller must call list_destroy() */
+extern List dump_config(void)
+{
+	config_key_pair_t *key_pair;
+	List my_list = list_create(destroy_config_key_pair);
+
+	if (!my_list)
+		fatal("malloc failure on list_create");
+
+	key_pair = xmalloc(sizeof(config_key_pair_t));
+	key_pair->name = xstrdup("ArchiveDir");
+	key_pair->value = xstrdup(slurmdbd_conf->archive_dir);
+	list_append(my_list, key_pair);
+
+	key_pair = xmalloc(sizeof(config_key_pair_t));
+	key_pair->name = xstrdup("ArchiveEvents");
+	key_pair->value = xmalloc(16);
+	snprintf(key_pair->value, 16, "%u", slurmdbd_conf->archive_events);
+	list_append(my_list, key_pair);
+
+	key_pair = xmalloc(sizeof(config_key_pair_t));
+	key_pair->name = xstrdup("ArchiveJobs");
+	key_pair->value = xmalloc(16);
+	snprintf(key_pair->value, 16, "%u", slurmdbd_conf->archive_jobs);
+	list_append(my_list, key_pair);
+
+	key_pair = xmalloc(sizeof(config_key_pair_t));
+	key_pair->name = xstrdup("ArchiveScript");
+	key_pair->value = xstrdup(slurmdbd_conf->archive_script);
+	list_append(my_list, key_pair);
+
+	key_pair = xmalloc(sizeof(config_key_pair_t));
+	key_pair->name = xstrdup("ArchiveSteps");
+	key_pair->value = xmalloc(16);
+	snprintf(key_pair->value, 16, "%u", slurmdbd_conf->archive_steps);
+	list_append(my_list, key_pair);
+
+	key_pair = xmalloc(sizeof(config_key_pair_t));
+	key_pair->name = xstrdup("ArchiveSupend");
+	key_pair->value = xmalloc(16);
+	snprintf(key_pair->value, 16, "%u", slurmdbd_conf->archive_suspend);
+	list_append(my_list, key_pair);
+
+	key_pair = xmalloc(sizeof(config_key_pair_t));
+	key_pair->name = xstrdup("AuthInfo");
+	key_pair->value = xstrdup(slurmdbd_conf->auth_info);
+	list_append(my_list, key_pair);
+
+	key_pair = xmalloc(sizeof(config_key_pair_t));
+	key_pair->name = xstrdup("AuthType");
+	key_pair->value = xstrdup(slurmdbd_conf->auth_type);
+	list_append(my_list, key_pair);
+
+	key_pair = xmalloc(sizeof(config_key_pair_t));
+	key_pair->name = xstrdup("BOOT_TIME");
+	key_pair->value = xmalloc(128);
+	slurm_make_time_str ((time_t *)&boot_time, key_pair->value, 128);
+	list_append(my_list, key_pair);
+
+	key_pair = xmalloc(sizeof(config_key_pair_t));
+	key_pair->name = xstrdup("DbdAddr");
+	key_pair->value = xstrdup(slurmdbd_conf->dbd_addr);
+	list_append(my_list, key_pair);
+
+	key_pair = xmalloc(sizeof(config_key_pair_t));
+	key_pair->name = xstrdup("DbdBackupHost");
+	key_pair->value = xstrdup(slurmdbd_conf->dbd_backup);
+	list_append(my_list, key_pair);
+
+	key_pair = xmalloc(sizeof(config_key_pair_t));
+	key_pair->name = xstrdup("DbdHost");
+	key_pair->value = xstrdup(slurmdbd_conf->dbd_host);
+	list_append(my_list, key_pair);
+
+	key_pair = xmalloc(sizeof(config_key_pair_t));
+	key_pair->name = xstrdup("DbdPort");
+	key_pair->value = xmalloc(32);
+	snprintf(key_pair->value, 32, "%u", slurmdbd_conf->dbd_port);
+	list_append(my_list, key_pair);
+
+	key_pair = xmalloc(sizeof(config_key_pair_t));
+	key_pair->name = xstrdup("DebugLevel");
+	key_pair->value = xmalloc(32);
+	snprintf(key_pair->value, 32, "%u", slurmdbd_conf->debug_level);
+	list_append(my_list, key_pair);
+
+	key_pair = xmalloc(sizeof(config_key_pair_t));
+	key_pair->name = xstrdup("DefaultQOS");
+	key_pair->value = xstrdup(slurmdbd_conf->default_qos);
+	list_append(my_list, key_pair);
+
+	key_pair = xmalloc(sizeof(config_key_pair_t));
+	key_pair->name = xstrdup("LogFile");
+	key_pair->value = xstrdup(slurmdbd_conf->log_file);
+	list_append(my_list, key_pair);
+
+	key_pair = xmalloc(sizeof(config_key_pair_t));
+	key_pair->name = xstrdup("MessageTimeout");
+	key_pair->value = xmalloc(32);
+	snprintf(key_pair->value, 32, "%u secs", slurmdbd_conf->msg_timeout);
+	list_append(my_list, key_pair);
+
+	key_pair = xmalloc(sizeof(config_key_pair_t));
+	key_pair->name = xstrdup("PidFile");
+	key_pair->value = xstrdup(slurmdbd_conf->pid_file);
+	list_append(my_list, key_pair);
+
+	key_pair = xmalloc(sizeof(config_key_pair_t));
+	key_pair->name = xstrdup("PluginDir");
+	key_pair->value = xstrdup(slurmdbd_conf->plugindir);
+	list_append(my_list, key_pair);
+
+	key_pair = xmalloc(sizeof(config_key_pair_t));
+	key_pair->name = xstrdup("PrivateData");
+	key_pair->value = xmalloc(128);
+	private_data_string(slurmdbd_conf->private_data,
+			    key_pair->value, 128);
+	list_append(my_list, key_pair);
+
+	key_pair = xmalloc(sizeof(config_key_pair_t));
+	key_pair->name = xstrdup("PurgeEventMonths");
+	if(slurmdbd_conf->purge_event) {
+		key_pair->value = xmalloc(32);
+		snprintf(key_pair->value, 32, "%u months", 
+			 slurmdbd_conf->purge_event);
+	} else
+		key_pair->value = xstrdup("NONE");
+	list_append(my_list, key_pair);
+
+	key_pair = xmalloc(sizeof(config_key_pair_t));
+	key_pair->name = xstrdup("PurgeJobMonths");
+	if(slurmdbd_conf->purge_job) {
+		key_pair->value = xmalloc(32);
+		snprintf(key_pair->value, 32, "%u months", 
+			 slurmdbd_conf->purge_job);
+	} else
+		key_pair->value = xstrdup("NONE");
+	list_append(my_list, key_pair);
+
+	key_pair = xmalloc(sizeof(config_key_pair_t));
+	key_pair->name = xstrdup("PurgeStepMonths");
+	if(slurmdbd_conf->purge_step) {
+		key_pair->value = xmalloc(32);
+		snprintf(key_pair->value, 32, "%u months", 
+			 slurmdbd_conf->purge_step);
+	} else
+		key_pair->value = xstrdup("NONE");
+	list_append(my_list, key_pair);
+
+	key_pair = xmalloc(sizeof(config_key_pair_t));
+	key_pair->name = xstrdup("PurgeSuspendMonths");
+	if(slurmdbd_conf->purge_suspend) {
+		key_pair->value = xmalloc(32);
+		snprintf(key_pair->value, 32, "%u months", 
+			 slurmdbd_conf->purge_suspend);
+	} else
+		key_pair->value = xstrdup("NONE");
+	list_append(my_list, key_pair);
+
+	key_pair = xmalloc(sizeof(config_key_pair_t));
+	key_pair->name = xstrdup("SLURMDBD_CONF");
+	key_pair->value = _get_conf_path();
+	list_append(my_list, key_pair);
+
+	key_pair = xmalloc(sizeof(config_key_pair_t));
+	key_pair->name = xstrdup("SLURMDBD_VERSION");
+	key_pair->value = xstrdup(SLURM_VERSION);
+	list_append(my_list, key_pair);
+
+	key_pair = xmalloc(sizeof(config_key_pair_t));
+	key_pair->name = xstrdup("SlurmUser");
+	key_pair->value = xmalloc(128);
+	snprintf(key_pair->value, 128, "%s(%u)",
+		 slurmdbd_conf->slurm_user_name, slurmdbd_conf->slurm_user_id);
+	list_append(my_list, key_pair);
+
+	key_pair = xmalloc(sizeof(config_key_pair_t));
+	key_pair->name = xstrdup("StorageBackupHost");
+	key_pair->value = xstrdup(slurmdbd_conf->storage_backup_host);
+	list_append(my_list, key_pair);
+
+	key_pair = xmalloc(sizeof(config_key_pair_t));
+	key_pair->name = xstrdup("StorageHost");
+	key_pair->value = xstrdup(slurmdbd_conf->storage_host);
+	list_append(my_list, key_pair);
+
+	key_pair = xmalloc(sizeof(config_key_pair_t));
+	key_pair->name = xstrdup("StorageLoc");
+	key_pair->value = xstrdup(slurmdbd_conf->storage_loc);
+	list_append(my_list, key_pair);
+
+	/* StoragePass should NOT be passed due to security reasons */
+
+	key_pair = xmalloc(sizeof(config_key_pair_t));
+	key_pair->name = xstrdup("StoragePort");
+	key_pair->value = xmalloc(32);
+	snprintf(key_pair->value, 32, "%u", slurmdbd_conf->storage_port);
+	list_append(my_list, key_pair);
+
+	key_pair = xmalloc(sizeof(config_key_pair_t));
+	key_pair->name = xstrdup("StorageType");
+	key_pair->value = xstrdup(slurmdbd_conf->storage_type);
+	list_append(my_list, key_pair);
+
+	key_pair = xmalloc(sizeof(config_key_pair_t));
+	key_pair->name = xstrdup("StorageUser");
+	key_pair->value = xstrdup(slurmdbd_conf->storage_user);
+	list_append(my_list, key_pair);
+
+	key_pair = xmalloc(sizeof(config_key_pair_t));
+	key_pair->name = xstrdup("TrackWCKey");
+	key_pair->value = xmalloc(32);
+	snprintf(key_pair->value, 32, "%u", slurmdbd_conf->track_wckey);
+	list_append(my_list, key_pair);
+
+	return my_list;
+}
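
The dump_config() routine added above repeats the same allocate/duplicate/append
sequence for every parameter. A small helper could factor that pattern; the sketch
below is purely illustrative (not part of this patch) and relies only on the
xmalloc/xstrdup/list_append calls already used in the function, assuming SLURM's
xstrdup() tolerates a NULL argument:

	#include <stdio.h>
	#include <inttypes.h>
	#include "src/common/list.h"
	#include "src/common/xmalloc.h"
	#include "src/common/xstring.h"

	/* Hypothetical helper: append one name/value pair to the config
	 * list.  xstrdup(NULL) yields NULL, so NULL values are safe. */
	static void _add_key_pair(List my_list, const char *name,
				  const char *value)
	{
		config_key_pair_t *key_pair;

		key_pair = xmalloc(sizeof(config_key_pair_t));
		key_pair->name  = xstrdup(name);
		key_pair->value = xstrdup(value);
		list_append(my_list, key_pair);
	}

	/* Hypothetical helper for numeric parameters. */
	static void _add_key_pair_uint(List my_list, const char *name,
				       uint32_t val)
	{
		char buf[32];

		snprintf(buf, sizeof(buf), "%u", val);
		_add_key_pair(my_list, name, buf);
	}

With these, each entry in dump_config() would collapse to a single call, e.g.
_add_key_pair(my_list, "StorageHost", slurmdbd_conf->storage_host);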
diff --git a/src/slurmdbd/read_config.h b/src/slurmdbd/read_config.h
index 20d4c27de8d2d0b7651fcbfb4ea8353092661e72..9f198eb640f8bcaed4f51a95501ee93ba0ebb9e6 100644
--- a/src/slurmdbd/read_config.h
+++ b/src/slurmdbd/read_config.h
@@ -2,13 +2,14 @@
  *  read_config.h - functions and declarations for reading slurmdbd.conf
  *****************************************************************************
  *  Copyright (C) 2003-2007 The Regents of the University of California.
- *  Copyright (C) 2008 Lawrence Livermore National Security.
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -53,6 +54,7 @@
 #endif  /* HAVE_CONFIG_H */
 
 #include <time.h>
+#include "src/common/list.h"
 
 #define DEFAULT_SLURMDBD_AUTHTYPE	"auth/none"
 //#define DEFAULT_SLURMDBD_JOB_PURGE	12
@@ -63,6 +65,8 @@
 /* SlurmDBD configuration parameters */
 typedef struct slurm_dbd_conf {
 	time_t		last_update;	/* time slurmdbd.conf read	*/
+	uint16_t	archive_events;	/* flag if we are to
+					 * archive events */
 	uint16_t	archive_jobs;	/* flag if we are to
 					 * archive jobs	*/
 	char *		archive_dir;    /* location to locally
@@ -71,23 +75,34 @@ typedef struct slurm_dbd_conf {
 	char *		archive_script;	/* script to archive old data	*/
 	uint16_t	archive_steps;	/* flag if we are to
 					 * archive steps	        */
+	uint16_t	archive_suspend;/* flag if we are to
+					 * archive suspend data         */
 	char *		auth_info;	/* authentication info		*/
 	char *		auth_type;	/* authentication mechanism	*/
+	uint16_t        control_timeout;/* how long to wait before
+					 * backup takes control         */   
 	char *		dbd_addr;	/* network address of Slurm DBD	*/
+	char *		dbd_backup;	/* hostname of Slurm DBD backup */
 	char *		dbd_host;	/* hostname of Slurm DBD	*/
 	uint16_t	dbd_port;	/* port number for RPCs to DBD	*/
 	uint16_t	debug_level;	/* Debug level, default=3	*/
 	char *   	default_qos;	/* default qos setting when
 					 * adding clusters              */
-	uint16_t	job_purge;	/* purge time for job info	*/ 
 	char *		log_file;	/* Log file			*/
 	uint16_t        msg_timeout;    /* message timeout		*/   
 	char *		pid_file;	/* where to store current PID	*/
 	char *		plugindir;	/* dir to look for plugins	*/
 	uint16_t        private_data;   /* restrict information         */
+	uint16_t        purge_event;    /* purge events older than
+					 * this in months */
+	uint16_t	purge_job;	/* purge time for job info	*/ 
+	uint16_t	purge_step;	/* purge time for step info	*/
+	uint16_t        purge_suspend;  /* purge suspend data older than this
+					 * in months */
 	uint32_t	slurm_user_id;	/* uid of slurm_user_name	*/
 	char *		slurm_user_name;/* user that slurmdbd runs as	*/
-	uint16_t	step_purge;	/* purge time for step info	*/
+	char *		storage_backup_host;/* backup host where DB is
+					     * running */
 	char *		storage_host;	/* host where DB is running	*/
 	char *		storage_loc;	/* database name		*/
 	char *		storage_pass;   /* password for DB write	*/
@@ -124,4 +139,8 @@ extern void log_config(void);
  */
 extern int read_slurmdbd_conf(void);
 
+/* Dump the configuration in name,value pairs for output to 
+ *	"sacctmgr show config", caller must call list_destroy() */
+extern List dump_config(void);
+
 #endif /* !_DBD_READ_CONFIG_H */
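
For reference, the new struct members map onto slurmdbd.conf keys as parsed in
read_slurmdbd_conf() above. A fragment exercising them might look like the
following; the host names and retention periods are illustrative, while the key
names are the ones used by the parser and reported by log_config():

	# Illustrative slurmdbd.conf fragment
	DbdHost=dbserver1
	DbdBackupHost=dbserver2
	StorageType=accounting_storage/mysql
	StorageHost=dbserver1
	StorageBackupHost=dbserver2
	PurgeEventMonths=12
	PurgeJobMonths=12
	PurgeStepMonths=6
	PurgeSuspendMonths=6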
diff --git a/src/slurmdbd/rpc_mgr.c b/src/slurmdbd/rpc_mgr.c
index c487f3d00388af0880867507f1b698eb308f0551..18f7a882fc94d31e6893b4c4079d4771781b7f62 100644
--- a/src/slurmdbd/rpc_mgr.c
+++ b/src/slurmdbd/rpc_mgr.c
@@ -5,10 +5,11 @@
  *  Copyright (C) 2008 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -62,7 +63,6 @@
 
 /* Local functions */
 static bool   _fd_readable(slurm_fd fd);
-static bool   _fd_writeable(slurm_fd fd);
 static void   _free_server_thread(pthread_t my_tid);
 static int    _send_resp(slurm_fd fd, Buf buffer);
 static void * _service_connection(void *arg);
@@ -273,12 +273,12 @@ static int _send_resp(slurm_fd fd, Buf buffer)
 	ssize_t msg_wrote;
 	char *out_buf;
 
-	if ((fd < 0) || (!_fd_writeable(fd)))
+	if ((fd < 0) || (!fd_writeable(fd)))
 		goto io_err;
 
 	msg_size = get_buf_offset(buffer);
 	nw_size = htonl(msg_size);
-	if (!_fd_writeable(fd))
+	if (!fd_writeable(fd))
 		goto io_err;
 	msg_wrote = write(fd, &nw_size, sizeof(nw_size));
 	if (msg_wrote != sizeof(nw_size))
@@ -286,7 +286,7 @@ static int _send_resp(slurm_fd fd, Buf buffer)
 
 	out_buf = get_buf_data(buffer);
 	while (msg_size > 0) {
-		if (!_fd_writeable(fd))
+		if (!fd_writeable(fd))
 			goto io_err;
 		msg_wrote = write(fd, out_buf, msg_size);
 		if (msg_wrote <= 0)
@@ -355,17 +355,18 @@ static bool _fd_readable(slurm_fd fd)
 
 /* Wait until a file is writeable, 
  * RET false if can not be written to within 5 seconds */
-static bool _fd_writeable(slurm_fd fd)
+extern bool fd_writeable(slurm_fd fd)
 {
 	struct pollfd ufds;
 	int msg_timeout = 5000;
 	int rc, time_left;
 	struct timeval tstart;
+	char temp[2];
 
 	ufds.fd     = fd;
 	ufds.events = POLLOUT;
 	gettimeofday(&tstart, NULL);
-	while (1) {
+	while (shutdown_time == 0) {
 		time_left = msg_timeout - _tot_wait(&tstart);
 		rc = poll(&ufds, 1, time_left);
 		if (shutdown_time)
@@ -377,10 +378,18 @@ static bool _fd_writeable(slurm_fd fd)
 			return false;
 		}
 		if (rc == 0) {
-			error("write timeout");
+			debug2("write timeout");
 			return false;
 		}
-		if (ufds.revents & POLLHUP) {
+
+		/*
+		 * Check here to make sure the socket really is there.
+		 * If not then exit out and notify the sender.  This
+		 * is here since a write doesn't always tell you the
+		 * socket is gone, but getting 0 back from a
+		 * nonblocking read means just that. 
+		 */
+		if (ufds.revents & POLLHUP || (recv(fd, &temp, 1, 0) == 0)) {
 			debug3("Write connection %d closed", fd);
 			return false;
 		}
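
The new test in fd_writeable() exploits the fact that recv() returning zero on a
connected stream socket means the peer performed an orderly shutdown, something a
successful poll() for POLLOUT alone does not reveal. A standalone sketch of the
same probe follows; the helper name is hypothetical, and it peeks with
MSG_PEEK|MSG_DONTWAIT instead of the plain recv() above so the check neither
consumes data nor blocks:

	#include <errno.h>
	#include <stdbool.h>
	#include <sys/types.h>
	#include <sys/socket.h>

	/* Returns true when the peer on a stream socket has closed
	 * its end of the connection. */
	static bool _peer_closed(int fd)
	{
		char c;
		ssize_t n = recv(fd, &c, 1, MSG_PEEK | MSG_DONTWAIT);

		if (n == 0)
			return true;	/* orderly EOF from the peer */
		if ((n < 0) && ((errno == EAGAIN) || (errno == EWOULDBLOCK)))
			return false;	/* no data, connection still up */
		return (n < 0);		/* other errors: treat as closed */
	}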
diff --git a/src/slurmdbd/rpc_mgr.h b/src/slurmdbd/rpc_mgr.h
index b91dcca0fd4a7b1a85475cc4c994fcfdffc36ac5..3fc62c66f83b47dbc94d91b029b0522f8e84dd53 100644
--- a/src/slurmdbd/rpc_mgr.h
+++ b/src/slurmdbd/rpc_mgr.h
@@ -4,10 +4,11 @@
  *  Copyright (C) 2008 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -41,6 +42,8 @@
 #include "src/common/pack.h"
 #include "src/common/assoc_mgr.h"
 
+extern bool fd_writeable(slurm_fd fd);
+
 /* Return a buffer containing a DBD_RC (return code) message
  * caller must free returned buffer */
 extern Buf make_dbd_rc_msg(uint16_t rpc_version, 
diff --git a/src/slurmdbd/slurmdbd.c b/src/slurmdbd/slurmdbd.c
index 9ee4fcc3690e15ec38bf718e9e171b5f9d5b9501..ead780afd932f6399b22be92b90694cfa55276d5 100644
--- a/src/slurmdbd/slurmdbd.c
+++ b/src/slurmdbd/slurmdbd.c
@@ -5,10 +5,11 @@
  *  Copyright (C) 2008 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -59,6 +60,7 @@
 #include "src/common/xstring.h"
 #include "src/slurmdbd/read_config.h"
 #include "src/slurmdbd/rpc_mgr.h"
+#include "src/slurmdbd/backup.h"
 
 /* Global variables */
 time_t shutdown_time = 0;		/* when shutdown request arrived */
@@ -85,7 +87,7 @@ static void  _init_config(void);
 static void  _init_pidfile(void);
 static void  _kill_old_slurmdbd(void);
 static void  _parse_commandline(int argc, char *argv[]);
-static void _rollup_handler_cancel();
+static void  _rollup_handler_cancel();
 static void *_rollup_handler(void *no_data);
 static void *_signal_handler(void *no_data);
 static void  _update_logging(void);
@@ -106,14 +108,6 @@ int main(int argc, char *argv[])
 	_parse_commandline(argc, argv);
 	_update_logging();
 
-	if (gethostname_short(node_name, sizeof(node_name)))
-		fatal("getnodename: %m");
-	if (slurmdbd_conf->dbd_host &&
-	    strcmp(slurmdbd_conf->dbd_host, node_name) &&
-	    strcmp(slurmdbd_conf->dbd_host, "localhost")) {
-		fatal("This host not configured to run SlurmDBD (%s != %s)",
-		      node_name, slurmdbd_conf->dbd_host);
-	}
 	if (slurm_auth_init(NULL) != SLURM_SUCCESS) {
 		fatal("Unable to initialize %s authentication plugin",
 			slurmdbd_conf->auth_type);
@@ -147,53 +141,93 @@ int main(int argc, char *argv[])
 	assoc_init_arg.cache_level = ASSOC_MGR_CACHE_USER;
 	if(slurmdbd_conf->track_wckey)
 		assoc_init_arg.cache_level |= ASSOC_MGR_CACHE_WCKEY;
-
+	
 	if(assoc_mgr_init(db_conn, &assoc_init_arg) == SLURM_ERROR) {
 		error("Problem getting cache of data");
 		acct_storage_g_close_connection(&db_conn);
 		goto end_it;
 	}
 
-	if(!shutdown_time) {
-		/* Create attached thread to process incoming RPCs */
-		slurm_attr_init(&thread_attr);
-		if (pthread_create(&rpc_handler_thread, &thread_attr, 
-				   rpc_mgr, NULL))
-			fatal("pthread_create error %m");
-		slurm_attr_destroy(&thread_attr);
-	}
+	if (gethostname_short(node_name, sizeof(node_name)))
+		fatal("getnodename: %m");
 
-	if(!shutdown_time) {
-		/* Create attached thread to do usage rollup */
-		slurm_attr_init(&thread_attr);
-		if (pthread_create(&rollup_handler_thread, &thread_attr,
-				   _rollup_handler, db_conn))
-			fatal("pthread_create error %m");
-		slurm_attr_destroy(&thread_attr);
-	}
+	while(1) {
+		if (slurmdbd_conf->dbd_backup &&
+		    (!strcmp(node_name, slurmdbd_conf->dbd_backup) ||
+		     !strcmp(slurmdbd_conf->dbd_backup, "localhost"))) {
+			info("slurmdbd running in background mode");
+			have_control = false;
+			backup = true;
+			run_backup();
+			if(!shutdown_time)
+				assoc_mgr_refresh_lists(db_conn, NULL);		
+		} else if (slurmdbd_conf->dbd_host &&
+			   (!strcmp(slurmdbd_conf->dbd_host, node_name) ||
+			    !strcmp(slurmdbd_conf->dbd_host, "localhost"))) {
+			backup = false;
+			have_control = true;
+		} else {
+			fatal("This host not configured to run SlurmDBD "
+			      "(%s != %s | (backup) %s)",
+			      node_name, slurmdbd_conf->dbd_host,
+			      slurmdbd_conf->dbd_backup);
+		}
+		
+		if(!shutdown_time) {
+			/* Create attached thread to process incoming RPCs */
+			slurm_attr_init(&thread_attr);
+			if (pthread_create(&rpc_handler_thread, &thread_attr, 
+					   rpc_mgr, NULL))
+				fatal("pthread_create error %m");
+			slurm_attr_destroy(&thread_attr);
+		}
 
-	/* Daemon is fully operational here */
-	info("slurmdbd version %s started", SLURM_VERSION);
+		if(!shutdown_time) {
+			/* Create attached thread to do usage rollup */
+			slurm_attr_init(&thread_attr);
+			if (pthread_create(&rollup_handler_thread,
+					   &thread_attr,
+					   _rollup_handler, db_conn))
+				fatal("pthread_create error %m");
+			slurm_attr_destroy(&thread_attr);
+		}
 
-	/* Daemon termination handled here */
-	if(rollup_handler_thread)
-		pthread_join(rollup_handler_thread, NULL);
+		/* Daemon is fully operational here */
+		if(!shutdown_time || primary_resumed) {
+			shutdown_time = 0;
+			info("slurmdbd version %s started", SLURM_VERSION);
+			if(backup)
+				run_backup();
+		}
 
-	if(rpc_handler_thread)
-		pthread_join(rpc_handler_thread, NULL);
+		/* only run when not in backup mode */
+		if(rollup_handler_thread)
+			pthread_join(rollup_handler_thread, NULL);
+		if(rpc_handler_thread)
+			pthread_join(rpc_handler_thread, NULL);
 
+		if(backup && primary_resumed) { 
+			shutdown_time = 0;
+			info("Backup has given up control");
+		}
+
+		if(shutdown_time)
+			break;
+	}
+	/* Daemon termination handled here */
+	
 	if(signal_handler_thread)
 		pthread_join(signal_handler_thread, NULL);
-
+	
 end_it:
 	acct_storage_g_close_connection(&db_conn);
-
+	
 	if (slurmdbd_conf->pid_file &&
 	    (unlink(slurmdbd_conf->pid_file) < 0)) {
 		verbose("Unable to remove pidfile '%s': %m",
 			slurmdbd_conf->pid_file);
 	}
-
+	
 	assoc_mgr_fini(NULL);
 	slurm_acct_storage_fini();
 	slurm_auth_fini();
@@ -202,6 +236,13 @@ end_it:
 	exit(0);
 }
 
+extern void shutdown_threads(void)
+{
+	shutdown_time = time(NULL);
+	rpc_mgr_wake();
+	_rollup_handler_cancel();
+}
+
 /* Reset some of the processes resource limits to the hard limits */
 static void  _init_config(void)
 {
@@ -398,7 +439,7 @@ static void *_rollup_handler(void *db_conn)
 		slurm_mutex_lock(&rollup_lock);
 		running_rollup = 1;
 		debug2("running rollup at %s", ctime(&start_time));
-		acct_storage_g_roll_usage(db_conn, 0);
+		acct_storage_g_roll_usage(db_conn, 0, 0, 1);
 		running_rollup = 0;
 		slurm_mutex_unlock(&rollup_lock);	
 
@@ -457,17 +498,12 @@ static void *_signal_handler(void *no_data)
 		case SIGINT:	/* kill -2  or <CTRL-C> */
 		case SIGTERM:	/* kill -15 */
 			info("Terminate signal (SIGINT or SIGTERM) received");
-			shutdown_time = time(NULL);
-			rpc_mgr_wake();
-			_rollup_handler_cancel();
-
+			shutdown_threads();
 			return NULL;	/* Normal termination */
 		case SIGABRT:	/* abort */
 			info("SIGABRT received");
 			abort();	/* Should terminate here */
-			shutdown_time = time(NULL);
-			rpc_mgr_wake();
-			_rollup_handler_cancel();
+			shutdown_threads();
 			return NULL;
 		default:
 			error("Invalid signal (%d) received", sig);
diff --git a/src/slurmdbd/slurmdbd.h b/src/slurmdbd/slurmdbd.h
index a5fb94502960cb30173c28379182d5e127b739ba..0ad104f1cec6a955e0d4b60dd3828271260615ee 100644
--- a/src/slurmdbd/slurmdbd.h
+++ b/src/slurmdbd/slurmdbd.h
@@ -5,10 +5,11 @@
  *  Copyright (C) 2008 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -41,4 +42,6 @@
 
 extern time_t shutdown_time;		/* when shutdown request arrived */
 
+extern void shutdown_threads(void);
+
 #endif /* !_SLURM_DBD_H */
diff --git a/src/smap/Makefile.am b/src/smap/Makefile.am
index 24f328c3d6426e375797e86e513315200221c1b2..cb5115a52981ea84a9296b1c4536ceaa00071643 100644
--- a/src/smap/Makefile.am
+++ b/src/smap/Makefile.am
@@ -20,7 +20,8 @@ smap_LDADD = \
 noinst_HEADERS = smap.h 
 smap_SOURCES = smap.c \
 	job_functions.c partition_functions.c \
-	configure_functions.c grid_functions.c opts.c
+	configure_functions.c grid_functions.c \
+	reservation_functions.c opts.c
 
 force:
 $(smap_LDADD) : force
@@ -32,7 +33,8 @@ else
 
 EXTRA_smap_SOURCES = smap.h smap.c \
 	job_functions.c partition_functions.c \
-	configure_functions.c grid_functions.c opts.c
+	configure_functions.c grid_functions.c \
+	reservation_functions.c opts.c
 
 endif
 
diff --git a/src/smap/Makefile.in b/src/smap/Makefile.in
index 5b0c51a3e07c228b2532a942cb761e108553b4e8..bba9c44ed61c0c648277e7d989f8c0a5a86526a7 100644
--- a/src/smap/Makefile.in
+++ b/src/smap/Makefile.in
@@ -50,14 +50,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -78,15 +82,18 @@ am__installdirs = "$(DESTDIR)$(bindir)"
 binPROGRAMS_INSTALL = $(INSTALL_PROGRAM)
 PROGRAMS = $(bin_PROGRAMS)
 am__smap_SOURCES_DIST = smap.c job_functions.c partition_functions.c \
-	configure_functions.c grid_functions.c opts.c
+	configure_functions.c grid_functions.c reservation_functions.c \
+	opts.c
 @HAVE_SOME_CURSES_TRUE@am_smap_OBJECTS = smap.$(OBJEXT) \
 @HAVE_SOME_CURSES_TRUE@	job_functions.$(OBJEXT) \
 @HAVE_SOME_CURSES_TRUE@	partition_functions.$(OBJEXT) \
 @HAVE_SOME_CURSES_TRUE@	configure_functions.$(OBJEXT) \
-@HAVE_SOME_CURSES_TRUE@	grid_functions.$(OBJEXT) opts.$(OBJEXT)
+@HAVE_SOME_CURSES_TRUE@	grid_functions.$(OBJEXT) \
+@HAVE_SOME_CURSES_TRUE@	reservation_functions.$(OBJEXT) \
+@HAVE_SOME_CURSES_TRUE@	opts.$(OBJEXT)
 am__EXTRA_smap_SOURCES_DIST = smap.h smap.c job_functions.c \
 	partition_functions.c configure_functions.c grid_functions.c \
-	opts.c
+	reservation_functions.c opts.c
 smap_OBJECTS = $(am_smap_OBJECTS)
 @HAVE_SOME_CURSES_TRUE@smap_DEPENDENCIES = $(top_builddir)/src/plugins/select/bluegene/block_allocator/libbluegene_block_allocator.la \
 @HAVE_SOME_CURSES_TRUE@	$(top_builddir)/src/api/libslurm.o
@@ -122,6 +129,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
@@ -290,12 +301,14 @@ INCLUDES = -I$(top_srcdir) $(BG_INCLUDES)
 @HAVE_SOME_CURSES_TRUE@noinst_HEADERS = smap.h 
 @HAVE_SOME_CURSES_TRUE@smap_SOURCES = smap.c \
 @HAVE_SOME_CURSES_TRUE@	job_functions.c partition_functions.c \
-@HAVE_SOME_CURSES_TRUE@	configure_functions.c grid_functions.c opts.c
+@HAVE_SOME_CURSES_TRUE@	configure_functions.c grid_functions.c \
+@HAVE_SOME_CURSES_TRUE@	reservation_functions.c opts.c
 
 @HAVE_SOME_CURSES_TRUE@smap_LDFLAGS = -export-dynamic $(CMD_LDFLAGS)
 @HAVE_SOME_CURSES_FALSE@EXTRA_smap_SOURCES = smap.h smap.c \
 @HAVE_SOME_CURSES_FALSE@	job_functions.c partition_functions.c \
-@HAVE_SOME_CURSES_FALSE@	configure_functions.c grid_functions.c opts.c
+@HAVE_SOME_CURSES_FALSE@	configure_functions.c grid_functions.c \
+@HAVE_SOME_CURSES_FALSE@	reservation_functions.c opts.c
 
 all: all-am
 
@@ -373,6 +386,7 @@ distclean-compile:
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/job_functions.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/opts.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/partition_functions.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/reservation_functions.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/smap.Po@am__quote@
 
 .c.o:
diff --git a/src/smap/configure_functions.c b/src/smap/configure_functions.c
index 7e0807f7a36fe2fa235289bb6fde8d563becef2f..b098c5297f732349beee2099bb63aeed7227d438 100644
--- a/src/smap/configure_functions.c
+++ b/src/smap/configure_functions.c
@@ -6,10 +6,11 @@
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Danny Auble <da@llnl.gov>
  *
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -1599,6 +1600,6 @@ void get_command(void)
 	main_xcord = 1;
 	main_ycord = 1;
 	print_date();
-	get_job(0);
+	get_job();
 	return;
 }
diff --git a/src/smap/grid_functions.c b/src/smap/grid_functions.c
index 8698330aa85f21820e73709bd0ced670b166075e..12ac8380aecb63bac9394dd54f74d400b86ab95d 100644
--- a/src/smap/grid_functions.c
+++ b/src/smap/grid_functions.c
@@ -6,10 +6,11 @@
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Danny Auble <da@llnl.gov>
  *
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/smap/job_functions.c b/src/smap/job_functions.c
index 459ed82b1a4e3c9ab70fa755cdfb77cb4e587a2f..3ac60f7dbce721cbcbcce22dec6c838f6284b114 100644
--- a/src/smap/job_functions.c
+++ b/src/smap/job_functions.c
@@ -2,14 +2,15 @@
  *  job_functions.c - Functions related to job display mode of smap.
  *****************************************************************************
  *  Copyright (C) 2002-2007 The Regents of the University of California.
- *  Copyright (C) 2008 Lawrence Livermore National Security.
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Danny Auble <da@llnl.gov>
  *
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -48,7 +49,7 @@ static int  _nodes_in_list(char *node_list);
 static void _print_header_job(void);
 static int  _print_text_job(job_info_t * job_ptr);
 
-extern void get_job()
+extern void get_job(void)
 {
 	int error_code = -1, i, recs;
 	static int printed_jobs = 0;
@@ -203,6 +204,11 @@ static void _print_header_job(void)
 		mvwprintw(text_win, main_ycord,
 			  main_xcord, "BG_BLOCK");
 		main_xcord += 18;
+#endif
+#ifdef HAVE_CRAY_XT
+		mvwprintw(text_win, main_ycord,
+			  main_xcord, "RESV_ID");
+		main_xcord += 18;
 #endif
 		mvwprintw(text_win, main_ycord,
 			  main_xcord, "USER");
@@ -260,18 +266,7 @@ static int _print_text_job(job_info_t * job_ptr)
 	uint32_t node_cnt = 0;
 	char *ionodes = NULL, *uname;
 	time_t now_time = time(NULL);
-	char *temp = NULL;
-	/* first set the jname to the job_ptr->name */
-	char *jname = NULL;
-
-	if(job_ptr->name) {
-		jname = xstrdup(job_ptr->name);
-		/* then grep for " since that is the delimiter for
-		   the wckey */
-		if((temp = strchr(jname, '\"')))
-			temp[0] = '\0';
-	}
-
+	
 #ifdef HAVE_BG
 	select_g_get_jobinfo(job_ptr->select_jobinfo, 
 			     SELECT_DATA_IONODES, 
@@ -309,6 +304,15 @@ static int _print_text_job(job_info_t * job_ptr)
 						  sizeof(time_buf), 
 						  SELECT_PRINT_BG_ID));
 		main_xcord += 18;
+#endif
+#ifdef HAVE_CRAY_XT
+		mvwprintw(text_win, main_ycord,
+			  main_xcord, "%.16s", 
+			  select_g_sprint_jobinfo(job_ptr->select_jobinfo, 
+						  time_buf, 
+						  sizeof(time_buf), 
+						  SELECT_PRINT_RESV_ID));
+		main_xcord += 18;
 #endif
 		uname = uid_to_string((uid_t) job_ptr->user_id);
 		mvwprintw(text_win, main_ycord,
@@ -316,7 +320,7 @@ static int _print_text_job(job_info_t * job_ptr)
 		xfree(uname);
 		main_xcord += 9;
 		mvwprintw(text_win, main_ycord,
-			  main_xcord, "%.9s", jname);
+			  main_xcord, "%.9s", job_ptr->name);
 		main_xcord += 10;
 		mvwprintw(text_win, main_ycord,
 			  main_xcord, "%.2s",
@@ -383,11 +387,18 @@ static int _print_text_job(job_info_t * job_ptr)
 					       time_buf, 
 					       sizeof(time_buf), 
 					       SELECT_PRINT_BG_ID));
+#endif
+#ifdef HAVE_CRAY_XT
+		printf("%16.16s ", 
+		       select_g_sprint_jobinfo(job_ptr->select_jobinfo, 
+					       time_buf, 
+					       sizeof(time_buf), 
+					       SELECT_PRINT_RESV_ID));
 #endif
 		uname = uid_to_string((uid_t) job_ptr->user_id);
 		printf("%8.8s ", uname);
 		xfree(uname);
-		printf("%6.6s ", jname);
+		printf("%6.6s ", job_ptr->name);
 		printf("%2.2s ",
 		       job_state_string_compact(job_ptr->job_state));
 		if(!strcasecmp(job_ptr->nodes,"waiting...")) {
@@ -410,7 +421,6 @@ static int _print_text_job(job_info_t * job_ptr)
 		printf("\n");
 		
 	}
-	xfree(jname);
 
 	return printed;
 }
diff --git a/src/smap/opts.c b/src/smap/opts.c
index 40a73dbc5b7b0a37f931bfdea81fb2d837fa35dd..6c7b395713cf7c6cfc45fc357be2310ee8965733 100644
--- a/src/smap/opts.c
+++ b/src/smap/opts.c
@@ -1,13 +1,15 @@
 /****************************************************************************\
  *  opts.c - smap command line option processing functions
  *****************************************************************************
- *  Copyright (C) 2002 The Regents of the University of California.
+ *  Copyright (C) 2002-2007 The Regents of the University of California.
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Danny Auble <da@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -82,6 +84,8 @@ extern void parse_command_line(int argc, char *argv[])
 				tmp = BGPART;
 			else if (!strcmp(optarg, "c"))
 				tmp = COMMANDS;
+			else if (!strcmp(optarg, "r"))
+				tmp = RESERVATIONS;
 
 			params.display = tmp;
 			break;
@@ -154,29 +158,33 @@ static void _print_version(void)
 
 static void _usage(void)
 {
-	printf("\
-Usage: smap [-hVcp] [-D jsbc] [-i seconds]\n");
+#ifdef HAVE_BG
+	printf("Usage: smap [-chVp] [-D bcjrs] [-i seconds]\n");
+#else
+	printf("Usage: smap [-chVp] [-D jrs] [-i seconds]\n");
+#endif
 }
 
 static void _help(void)
 {
 	printf("\
 Usage: smap [OPTIONS]\n\
-  -D, --display              set which Display mode to use\n\
-      j=jobs\n\
-      s=slurm partitions\n\
-      b=Bluegene blocks\n\
-      c=set configuration\n\
+  -c, --commandline          output written straight to the\n\
+                             commandline.\n\
+  -D, --display              set which display mode to use\n\
+                             b=bluegene blocks\n\
+                             c=set bluegene configuration\n\
+                             j=jobs\n\
+                             r=reservations\n\
+                             s=slurm partitions\n\
   -h, --noheader             no headers on output\n\
   -i, --iterate=seconds      specify an iteration period\n\
-  -V, --version              output version information and exit\n\
-  -c, --commandline          output written with straight to the \
-commandline.\n\
-  -p, --parse                used with -c to not format output, but use \
-single tab delimitation.\n\
-  -R, --resolve              resolve an XYZ coord from a Rack/Midplane id \
-or vice versa.\n\
+  -p, --parse                used with -c to not format output, but use\n\
+                             single tab delimitation.\n\
+  -R, --resolve              resolve an XYZ coord from a Rack/Midplane id \n\
+                             or vice versa.\n\
                              (i.e. -R R101 for R/M input -R 101 for XYZ).\n\
+  -V, --version              output version information and exit\n\
 \nHelp options:\n\
   --help                     show this help message\n\
   --usage                    display brief usage message\n");
diff --git a/src/smap/partition_functions.c b/src/smap/partition_functions.c
index 7850adf62d5708f2b32558b13439f9f59ebec709..9477f8003ad0dddf5ad19ddfbd94728ac1c62975 100644
--- a/src/smap/partition_functions.c
+++ b/src/smap/partition_functions.c
@@ -7,10 +7,11 @@
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Danny Auble <da@llnl.gov>
  * 
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -303,7 +304,7 @@ extern void get_bg_part()
 			_marknodes(block_ptr, last_count);
 		}
 		
-		if(block_ptr->bg_conn_type == SELECT_SMALL)
+		if(block_ptr->bg_conn_type >= SELECT_SMALL)
 			block_ptr->size = 0;
 
 		list_append(block_list, block_ptr);
diff --git a/src/smap/reservation_functions.c b/src/smap/reservation_functions.c
new file mode 100644
index 0000000000000000000000000000000000000000..7d0d48e047825cf9f6510707ff67e43aa51f7b9a
--- /dev/null
+++ b/src/smap/reservation_functions.c
@@ -0,0 +1,255 @@
+/*****************************************************************************\
+ *  reservation_functions.c - Functions related to reservation display mode 
+ *  of smap.
+ *****************************************************************************
+ *  Copyright (C) 2002-2007 The Regents of the University of California.
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
+ *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+ *  Written by Morris Jette <jette1@llnl.gov>
+ *  CODE-OCEC-09-009. All rights reserved.
+ *  
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
+ *  
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission 
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and 
+ *  distribute linked combinations including the two. You must obey the GNU 
+ *  General Public License in all respects for all of the code used other than 
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this 
+ *  exception to your version of the file(s), but you are not obligated to do 
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in 
+ *  the program, then also delete it here.
+ *  
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *  
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#include "src/common/parse_time.h"
+#include "src/smap/smap.h"
+
+static void _print_header_resv(void);
+static void _print_text_resv(reserve_info_t * resv_ptr);
+
+extern void get_reservation(void)
+{
+	int error_code = -1, active, i, recs;
+	reserve_info_t resv;
+	time_t now = time(NULL);
+	static int printed_resv = 0;
+	static int count = 0;
+	static reserve_info_msg_t *resv_info_ptr = NULL, *new_resv_ptr = NULL;
+
+	if (resv_info_ptr) {
+		error_code = slurm_load_reservations(resv_info_ptr->last_update,
+						     &new_resv_ptr);
+		if (error_code == SLURM_SUCCESS)
+			 slurm_free_reservation_info_msg(resv_info_ptr);
+		else if (slurm_get_errno() == SLURM_NO_CHANGE_IN_DATA) {
+			error_code = SLURM_SUCCESS;
+			new_resv_ptr = resv_info_ptr;
+		}
+	} else
+		error_code = slurm_load_reservations((time_t) NULL,
+						     &new_resv_ptr);
+
+	if (error_code) {
+		if (quiet_flag != 1) {
+			if(!params.commandline) {
+				mvwprintw(text_win,
+					  main_ycord, 1,
+					  "slurm_load_reservations: %s", 
+					  slurm_strerror(slurm_get_errno()));
+				main_ycord++;
+			} else {
+				printf("slurm_load_reservations: %s\n",
+				       slurm_strerror(slurm_get_errno()));
+			}
+		}
+	}
+
+	if (!params.no_header)
+		_print_header_resv();
+
+	if (new_resv_ptr)
+		recs = new_resv_ptr->record_count;
+	else
+		recs = 0;
+
+	if (!params.commandline) {
+		if((text_line_cnt+printed_resv) > count) 
+			text_line_cnt--;
+	}
+	printed_resv = 0;
+	count = 0;
+	for (i = 0; i < recs; i++) {
+		resv = new_resv_ptr->reservation_array[i];
+		if ((resv.start_time <= now) && (resv.end_time >= now))
+			active = 1;
+		else
+			active = 0;
+
+		if (active && (resv.node_inx[0] != -1)) {
+#ifdef HAVE_SUN_CONST
+			set_grid_name(resv.node_list, count);
+#else
+			int j = 0;
+			resv.node_cnt = 0;
+			while (resv.node_inx[j] >= 0) {
+				resv.node_cnt +=
+				    (resv.node_inx[j + 1] + 1) -
+				    resv.node_inx[j];
+				set_grid_inx(resv.node_inx[j],
+					     resv.node_inx[j + 1], count);
+				j += 2;
+			}
+#endif
+		}
+
+		if (resv.node_inx[0] != -1) {
+			if (!params.commandline) {
+				if ((count >= text_line_cnt) &&
+				    (printed_resv  < (text_win->_maxy-3))) {
+					resv.flags = (int)letters[count%62];
+					wattron(text_win,
+						COLOR_PAIR(colors[count%6]));
+					_print_text_resv(&resv);
+					wattroff(text_win,
+						 COLOR_PAIR(colors[count%6]));
+					printed_resv++;
+				} 
+			} else {
+				/* put the letter code into "flags" field */
+				resv.flags = (int)letters[count%62];
+				_print_text_resv(&resv);
+			}
+			count++;			
+		}
+		if (count==128)
+			count=0;
+	}
+
+	if (params.commandline && params.iterate)
+		printf("\n");
+
+	if (!params.commandline)
+		main_ycord++;
+	
+	resv_info_ptr = new_resv_ptr;
+	return;
+}
+
+static void _print_header_resv(void)
+{
+	if (!params.commandline) {
+		mvwprintw(text_win, main_ycord,
+			  main_xcord, "ID ");
+		main_xcord += 3;
+		mvwprintw(text_win, main_ycord,
+			  main_xcord, "%12.12s  ", "NAME");
+		main_xcord += 14;
+		mvwprintw(text_win, main_ycord,
+			  main_xcord, "%19.19s  ", "START_TIME");
+		main_xcord += 21;
+		mvwprintw(text_win, main_ycord,
+			  main_xcord, "%19.19s  ", "END_TIME");
+		main_xcord += 21;
+		mvwprintw(text_win, main_ycord,
+			  main_xcord, "%5.5s  ", "NODES");
+		main_xcord += 7;
+		mvwprintw(text_win, main_ycord,
+			  main_xcord, "%30.30s  ", 
+			  "ACCESS_CONTROL(Accounts,Users)");
+		main_xcord += 32;
+		mvwprintw(text_win, main_ycord,
+			  main_xcord, "%s",    "NODELIST");
+		main_xcord = 1;
+		main_ycord++;
+	} else {
+		printf("%12.12s  ", "NAME");
+		printf("%19.19s  ", "START_TIME");
+		printf("%19.19s  ", "END_TIME");
+		printf("%5.5s  ",   "NODES");
+		printf("%30.30s  ", "ACCESS_CONTROL(Accounts,Users)");
+		printf("%s",        "NODELIST\n");
+	}
+}
+
+static void _print_text_resv(reserve_info_t * resv_ptr)
+{
+	char start_str[32], end_str[32], acl[32];
+
+	slurm_make_time_str(&resv_ptr->start_time, start_str, 
+			    sizeof(start_str));
+	slurm_make_time_str(&resv_ptr->end_time, end_str, 
+			    sizeof(end_str));
+
+	if (resv_ptr->accounts && resv_ptr->accounts[0] &&
+	    resv_ptr->users && resv_ptr->users[0])
+		snprintf(acl, sizeof(acl), "A:%s,U:%s", resv_ptr->accounts,
+			 resv_ptr->users);
+	else if (resv_ptr->accounts && resv_ptr->accounts[0])
+		snprintf(acl, sizeof(acl), "A:%s", resv_ptr->accounts);
+	else if (resv_ptr->users && resv_ptr->users[0])
+		snprintf(acl, sizeof(acl), "U:%s", resv_ptr->users);
+	else
+		snprintf(acl, sizeof(acl), "NONE");
+
+
+	if (!params.commandline) {
+		mvwprintw(text_win, main_ycord,
+			  main_xcord, "%c", resv_ptr->flags);
+		main_xcord += 3;
+
+		mvwprintw(text_win, main_ycord,
+			  main_xcord, "%12.12s  ", resv_ptr->name);
+		main_xcord += 14;
+
+		mvwprintw(text_win, main_ycord,
+			  main_xcord, "%19.19s  ", start_str);
+		main_xcord += 21;
+
+		mvwprintw(text_win, main_ycord,
+			  main_xcord, "%19.19s  ", end_str);
+		main_xcord += 21;
+
+		mvwprintw(text_win, main_ycord,
+			  main_xcord, "%5.d  ", resv_ptr->node_cnt);
+		main_xcord += 7;
+
+		mvwprintw(text_win, main_ycord,
+			  main_xcord, "%30.30s  ", acl);
+		main_xcord += 33;
+
+		mvwprintw(text_win, main_ycord,
+			  main_xcord, "%s", resv_ptr->node_list);
+
+		main_xcord = 1;
+		main_ycord++;
+	} else {
+		printf("%12.12s  ", resv_ptr->name);
+		printf("%19.19s  ", start_str);
+		printf("%19.19s  ", end_str);
+		printf("%5.d  ",    resv_ptr->node_cnt);
+		printf("%30.30s  ", acl);
+		printf("%s ",       resv_ptr->node_list);
+
+		printf("\n");
+		
+	}
+}
+
+
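
get_reservation() above uses the standard SLURM incremental-load idiom: pass the
previous message's last_update timestamp so the controller can answer
SLURM_NO_CHANGE_IN_DATA instead of resending unchanged records. Reduced to its
essentials, the pattern looks like this (a hypothetical wrapper around the same
public API calls used above):

	#include <slurm/slurm.h>
	#include <slurm/slurm_errno.h>

	static reserve_info_msg_t *resv_msg = NULL;

	static int _refresh_reservations(void)
	{
		reserve_info_msg_t *new_msg = NULL;
		time_t since = resv_msg ?
			resv_msg->last_update : (time_t) NULL;
		int rc = slurm_load_reservations(since, &new_msg);

		if (rc == SLURM_SUCCESS) {
			if (resv_msg)
				slurm_free_reservation_info_msg(resv_msg);
			resv_msg = new_msg;	/* fresh copy received */
		} else if (slurm_get_errno() == SLURM_NO_CHANGE_IN_DATA) {
			rc = SLURM_SUCCESS;	/* cached copy still valid */
		}
		return rc;
	}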
diff --git a/src/smap/smap.c b/src/smap/smap.c
index 0516274e8c5f3e7b548127eb7872c3bd3d4778bf..c238509caaa1d39bcbf5ccde81a3ecb8390d9b46 100644
--- a/src/smap/smap.c
+++ b/src/smap/smap.c
@@ -2,14 +2,15 @@
  *  smap.c - Report overall state the system
  *****************************************************************************
  *  Copyright (C) 2004-2007 The Regents of the University of California.
- *  Copyright (C) 2008 Lawrence Livermore National Security.
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Danny Auble <da@llnl.gov>
  *
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -230,6 +231,9 @@ part_fini:
 		case JOBS:
 			get_job();
 			break;
+		case RESERVATIONS:
+			get_reservation();
+			break;
 		case SLURMPART:
 			get_slurm_part();
 			break;
@@ -247,7 +251,8 @@ part_fini:
 			get_bg_part();
 			break;
 #else
-		default:
+		case COMMANDS:
+		case BGPART:
 			error("Must be on a BG SYSTEM to run this command");
 			if(!params.commandline)
 				endwin();
@@ -367,6 +372,12 @@ static int _get_option()
 		params.display = JOBS;
 		return 1;
 		break;
+	case 'r':
+		text_line_cnt = 0;
+		grid_line_cnt = 0;
+		params.display = RESERVATIONS;
+		return 1;
+		break;
 #ifdef HAVE_BG
 	case 'b':
 		text_line_cnt = 0;
@@ -474,6 +485,9 @@ static void *_resize_handler(int sig)
 	case JOBS:
 		get_job();
 		break;
+	case RESERVATIONS:
+		get_reservation();
+		break;
 	case SLURMPART:
 		get_slurm_part();
 		break;
diff --git a/src/smap/smap.h b/src/smap/smap.h
index 7bb97e87da742ae71102cea1f416dfc0826f5aaf..158d12f167b8dcf732024024c92ada2cf5196b7e 100644
--- a/src/smap/smap.h
+++ b/src/smap/smap.h
@@ -5,10 +5,11 @@
  *  Copyright (C) 2008 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Danny Auble <da@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -102,7 +103,7 @@
 #define OPT_LONG_USAGE	0x101
 #define OPT_LONG_HIDE	0x102
 
-enum { JOBS, SLURMPART, BGPART, COMMANDS };
+enum { JOBS, RESERVATIONS, SLURMPART, BGPART, COMMANDS };
 
 //typedef void (*sighandler_t) (int);
 
@@ -152,12 +153,13 @@ extern int set_grid_bg(int *start, int *end, int count, int set);
 extern void print_grid(int dir);
 
 extern void parse_command_line(int argc, char *argv[]);
-extern void print_date();
+extern void print_date(void);
 extern void clear_window(WINDOW *win);
 
-extern void get_slurm_part();
-extern void get_bg_part();
-extern void get_job();
-extern void get_command();
+extern void get_slurm_part(void);
+extern void get_bg_part(void);
+extern void get_job(void);
+extern void get_command(void);
+extern void get_reservation(void);
 
 #endif
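
The prototype changes above (e.g. get_job() becoming get_job(void)) are not
cosmetic: in C, an empty parameter list declares a function taking unspecified
arguments, so the compiler cannot diagnose bad calls. A minimal illustration:

	void f();        /* unspecified arguments: f(1, 2, 3) compiles silently */
	void g(void);    /* no arguments: g(1) is rejected at compile time */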
diff --git a/src/sprio/Makefile.am b/src/sprio/Makefile.am
new file mode 100644
index 0000000000000000000000000000000000000000..abadc1aa1ed6c9c0f3c322da36579bcc84f1e237
--- /dev/null
+++ b/src/sprio/Makefile.am
@@ -0,0 +1,20 @@
+#
+# Makefile for sprio
+
+AUTOMAKE_OPTIONS = foreign
+
+INCLUDES = -I$(top_srcdir)
+
+bin_PROGRAMS = sprio
+
+sprio_LDADD = 	$(top_builddir)/src/api/libslurm.o -ldl
+
+noinst_HEADERS = sprio.h print.h
+sprio_SOURCES = sprio.c print.c opts.c
+
+force:
+$(sprio_LDADD) : force
+	@cd `dirname $@` && $(MAKE) `basename $@`
+
+sprio_LDFLAGS = -export-dynamic $(CMD_LDFLAGS)
+
diff --git a/src/sprio/Makefile.in b/src/sprio/Makefile.in
new file mode 100644
index 0000000000000000000000000000000000000000..6c1bbbe40bee57ded042bf5f52ef2a3dba71ee84
--- /dev/null
+++ b/src/sprio/Makefile.in
@@ -0,0 +1,570 @@
+# Makefile.in generated by automake 1.10.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
+# 2003, 2004, 2005, 2006, 2007, 2008  Free Software Foundation, Inc.
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+#
+# Makefile for sprio
+
+
+VPATH = @srcdir@
+pkgdatadir = $(datadir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+target_triplet = @target@
+bin_PROGRAMS = sprio$(EXEEXT)
+subdir = src/sprio
+DIST_COMMON = $(noinst_HEADERS) $(srcdir)/Makefile.am \
+	$(srcdir)/Makefile.in
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
+	$(top_srcdir)/auxdir/slurm.m4 \
+	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
+	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
+	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
+	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
+	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_databases.m4 \
+	$(top_srcdir)/auxdir/x_ac_debug.m4 \
+	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
+	$(top_srcdir)/auxdir/x_ac_federation.m4 \
+	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
+	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_munge.m4 \
+	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_pam.m4 \
+	$(top_srcdir)/auxdir/x_ac_ptrace.m4 \
+	$(top_srcdir)/auxdir/x_ac_readline.m4 \
+	$(top_srcdir)/auxdir/x_ac_setpgrp.m4 \
+	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
+	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
+	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
+	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+	$(ACLOCAL_M4)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h $(top_builddir)/slurm/slurm.h
+CONFIG_CLEAN_FILES =
+am__installdirs = "$(DESTDIR)$(bindir)"
+binPROGRAMS_INSTALL = $(INSTALL_PROGRAM)
+PROGRAMS = $(bin_PROGRAMS)
+am_sprio_OBJECTS = sprio.$(OBJEXT) print.$(OBJEXT) opts.$(OBJEXT)
+sprio_OBJECTS = $(am_sprio_OBJECTS)
+sprio_DEPENDENCIES = $(top_builddir)/src/api/libslurm.o
+sprio_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
+	--mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(sprio_LDFLAGS) \
+	$(LDFLAGS) -o $@
+DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm
+depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp
+am__depfiles_maybe = depfiles
+COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \
+	$(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
+LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
+	--mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \
+	$(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
+CCLD = $(CC)
+LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
+	--mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \
+	$(LDFLAGS) -o $@
+SOURCES = $(sprio_SOURCES)
+DIST_SOURCES = $(sprio_SOURCES)
+HEADERS = $(noinst_HEADERS)
+ETAGS = etags
+CTAGS = ctags
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AR = @AR@
+AUTHD_CFLAGS = @AUTHD_CFLAGS@
+AUTHD_LIBS = @AUTHD_LIBS@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
+BLUEGENE_LOADED = @BLUEGENE_LOADED@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CMD_LDFLAGS = @CMD_LDFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CXX = @CXX@
+CXXCPP = @CXXCPP@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+DSYMUTIL = @DSYMUTIL@
+ECHO = @ECHO@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+ELAN_LIBS = @ELAN_LIBS@
+EXEEXT = @EXEEXT@
+F77 = @F77@
+FEDERATION_LDFLAGS = @FEDERATION_LDFLAGS@
+FFLAGS = @FFLAGS@
+GREP = @GREP@
+GTK2_CFLAGS = @GTK2_CFLAGS@
+GTK2_LIBS = @GTK2_LIBS@
+HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@
+HAVEPGCONFIG = @HAVEPGCONFIG@
+HAVEPKGCONFIG = @HAVEPKGCONFIG@
+HAVE_AIX = @HAVE_AIX@
+HAVE_ELAN = @HAVE_ELAN@
+HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_OPENSSL = @HAVE_OPENSSL@
+HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+LDFLAGS = @LDFLAGS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBTOOL = @LIBTOOL@
+LIB_LDFLAGS = @LIB_LDFLAGS@
+LN_S = @LN_S@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MKDIR_P = @MKDIR_P@
+MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@
+MUNGE_LDFLAGS = @MUNGE_LDFLAGS@
+MUNGE_LIBS = @MUNGE_LIBS@
+MYSQL_CFLAGS = @MYSQL_CFLAGS@
+MYSQL_LIBS = @MYSQL_LIBS@
+NCURSES = @NCURSES@
+NMEDIT = @NMEDIT@
+NUMA_LIBS = @NUMA_LIBS@
+OBJEXT = @OBJEXT@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PAM_LIBS = @PAM_LIBS@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PGSQL_CFLAGS = @PGSQL_CFLAGS@
+PGSQL_LIBS = @PGSQL_LIBS@
+PLPA_LIBS = @PLPA_LIBS@
+PROCTRACKDIR = @PROCTRACKDIR@
+PROJECT = @PROJECT@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+RANLIB = @RANLIB@
+READLINE_LIBS = @READLINE_LIBS@
+RELEASE = @RELEASE@
+SED = @SED@
+SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
+SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SLURMCTLD_PORT = @SLURMCTLD_PORT@
+SLURMDBD_PORT = @SLURMDBD_PORT@
+SLURMD_PORT = @SLURMD_PORT@
+SLURM_API_AGE = @SLURM_API_AGE@
+SLURM_API_CURRENT = @SLURM_API_CURRENT@
+SLURM_API_MAJOR = @SLURM_API_MAJOR@
+SLURM_API_REVISION = @SLURM_API_REVISION@
+SLURM_API_VERSION = @SLURM_API_VERSION@
+SLURM_MAJOR = @SLURM_MAJOR@
+SLURM_MICRO = @SLURM_MICRO@
+SLURM_MINOR = @SLURM_MINOR@
+SLURM_PREFIX = @SLURM_PREFIX@
+SLURM_VERSION = @SLURM_VERSION@
+SO_LDFLAGS = @SO_LDFLAGS@
+SSL_CPPFLAGS = @SSL_CPPFLAGS@
+SSL_LDFLAGS = @SSL_LDFLAGS@
+SSL_LIBS = @SSL_LIBS@
+STRIP = @STRIP@
+UTIL_LIBS = @UTIL_LIBS@
+VERSION = @VERSION@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+ac_ct_F77 = @ac_ct_F77@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_vendor = @build_vendor@
+builddir = @builddir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target = @target@
+target_alias = @target_alias@
+target_cpu = @target_cpu@
+target_os = @target_os@
+target_vendor = @target_vendor@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+AUTOMAKE_OPTIONS = foreign
+INCLUDES = -I$(top_srcdir)
+sprio_LDADD = $(top_builddir)/src/api/libslurm.o -ldl
+noinst_HEADERS = sprio.h print.h
+sprio_SOURCES = sprio.c print.c opts.c
+sprio_LDFLAGS = -export-dynamic $(CMD_LDFLAGS)
+all: all-am
+
+.SUFFIXES:
+.SUFFIXES: .c .lo .o .obj
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am  $(am__configure_deps)
+	@for dep in $?; do \
+	  case '$(am__configure_deps)' in \
+	    *$$dep*) \
+	      cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh \
+		&& exit 0; \
+	      exit 1;; \
+	  esac; \
+	done; \
+	echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign  src/sprio/Makefile'; \
+	cd $(top_srcdir) && \
+	  $(AUTOMAKE) --foreign  src/sprio/Makefile
+.PRECIOUS: Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+	@case '$?' in \
+	  *config.status*) \
+	    cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+	  *) \
+	    echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+	    cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+	esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+install-binPROGRAMS: $(bin_PROGRAMS)
+	@$(NORMAL_INSTALL)
+	test -z "$(bindir)" || $(MKDIR_P) "$(DESTDIR)$(bindir)"
+	@list='$(bin_PROGRAMS)'; for p in $$list; do \
+	  p1=`echo $$p|sed 's/$(EXEEXT)$$//'`; \
+	  if test -f $$p \
+	     || test -f $$p1 \
+	  ; then \
+	    f=`echo "$$p1" | sed 's,^.*/,,;$(transform);s/$$/$(EXEEXT)/'`; \
+	   echo " $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(binPROGRAMS_INSTALL) '$$p' '$(DESTDIR)$(bindir)/$$f'"; \
+	   $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(binPROGRAMS_INSTALL) "$$p" "$(DESTDIR)$(bindir)/$$f" || exit 1; \
+	  else :; fi; \
+	done
+
+uninstall-binPROGRAMS:
+	@$(NORMAL_UNINSTALL)
+	@list='$(bin_PROGRAMS)'; for p in $$list; do \
+	  f=`echo "$$p" | sed 's,^.*/,,;s/$(EXEEXT)$$//;$(transform);s/$$/$(EXEEXT)/'`; \
+	  echo " rm -f '$(DESTDIR)$(bindir)/$$f'"; \
+	  rm -f "$(DESTDIR)$(bindir)/$$f"; \
+	done
+
+clean-binPROGRAMS:
+	@list='$(bin_PROGRAMS)'; for p in $$list; do \
+	  f=`echo $$p|sed 's/$(EXEEXT)$$//'`; \
+	  echo " rm -f $$p $$f"; \
+	  rm -f $$p $$f ; \
+	done
+sprio$(EXEEXT): $(sprio_OBJECTS) $(sprio_DEPENDENCIES) 
+	@rm -f sprio$(EXEEXT)
+	$(sprio_LINK) $(sprio_OBJECTS) $(sprio_LDADD) $(LIBS)
+
+mostlyclean-compile:
+	-rm -f *.$(OBJEXT)
+
+distclean-compile:
+	-rm -f *.tab.c
+
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/opts.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/print.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sprio.Po@am__quote@
+
+.c.o:
+@am__fastdepCC_TRUE@	$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCC_TRUE@	mv -f $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(COMPILE) -c $<
+
+.c.obj:
+@am__fastdepCC_TRUE@	$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'`
+@am__fastdepCC_TRUE@	mv -f $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(COMPILE) -c `$(CYGPATH_W) '$<'`
+
+.c.lo:
+@am__fastdepCC_TRUE@	$(LTCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCC_TRUE@	mv -f $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LTCOMPILE) -c -o $@ $<
+
+mostlyclean-libtool:
+	-rm -f *.lo
+
+clean-libtool:
+	-rm -rf .libs _libs
+
+ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES)
+	list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
+	unique=`for i in $$list; do \
+	    if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+	  done | \
+	  $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
+	      END { if (nonempty) { for (i in files) print i; }; }'`; \
+	mkid -fID $$unique
+tags: TAGS
+
+TAGS:  $(HEADERS) $(SOURCES)  $(TAGS_DEPENDENCIES) \
+		$(TAGS_FILES) $(LISP)
+	tags=; \
+	here=`pwd`; \
+	list='$(SOURCES) $(HEADERS)  $(LISP) $(TAGS_FILES)'; \
+	unique=`for i in $$list; do \
+	    if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+	  done | \
+	  $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
+	      END { if (nonempty) { for (i in files) print i; }; }'`; \
+	if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \
+	  test -n "$$unique" || unique=$$empty_fix; \
+	  $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+	    $$tags $$unique; \
+	fi
+ctags: CTAGS
+CTAGS:  $(HEADERS) $(SOURCES)  $(TAGS_DEPENDENCIES) \
+		$(TAGS_FILES) $(LISP)
+	tags=; \
+	list='$(SOURCES) $(HEADERS)  $(LISP) $(TAGS_FILES)'; \
+	unique=`for i in $$list; do \
+	    if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+	  done | \
+	  $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
+	      END { if (nonempty) { for (i in files) print i; }; }'`; \
+	test -z "$(CTAGS_ARGS)$$tags$$unique" \
+	  || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
+	     $$tags $$unique
+
+GTAGS:
+	here=`$(am__cd) $(top_builddir) && pwd` \
+	  && cd $(top_srcdir) \
+	  && gtags -i $(GTAGS_ARGS) $$here
+
+distclean-tags:
+	-rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
+
+distdir: $(DISTFILES)
+	@srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+	topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+	list='$(DISTFILES)'; \
+	  dist_files=`for file in $$list; do echo $$file; done | \
+	  sed -e "s|^$$srcdirstrip/||;t" \
+	      -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+	case $$dist_files in \
+	  */*) $(MKDIR_P) `echo "$$dist_files" | \
+			   sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+			   sort -u` ;; \
+	esac; \
+	for file in $$dist_files; do \
+	  if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+	  if test -d $$d/$$file; then \
+	    dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+	    if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+	      cp -pR $(srcdir)/$$file $(distdir)$$dir || exit 1; \
+	    fi; \
+	    cp -pR $$d/$$file $(distdir)$$dir || exit 1; \
+	  else \
+	    test -f $(distdir)/$$file \
+	    || cp -p $$d/$$file $(distdir)/$$file \
+	    || exit 1; \
+	  fi; \
+	done
+check-am: all-am
+check: check-am
+all-am: Makefile $(PROGRAMS) $(HEADERS)
+installdirs:
+	for dir in "$(DESTDIR)$(bindir)"; do \
+	  test -z "$$dir" || $(MKDIR_P) "$$dir"; \
+	done
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+	@$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+	$(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+	  install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+	  `test -z '$(STRIP)' || \
+	    echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+	-test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+
+maintainer-clean-generic:
+	@echo "This command is intended for maintainers to use"
+	@echo "it deletes files that may require special tools to rebuild."
+clean: clean-am
+
+clean-am: clean-binPROGRAMS clean-generic clean-libtool mostlyclean-am
+
+distclean: distclean-am
+	-rm -rf ./$(DEPDIR)
+	-rm -f Makefile
+distclean-am: clean-am distclean-compile distclean-generic \
+	distclean-tags
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-exec-am: install-binPROGRAMS
+
+install-html: install-html-am
+
+install-info: install-info-am
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-ps: install-ps-am
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+	-rm -rf ./$(DEPDIR)
+	-rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-compile mostlyclean-generic \
+	mostlyclean-libtool
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am: uninstall-binPROGRAMS
+
+.MAKE: install-am install-strip
+
+.PHONY: CTAGS GTAGS all all-am check check-am clean clean-binPROGRAMS \
+	clean-generic clean-libtool ctags distclean distclean-compile \
+	distclean-generic distclean-libtool distclean-tags distdir dvi \
+	dvi-am html html-am info info-am install install-am \
+	install-binPROGRAMS install-data install-data-am install-dvi \
+	install-dvi-am install-exec install-exec-am install-html \
+	install-html-am install-info install-info-am install-man \
+	install-pdf install-pdf-am install-ps install-ps-am \
+	install-strip installcheck installcheck-am installdirs \
+	maintainer-clean maintainer-clean-generic mostlyclean \
+	mostlyclean-compile mostlyclean-generic mostlyclean-libtool \
+	pdf pdf-am ps ps-am tags uninstall uninstall-am \
+	uninstall-binPROGRAMS
+
+
+force:
+$(sprio_LDADD) : force
+	@cd `dirname $@` && $(MAKE) `basename $@`
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/src/sprio/opts.c b/src/sprio/opts.c
new file mode 100644
index 0000000000000000000000000000000000000000..f4c0c92765c17b2b76e9de3449e698ea24a78404
--- /dev/null
+++ b/src/sprio/opts.c
@@ -0,0 +1,475 @@
+/****************************************************************************\
+ *  opts.c - sprio command line option parsing
+ *****************************************************************************
+ *  Copyright (C) 2009 Lawrence Livermore National Security.
+ *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+ *  Written by Don Lipari <lipari1@llnl.gov>
+ *  CODE-OCEC-09-009. All rights reserved.
+ *
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
+ *
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission 
+ *  to link the code of portions of this program with the OpenSSL library under 
+ *  certain conditions as described in each individual source file, and 
+ *  distribute linked combinations including the two. You must obey the GNU 
+ *  General Public License in all respects for all of the code used other than 
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this 
+ *  exception to your version of the file(s), but you are not obligated to do 
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in 
+ *  the program, then also delete it here.
+ *
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#ifndef _GNU_SOURCE
+#  define _GNU_SOURCE
+#endif
+
+#ifdef HAVE_CONFIG_H
+#  include "config.h"
+#endif
+
+#if HAVE_GETOPT_H
+#  include <getopt.h>
+#else
+#  include "src/common/getopt.h"
+#endif
+
+#include <pwd.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include "src/common/read_config.h"
+#include "src/common/uid.h"
+#include "src/common/xstring.h"
+#include "src/sprio/sprio.h"
+
+/* getopt_long options, integers but not characters */
+#define OPT_LONG_HELP  0x100
+#define OPT_LONG_USAGE 0x101
+
+/* FUNCTIONS */
+static List  _build_job_list( char* str );
+static List  _build_user_list( char* str );
+static char *_get_prefix(char *token);
+static void  _help( void );
+static void  _parse_token( char *token, char *field, int *field_size, 
+                           bool *right_justify, char **suffix);
+static void  _print_options( void );
+static void  _print_version( void );
+static void  _usage( void );
+
+/*
+ * parse_command_line
+ */
+extern void
+parse_command_line( int argc, char* argv[] )
+{
+	int opt_char;
+	int option_index;
+	static struct option long_options[] = {
+		{"noheader",   no_argument,       0, 'h'},
+		{"jobs",       optional_argument, 0, 'j'},
+		{"long",       no_argument,       0, 'l'},
+		{"norm",       no_argument,       0, 'n'},
+		{"format",     required_argument, 0, 'o'},
+		{"user",       required_argument, 0, 'u'},
+		{"users",      required_argument, 0, 'u'},
+		{"verbose",    no_argument,       0, 'v'},
+		{"version",    no_argument,       0, 'V'},
+		{"weights",    no_argument,       0, 'w'},
+		{"help",       no_argument,       0, OPT_LONG_HELP},
+		{"usage",      no_argument,       0, OPT_LONG_USAGE},
+		{NULL,         0,                 0, 0}
+	};
+
+	while((opt_char = getopt_long(argc, argv, "hj::lno:u:vVw",
+				      long_options, &option_index)) != -1) {
+		switch (opt_char) {
+		case (int)'?':
+			fprintf(stderr, "Try \"sprio --help\" "
+				"for more information\n");
+			exit(1);
+		case (int)'h':
+			params.no_header = true;
+			break;
+		case (int) 'j':
+			if (optarg) {
+				params.jobs = xstrdup(optarg);
+				params.job_list = _build_job_list(params.jobs);
+			}
+			params.job_flag = true;
+			break;
+		case (int) 'l':
+			params.long_list = true;
+			break;
+		case (int) 'n':
+			params.normalized = true;
+			break;
+		case (int) 'o':
+			xfree(params.format);
+			params.format = xstrdup(optarg);
+			break;
+		case (int) 'u':
+			xfree(params.users);
+			params.users = xstrdup(optarg);
+			params.user_list = _build_user_list(params.users);
+			break;
+		case (int) 'v':
+			params.verbose++;
+			break;
+		case (int) 'V':
+			_print_version();
+			exit(0);
+		case (int) 'w':
+			params.weights = true;
+			break;
+		case OPT_LONG_HELP:
+			_help();
+			exit(0);
+		case OPT_LONG_USAGE:
+			_usage();
+			exit(0);
+		}
+	}
+
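+	/* A bare trailing argument is accepted as the job list when -j
+	 * was given without an attached argument */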
+	if (optind < argc) {
+		if (params.job_flag) {
+			params.jobs = xstrdup(argv[optind++]);
+			params.job_list = _build_job_list(params.jobs);
+		}
+		if (optind < argc) {
+			error("Unrecognized option: %s",argv[optind]);
+			_usage();
+			exit(1);
+		}
+	}
+
+	if ( params.verbose )
+		_print_options();
+}
+
+/*
+ * parse_format - Take the user's format specification and use it to build
+ *	the format specifications (internalize it to print.c data
+ *	structures)
+ * IN format - user's format specification
+ * RET zero or error code
+ */
+extern int parse_format( char* format )
+{
+	int field_size;
+	bool right_justify;
+	char *prefix = NULL, *suffix = NULL, *token = NULL;
+	char *tmp_char = NULL, *tmp_format = NULL;
+	char field[1];
+
+	if (format == NULL) {
+		error ("Format option lacks specification.");
+		exit( 1 );
+	}
+
+	params.format_list = list_create( NULL );
+	if ((prefix = _get_prefix(format))) {
+		job_format_add_prefix( params.format_list, 0, 0, prefix);
+	}
+
+	field_size = strlen( format );
+	tmp_format = xmalloc( field_size + 1 );
+	strcpy( tmp_format, format );
+
+	token = strtok_r( tmp_format, "%", &tmp_char);
+	if (token && (format[0] != '%'))	/* toss the prefix; handled above */
+		token = strtok_r( NULL, "%", &tmp_char );
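+	/* Recognized field codes (lower case = normalized, upper = weighted):
+	 * a/A age, f/F fair-share, i job id, j/J job size, N nice,
+	 * p/P partition, q/Q QOS, u user name, y/Y total job priority */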
+	while (token) {
+		_parse_token( token, field, &field_size, &right_justify,
+			      &suffix);
+		if (field[0] == 'a')
+			job_format_add_age_priority_normalized(params.format_list,
+							       field_size,
+							       right_justify,
+							       suffix );
+		else if (field[0] == 'A')
+			job_format_add_age_priority_weighted(params.format_list,
+							     field_size,
+							     right_justify,
+							     suffix );
+		else if (field[0] == 'f')
+			job_format_add_fs_priority_normalized(params.format_list,
+							      field_size,
+							      right_justify,
+							      suffix );
+		else if (field[0] == 'F')
+			job_format_add_fs_priority_weighted(params.format_list,
+							    field_size,
+							    right_justify,
+							    suffix );
+		else if (field[0] == 'i')
+			job_format_add_job_id(params.format_list,
+					      field_size,
+					      right_justify,
+					      suffix );
+		else if (field[0] == 'j')
+			job_format_add_js_priority_normalized(params.format_list,
+							      field_size,
+							      right_justify,
+							      suffix );
+		else if (field[0] == 'J')
+			job_format_add_js_priority_weighted(params.format_list,
+							    field_size,
+							    right_justify,
+							    suffix );
+		else if (field[0] == 'N')
+			job_format_add_job_nice(params.format_list,
+						field_size,
+						right_justify,
+						suffix );
+		else if (field[0] == 'p')
+			job_format_add_part_priority_normalized(params.format_list,
+								field_size,
+								right_justify,
+								suffix );
+		else if (field[0] == 'P')
+			job_format_add_part_priority_weighted(params.format_list,
+							      field_size,
+							      right_justify,
+							      suffix );
+		else if (field[0] == 'q')
+			job_format_add_qos_priority_normalized(params.format_list,
+							       field_size,
+							       right_justify,
+							       suffix );
+		else if (field[0] == 'Q')
+			job_format_add_qos_priority_weighted(params.format_list,
+							     field_size,
+							     right_justify,
+							     suffix );
+		else if (field[0] == 'u')
+			job_format_add_user_name(params.format_list,
+						 field_size,
+						 right_justify,
+						 suffix );
+		else if (field[0] == 'y')
+			job_format_add_job_priority_normalized(params.format_list,
+							       field_size,
+							       right_justify,
+							       suffix );
+		else if (field[0] == 'Y')
+			job_format_add_job_priority_weighted(params.format_list,
+							     field_size,
+							     right_justify,
+							     suffix );
+		else
+			error( "Invalid job format specification: %c",
+			       field[0] );
+
+		token = strtok_r( NULL, "%", &tmp_char);
+	}
+
+	xfree( tmp_format );
+	return SLURM_SUCCESS;
+}
+
+/* Take a format specification and copy out its prefix
+ * IN/OUT token - input specification, everything before "%" is removed
+ * RET - everything before "%" in the token
+ */
+static char *
+_get_prefix( char *token )
+{
+	char *pos, *prefix;
+
+	if (token == NULL)
+		return NULL;
+
+	pos = strchr(token, (int) '%');
+	if (pos == NULL)	/* everything is prefix */
+		return xstrdup(token);
+	if (pos == token)	/* no prefix */
+		return NULL;
+
+	pos[0] = '\0';		/* some prefix */
+	prefix = xstrdup(token);
+	pos[0] = '%';
+	memmove(token, pos, (strlen(pos)+1));
+	return prefix;
+}
+
+/* Take a format specification and break it into its components
+ * IN token - input specification without leading "%", eg. ".5u"
+ * OUT field - the letter code for the data type
+ * OUT field_size - byte count
+ * OUT right_justify - true if the field is to be right justified
+ * OUT suffix - string containing everything after the field specification
+ */
+static void
+_parse_token( char *token, char *field, int *field_size, bool *right_justify, 
+	      char **suffix)
+{
+	int i = 0;
+
+	assert (token != NULL);
+
+	if (token[i] == '.') {
+		*right_justify = true;
+		i++;
+	} else
+		*right_justify = false;
+
+	*field_size = 0;
+	while ((token[i] >= '0') && (token[i] <= '9'))
+		*field_size = (*field_size * 10) + (token[i++] - '0');
+
+	field[0] = token[i++];
+
+	*suffix = xstrdup(&token[i]);
+}
+
+/* print the parameters specified */
+static void
+_print_options()
+{
+	ListIterator iterator;
+	int i;
+	uint32_t *job_id;
+	uint32_t *user;
+
+	printf( "-----------------------------\n" );
+	printf( "format     = %s\n", params.format );
+	printf( "job_flag   = %d\n", params.job_flag );
+	printf( "jobs       = %s\n", params.jobs );
+	printf( "users      = %s\n", params.users );
+	printf( "verbose    = %d\n", params.verbose );
+
+	if ((params.verbose > 1) && params.job_list) {
+		i = 0;
+		iterator = list_iterator_create( params.job_list );
+		while ( (job_id = list_next( iterator )) ) {
+			printf( "job_list[%d] = %u\n", i++, *job_id);
+		}
+		list_iterator_destroy( iterator );
+	}
+
+	if ((params.verbose > 1) && params.user_list) {
+		i = 0;
+		iterator = list_iterator_create( params.user_list );
+		while ( (user = list_next( iterator )) ) {
+			printf( "user_list[%d] = %u\n", i++, *user);
+		}
+		list_iterator_destroy( iterator );
+	}
+
+	printf( "-----------------------------\n\n\n" );
+}
+
+
+/*
+ * _build_job_list - build a list of job_ids
+ * IN str - comma separated list of job_ids
+ * RET List of job_ids (uint32_t)
+ */
+static List
+_build_job_list( char* str )
+{
+	List my_list;
+	char *job = NULL, *tmp_char = NULL, *my_job_list = NULL;
+	int i;
+	uint32_t *job_id = NULL;
+
+	if ( str == NULL)
+		return NULL;
+	my_list = list_create( NULL );
+	my_job_list = xstrdup( str );
+	job = strtok_r( my_job_list, ",", &tmp_char );
+	while (job)
+	{
+		i = strtol( job, (char **) NULL, 10 );
+		if (i <= 0) {
+			error( "Invalid job id: %s", job );
+			exit( 1 );
+		}
+		job_id = xmalloc( sizeof( uint32_t ) );
+		*job_id = (uint32_t) i;
+		list_append( my_list, job_id );
+		job = strtok_r (NULL, ",", &tmp_char);
+	}
+	return my_list;
+}
+
+/*
+ * _build_user_list - build a list of UIDs
+ * IN str - comma separated list of user names
+ * RET List of UIDs (uint32_t)
+ */
+static List
+_build_user_list( char* str )
+{
+	List my_list;
+	char *user = NULL;
+	char *tmp_char = NULL, *my_user_list = NULL;
+	uint32_t *uid = NULL;
+
+	if ( str == NULL)
+		return NULL;
+	my_list = list_create( NULL );
+	my_user_list = xstrdup( str );
+	user = strtok_r( my_user_list, ",", &tmp_char );
+	while (user) {
+		uid = xmalloc( sizeof( uint32_t ));
+		*uid = uid_from_string(user);
+		if (*uid == (uint32_t) -1) {
+			error( "Invalid user: %s", user );
+			xfree(uid);
+		} else {
+			list_append( my_list, uid );
+		}
+		user = strtok_r (NULL, ",", &tmp_char);
+	}
+	return my_list;
+}
+
+static void _print_version(void)
+{
+	printf("%s %s\n", PACKAGE, SLURM_VERSION);
+}
+
+static void _usage(void)
+{
+	printf("Usage: sprio [-j jid[s]] [-u user_name[s]] [-o format] [--usage] [-hlnvVw]\n");
+}
+
+static void _help(void)
+{
+	printf("\
+Usage: sprio [OPTIONS]\n\
+  -h, --noheader                  no headers on output\n\
+  -j, --jobs                      comma separated list of jobs\n\
+                                  to view, default is all\n\
+  -l, --long                      long report\n\
+  -n, --norm                      display normalized values\n\
+  -o, --format=format             format specification\n\
+  -u, --user=user_name            comma separated list of users to view\n\
+  -v, --verbose                   verbosity level\n\
+  -V, --version                   output version information and exit\n\
+  -w, --weights                   show the weights for each priority factor\n\
+\nHelp options:\n\
+  --help                          show this help message\n\
+  --usage                         display a brief summary of sprio options\n");
+}
diff --git a/src/sprio/print.c b/src/sprio/print.c
new file mode 100644
index 0000000000000000000000000000000000000000..c46789ed4755a1c211f04a6b0e1977f0e394c93c
--- /dev/null
+++ b/src/sprio/print.c
@@ -0,0 +1,438 @@
+/*****************************************************************************\
+ *  print.c - sprio print job functions
+ *****************************************************************************
+ *  Copyright (C) 2009 Lawrence Livermore National Security.
+ *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+ *  Written by Don Lipari <lipari1@llnl.gov>
+ *  CODE-OCEC-09-009. All rights reserved.
+ *    
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
+ *  
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and
+ *  distribute linked combinations including the two. You must obey the GNU
+ *  General Public License in all respects for all of the code used other than
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this
+ *  exception to your version of the file(s), but you are not obligated to do
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in
+ *  the program, then also delete it here.
+ *
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#ifdef HAVE_CONFIG_H
+#  include "config.h"
+#endif /* HAVE_CONFIG_H */
+
+#include <stdio.h>
+#include <string.h>
+#include <time.h>
+#include <sys/types.h>
+
+#include "src/common/list.h"
+#include "src/common/macros.h"
+#include "src/slurmctld/slurmctld.h"
+#include "src/sprio/print.h"
+#include "src/sprio/sprio.h"
+#include "src/common/uid.h"
+#include "src/common/xmalloc.h"
+#include "src/common/xstring.h"
+
+static int	_print_str(char *str, int width, bool right, bool cut_output);
+
+/********************
+ * Global Variables *
+ ********************/
+extern uint32_t max_age; /* age at which the age factor stops accruing */
+extern uint32_t weight_age; /* weight for age factor */
+extern uint32_t weight_fs; /* weight for Fairshare factor */
+extern uint32_t weight_js; /* weight for Job Size factor */
+extern uint32_t weight_part; /* weight for Partition factor */
+extern uint32_t weight_qos; /* weight for QOS factor */
+
+
+/*****************************************************************************
+ * Global Print Functions
+ *****************************************************************************/
+
+int print_jobs_array(List jobs, List format)
+{
+	if (!params.no_header)
+		print_job_from_format(NULL, format);
+
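+	/* A job pointer of -1 is the sentinel used throughout print.c to
+	 * request the row of configured factor weights instead of a job */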
+	if (params.weights) {
+		print_job_from_format((priority_factors_object_t *) -1, format);
+		return SLURM_SUCCESS;
+	}
+
+	/* Print the jobs of interest */
+	if (jobs)
+		list_for_each (jobs, (ListForF) print_job_from_format,
+			       (void *) format);
+
+	return SLURM_SUCCESS;
+}
+
+static int _print_str(char *str, int width, bool right, bool cut_output)
+{
+	char format[64];
+	int printed = 0;
+
+	if (right == true && width != 0)
+		snprintf(format, 64, "%%%ds", width);
+	else if (width != 0)
+		snprintf(format, 64, "%%.%ds", width);
+	else {
+		format[0] = '%';
+		format[1] = 's';
+		format[2] = '\0';
+	}
+
+	if ((width == 0) || (cut_output == false)) {
+		if ((printed = printf(format, str)) < 0)
+			return printed;
+	} else {
+		char temp[width + 1];
+		snprintf(temp, width + 1, format, str);
+		if ((printed = printf("%s",temp)) < 0)
+			return printed;
+	}
+
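+	/* pad a short left-justified field out to the requested width */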
+	while (printed++ < width)
+		printf(" ");
+
+	return printed;
+}
+
+int _print_int(int number, int width, bool right, bool cut_output)
+{
+	char buf[32];
+
+	snprintf(buf, 32, "%d", number);
+	return _print_str(buf, width, right, cut_output);
+}
+
+int _print_norm(double number, int width, bool right, bool cut_output)
+{
+	char buf[32];
+
+	snprintf(buf, 32, "%.7lf", number);
+	return _print_str(buf, width, right, cut_output);
+}
+
+
+/*****************************************************************************
+ * Job Print Functions
+ *****************************************************************************/
+int print_job_from_format(priority_factors_object_t * job, List list)
+{
+	ListIterator i = list_iterator_create(list);
+	job_format_t *current;
+	int total_width = 0;
+
+	while ((current = (job_format_t *) list_next(i)) != NULL) {
+		if (current->function(job, current->width,
+				      current->right_justify,
+				      current->suffix) != SLURM_SUCCESS) {
+			/* destroy the iterator before bailing out */
+			list_iterator_destroy(i);
+			return SLURM_ERROR;
+		}
+		if (current->width)
+			total_width += (current->width + 1);
+		else
+			total_width += 10;
+	}
+	list_iterator_destroy(i);
+
+	printf("\n");
+
+	return SLURM_SUCCESS;
+}
+
+int job_format_add_function(List list, int width, bool right, char *suffix,
+			    int (*function) (priority_factors_object_t *,
+			    int, bool, char*))
+{
+	job_format_t *tmp = (job_format_t *) xmalloc(sizeof(job_format_t));
+	tmp->function = function;
+	tmp->width = width;
+	tmp->right_justify = right;
+	tmp->suffix = suffix;
+
+	if (list_append(list, tmp) == NULL) {
+		fprintf(stderr, "Memory exhausted\n");
+		exit(1);
+	}
+	return SLURM_SUCCESS;
+}
+
+
+int _print_job_job_id(priority_factors_object_t * job, int width,
+		      bool right, char* suffix)
+{
+	if (job == NULL)	/* Print the Header instead */
+		_print_str("JOBID", width, right, true);
+	else if (job == (priority_factors_object_t *) -1)
+		_print_str("Weights", width, right, true);
+	else {
+		char id[FORMAT_STRING_SIZE];
+		snprintf(id, FORMAT_STRING_SIZE, "%u", job->job_id);
+		_print_str(id, width, right, true);
+	}
+	if (suffix)
+		printf("%s", suffix);
+	return SLURM_SUCCESS;
+}
+
+int _print_job_prefix(priority_factors_object_t * job, int width,
+		      bool right, char* suffix)
+{
+	if (suffix)
+		printf("%s", suffix);
+	return SLURM_SUCCESS;
+}
+
+int _print_age_priority_normalized(priority_factors_object_t * job, int width,
+				   bool right, char* suffix)
+{
+	if (job == NULL)	/* Print the Header instead */
+		_print_str("AGE", width, right, true);
+	else if (job == (priority_factors_object_t *) -1)
+		_print_int(weight_age, width, right, true);
+	else
+		_print_norm(job->priority_age, width, right, true);
+	if (suffix)
+		printf("%s", suffix);
+	return SLURM_SUCCESS;
+}
+
+int _print_age_priority_weighted(priority_factors_object_t * job, int width,
+				 bool right, char* suffix)
+{
+	if (job == NULL)	/* Print the Header instead */
+		_print_str("AGE", width, right, true);
+	else if (job == (priority_factors_object_t *) -1)
+		_print_int(weight_age, width, right, true);
+	else
+		_print_int(job->priority_age * weight_age, width, right, true);
+	if (suffix)
+		printf("%s", suffix);
+	return SLURM_SUCCESS;
+}
+
+int _print_fs_priority_normalized(priority_factors_object_t * job, int width,
+				  bool right, char* suffix)
+{
+	if (job == NULL)	/* Print the Header instead */
+		_print_str("FAIRSHARE", width, right, true);
+	else if (job == (priority_factors_object_t *) -1)
+		_print_int(weight_fs, width, right, true);
+	else
+		_print_norm(job->priority_fs, width, right, true);
+	if (suffix)
+		printf("%s", suffix);
+	return SLURM_SUCCESS;
+}
+
+int _print_fs_priority_weighted(priority_factors_object_t * job, int width,
+				bool right, char* suffix)
+{
+	if (job == NULL)	/* Print the Header instead */
+		_print_str("FAIRSHARE", width, right, true);
+	else if (job == (priority_factors_object_t *) -1)
+		_print_int(weight_fs, width, right, true);
+	else
+		_print_int(job->priority_fs * weight_fs, width, right, true);
+	if (suffix)
+		printf("%s", suffix);
+	return SLURM_SUCCESS;
+}
+
+int _print_job_priority_normalized(priority_factors_object_t * job, int width,
+				   bool right, char* suffix)
+{
+	char temp[FORMAT_STRING_SIZE];
+	if (job == NULL)	/* Print the Header instead */
+		_print_str("PRIORITY", width, right, true);
+	else if (job == (priority_factors_object_t *) -1)
+		_print_str("", width, right, true);
+	else {
+		double age_priority = job->priority_age * (double)weight_age;
+		double fs_priority = job->priority_fs * (double)weight_fs;
+		double js_priority = job->priority_js * (double)weight_js;
+		double part_priority = job->priority_part * (double)weight_part;
+		double qos_priority = job->priority_qos * (double)weight_qos;
+		double priority = age_priority + fs_priority + js_priority +
+				  part_priority + qos_priority;
+		priority -= (double)(job->nice - NICE_OFFSET);
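+		/* normalize to [0,1] by dividing by the maximum
+		 * 32-bit priority value */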
+		double prio = priority / (double) ((uint32_t) 0xffffffff);
+
+		sprintf(temp, "%16.14f", prio);
+		_print_str(temp, width, right, true);
+	}
+	if (suffix)
+		printf("%s", suffix);
+	return SLURM_SUCCESS;
+}
+
+int _print_job_priority_weighted(priority_factors_object_t * job, int width,
+				 bool right, char* suffix)
+{
+	char temp[FORMAT_STRING_SIZE];
+	if (job == NULL)	/* Print the Header instead */
+		_print_str("PRIORITY", width, right, true);
+	else if (job == (priority_factors_object_t *) -1)
+		_print_str("", width, right, true);
+	else {
+		double age_priority = job->priority_age * (double)weight_age;
+		double fs_priority = job->priority_fs * (double)weight_fs;
+		double js_priority = job->priority_js * (double)weight_js;
+		double part_priority = job->priority_part * (double)weight_part;
+		double qos_priority = job->priority_qos * (double)weight_qos;
+		uint32_t priority = (uint32_t) (age_priority + fs_priority +
+						js_priority + part_priority +
+						qos_priority);
+		priority -= (uint32_t)(job->nice - NICE_OFFSET);
+
+		sprintf(temp, "%u", priority);
+		_print_str(temp, width, right, true);
+	}
+	if (suffix)
+		printf("%s", suffix);
+	return SLURM_SUCCESS;
+}
+
+int _print_js_priority_normalized(priority_factors_object_t * job, int width,
+				  bool right, char* suffix)
+{
+	if (job == NULL)	/* Print the Header instead */
+		_print_str("JOBSIZE", width, right, true);
+	else if (job == (priority_factors_object_t *) -1)
+		_print_int(weight_js, width, right, true);
+	else {
+		_print_norm(job->priority_js, width, right, true);
+	}
+	if (suffix)
+		printf("%s", suffix);
+
+	return SLURM_SUCCESS;
+}
+
+int _print_js_priority_weighted(priority_factors_object_t * job, int width,
+				bool right, char* suffix)
+{
+	if (job == NULL)	/* Print the Header instead */
+		_print_str("JOBSIZE", width, right, true);
+	else if (job == (priority_factors_object_t *) -1)
+		_print_int(weight_js, width, right, true);
+	else {
+		_print_int(job->priority_js * weight_js, width, right, true);
+	}
+	if (suffix)
+		printf("%s", suffix);
+
+	return SLURM_SUCCESS;
+}
+
+int _print_part_priority_normalized(priority_factors_object_t * job, int width,
+				    bool right, char* suffix)
+{
+	if (job == NULL)	/* Print the Header instead */
+		_print_str("PARTITION", width, right, true);
+	else if (job == (priority_factors_object_t *) -1)
+		_print_int(weight_part, width, right, true);
+	else
+		_print_norm(job->priority_part, width, right, true);
+	if (suffix)
+		printf("%s", suffix);
+	return SLURM_SUCCESS;
+}
+
+int _print_part_priority_weighted(priority_factors_object_t * job, int width,
+				  bool right, char* suffix)
+{
+	if (job == NULL)	/* Print the Header instead */
+		_print_str("PARTITION", width, right, true);
+	else if (job == (priority_factors_object_t *) -1)
+		_print_int(weight_part, width, right, true);
+	else
+		_print_int(job->priority_part * weight_part, width, right, true);
+	if (suffix)
+		printf("%s", suffix);
+	return SLURM_SUCCESS;
+}
+
+int _print_qos_priority_normalized(priority_factors_object_t * job, int width,
+				   bool right, char* suffix)
+{
+	if (job == NULL)	/* Print the Header instead */
+		_print_str("QOS", width, right, true);
+	else if (job == (priority_factors_object_t *) -1)
+		_print_int(weight_qos, width, right, true);
+	else
+		_print_norm(job->priority_qos, width, right, true);
+	if (suffix)
+		printf("%s", suffix);
+	return SLURM_SUCCESS;
+}
+
+int _print_qos_priority_weighted(priority_factors_object_t * job, int width,
+				 bool right, char* suffix)
+{
+	if (job == NULL)	/* Print the Header instead */
+		_print_str("QOS", width, right, true);
+	else if (job == (priority_factors_object_t *) -1)
+		_print_int(weight_qos, width, right, true);
+	else
+		_print_int(job->priority_qos * weight_qos, width, right, true);
+	if (suffix)
+		printf("%s", suffix);
+	return SLURM_SUCCESS;
+}
+
+int _print_job_nice(priority_factors_object_t * job, int width,
+				bool right, char* suffix)
+{
+	if (job == NULL)	/* Print the Header instead */
+		_print_str("NICE", width, right, true);
+	else if (job == (priority_factors_object_t *) -1)
+		_print_str("", width, right, true);
+	else
+		_print_int(job->nice - NICE_OFFSET, width, right, true);
+	if (suffix)
+		printf("%s", suffix);
+	return SLURM_SUCCESS;
+}
+
+int _print_job_user_name(priority_factors_object_t * job, int width,
+			 bool right, char* suffix)
+{
+	if (job == NULL)	/* Print the Header instead */
+		_print_str("USER", width, right, true);
+	else if (job == (priority_factors_object_t *) -1)
+		_print_str("", width, right, true);
+	else {
+		char *uname = uid_to_string((uid_t) job->user_id);
+		_print_str(uname, width, right, true);
+		xfree(uname);
+	}
+	if (suffix)
+		printf("%s", suffix);
+	return SLURM_SUCCESS;
+}
+
diff --git a/src/sprio/print.h b/src/sprio/print.h
new file mode 100644
index 0000000000000000000000000000000000000000..fa33426885868976f4c03ac0972c33a54836a74c
--- /dev/null
+++ b/src/sprio/print.h
@@ -0,0 +1,138 @@
+/*****************************************************************************\
+ *  print.h - sprio print job definitions
+ *****************************************************************************
+ *  Copyright (C) 2002-2006 The Regents of the University of California.
+ *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+ *  Written by Joey Ekstrom <ekstrom1@llnl.gov>
+ *  CODE-OCEC-09-009. All rights reserved.
+ *  
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
+ *  
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission 
+ *  to link the code of portions of this program with the OpenSSL library under 
+ *  certain conditions as described in each individual source file, and 
+ *  distribute linked combinations including the two. You must obey the GNU 
+ *  General Public License in all respects for all of the code used other than 
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this 
+ *  exception to your version of the file(s), but you are not obligated to do 
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in 
+ *  the program, then also delete it here.
+ *  
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *  
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#ifndef _SPRIO_PRINT_H_
+#define _SPRIO_PRINT_H_
+
+#include <slurm/slurm.h>
+
+#include "src/common/list.h"
+
+#define FORMAT_STRING_SIZE 32
+
+/*****************************************************************************
+ * Format Structures
+ *****************************************************************************/
+typedef struct job_format {
+	int (*function) (priority_factors_object_t *, int, bool, char*);
+	uint32_t width;
+	bool right_justify;
+	char *suffix;
+} job_format_t;
+
+int print_jobs_array(List factors, List format);
+int print_job_from_format(priority_factors_object_t * job, List list);
+
+/*****************************************************************************
+ * Job Line Format Options
+ *****************************************************************************/
+int job_format_add_function(List list, int width, bool right_justify,
+			    char *suffix,
+			    int (*function) (priority_factors_object_t *,
+			    int, bool, char*));
+
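+/* Each macro below binds one print callback to job_format_add_function so
+ * parse_format() can register a column by its field code */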
+#define job_format_add_job_id(list,wid,right,suffix) \
+	job_format_add_function(list,wid,right,suffix,_print_job_job_id)
+#define job_format_add_prefix(list,wid,right,suffix) \
+	job_format_add_function(list,0,0,suffix,_print_job_prefix)
+#define job_format_add_age_priority_normalized(list,wid,right,suffix) \
+	job_format_add_function(list,wid,right,suffix,_print_age_priority_normalized)
+#define job_format_add_age_priority_weighted(list,wid,right,suffix) \
+	job_format_add_function(list,wid,right,suffix,_print_age_priority_weighted)
+#define job_format_add_fs_priority_normalized(list,wid,right,suffix) \
+	job_format_add_function(list,wid,right,suffix,_print_fs_priority_normalized)
+#define job_format_add_fs_priority_weighted(list,wid,right,suffix) \
+	job_format_add_function(list,wid,right,suffix,_print_fs_priority_weighted)
+#define job_format_add_job_priority_normalized(list,wid,right,suffix) \
+	job_format_add_function(list,wid,right,suffix,_print_job_priority_normalized)
+#define job_format_add_job_priority_weighted(list,wid,right,suffix) \
+	job_format_add_function(list,wid,right,suffix,_print_job_priority_weighted)
+#define job_format_add_js_priority_normalized(list,wid,right,suffix) \
+	job_format_add_function(list,wid,right,suffix,_print_js_priority_normalized)
+#define job_format_add_js_priority_weighted(list,wid,right,suffix) \
+	job_format_add_function(list,wid,right,suffix,_print_js_priority_weighted)
+#define job_format_add_part_priority_normalized(list,wid,right,suffix) \
+	job_format_add_function(list,wid,right,suffix,_print_part_priority_normalized)
+#define job_format_add_part_priority_weighted(list,wid,right,suffix) \
+	job_format_add_function(list,wid,right,suffix,_print_part_priority_weighted)
+#define job_format_add_qos_priority_normalized(list,wid,right,suffix) \
+	job_format_add_function(list,wid,right,suffix,_print_qos_priority_normalized)
+#define job_format_add_qos_priority_weighted(list,wid,right,suffix) \
+	job_format_add_function(list,wid,right,suffix,_print_qos_priority_weighted)
+#define job_format_add_job_nice(list,wid,right,suffix) \
+	job_format_add_function(list,wid,right,suffix,_print_job_nice)
+#define job_format_add_user_name(list,wid,right,suffix) \
+	job_format_add_function(list,wid,right,suffix,_print_job_user_name)
+
+/*****************************************************************************
+ * Job Line Print Functions
+ *****************************************************************************/
+int _print_job_job_id(priority_factors_object_t * job, int width,
+		      bool right_justify, char* suffix);
+int _print_job_prefix(priority_factors_object_t * job, int width,
+		      bool right_justify, char* suffix);
+int _print_age_priority_normalized(priority_factors_object_t * job, int width,
+				   bool right_justify, char* suffix);
+int _print_age_priority_weighted(priority_factors_object_t * job, int width,
+				 bool right_justify, char* suffix);
+int _print_fs_priority_normalized(priority_factors_object_t * job, int width,
+				  bool right_justify, char* suffix);
+int _print_fs_priority_weighted(priority_factors_object_t * job, int width,
+				bool right_justify, char* suffix);
+int _print_job_priority_normalized(priority_factors_object_t * job, int width,
+				   bool right_justify, char* suffix);
+int _print_job_priority_weighted(priority_factors_object_t * job, int width,
+				 bool right_justify, char* suffix);
+int _print_js_priority_normalized(priority_factors_object_t * job, int width,
+				  bool right_justify, char* suffix);
+int _print_js_priority_weighted(priority_factors_object_t * job, int width,
+				bool right_justify, char* suffix);
+int _print_part_priority_normalized(priority_factors_object_t * job, int width,
+				    bool right_justify,	char* suffix);
+int _print_part_priority_weighted(priority_factors_object_t * job, int width,
+				  bool right_justify, char* suffix);
+int _print_qos_priority_normalized(priority_factors_object_t * job, int width,
+				   bool right_justify, char* suffix);
+int _print_qos_priority_weighted(priority_factors_object_t * job, int width,
+				 bool right_justify, char* suffix);
+int _print_job_nice(priority_factors_object_t * job, int width,
+		    bool right_justify, char* suffix);
+int _print_job_user_name(priority_factors_object_t * job, int width,
+			 bool right_justify, char* suffix);
+
+#endif
diff --git a/src/sprio/sprio.c b/src/sprio/sprio.c
new file mode 100644
index 0000000000000000000000000000000000000000..f2e7f75498ad4cf6857349905a82a69f54f03d40
--- /dev/null
+++ b/src/sprio/sprio.c
@@ -0,0 +1,227 @@
+/*****************************************************************************\
+ *  sprio.c - Display the priority components of jobs in the slurm system
+ *****************************************************************************
+ *  Copyright (C) 2009 Lawrence Livermore National Security.
+ *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+ *  Written by Don Lipari <lipari1@llnl.gov>
+ *  CODE-OCEC-09-009. All rights reserved.
+ *  
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
+ *  
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission 
+ *  to link the code of portions of this program with the OpenSSL library under 
+ *  certain conditions as described in each individual source file, and 
+ *  distribute linked combinations including the two. You must obey the GNU 
+ *  General Public License in all respects for all of the code used other than 
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this 
+ *  exception to your version of the file(s), but you are not obligated to do 
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in 
+ *  the program, then also delete it here.
+ *  
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *  
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#ifdef HAVE_CONFIG_H
+#  include "config.h"
+#endif /* HAVE_CONFIG_H */
+
+#if HAVE_STDINT_H
+#  include <stdint.h>
+#endif
+
+#if HAVE_INTTYPES_H
+#  include <inttypes.h>
+#endif
+
+#ifdef HAVE_TERMCAP_H
+#  include <termcap.h>
+#endif
+
+#include <sys/ioctl.h>
+#include <termios.h>
+
+#include "src/common/slurm_priority.h"
+#include "src/common/xstring.h"
+#include "src/sprio/sprio.h"
+
+
+/********************
+ * Global Variables *
+ ********************/
+struct sprio_parameters params;
+uint32_t weight_age; /* weight for age factor */
+uint32_t weight_fs; /* weight for Fairshare factor */
+uint32_t weight_js; /* weight for Job Size factor */
+uint32_t weight_part; /* weight for Partition factor */
+uint32_t weight_qos; /* weight for QOS factor */
+
+static int _get_info(priority_factors_request_msg_t *factors_req,
+		     priority_factors_response_msg_t **factors_resp);
+
+int main (int argc, char *argv[])
+{
+	char *temp = NULL;
+	int error_code = SLURM_SUCCESS;
+	priority_factors_request_msg_t req_msg;
+	priority_factors_response_msg_t *resp_msg = NULL;
+	log_options_t opts = LOG_OPTS_STDERR_ONLY;
+
+	/* Check that a supported priority plugin is configured */
+	temp = slurm_get_priority_type();
+	if (strcasecmp(temp, "priority/multifactor")) {
+		fprintf (stderr, "You are not running a supported "
+			 "priority plugin\n(%s).\n"
+			 "Only 'priority/multifactor' is supported.\n",
+			temp);
+		xfree(temp);
+		exit(1);
+	}
+	xfree(temp);
+
+	log_init(xbasename(argv[0]), opts, SYSLOG_FACILITY_USER, NULL);
+
+	weight_age  = slurm_get_priority_weight_age();
+	weight_fs   = slurm_get_priority_weight_fairshare();
+	weight_js   = slurm_get_priority_weight_job_size();
+	weight_part = slurm_get_priority_weight_partition();
+	weight_qos  = slurm_get_priority_weight_qos();
+
+	parse_command_line( argc, argv );
+	if (params.verbose) {
+		opts.stderr_level += params.verbose;
+		log_alter(opts, SYSLOG_FACILITY_USER, NULL);
+	}
+
+	memset(&req_msg, 0, sizeof(priority_factors_request_msg_t));
+
+	if (params.jobs)
+		req_msg.job_id_list = params.job_list;
+	else
+		req_msg.job_id_list = NULL;
+
+	if (params.users)
+		req_msg.uid_list = params.user_list;
+	else
+		req_msg.uid_list = NULL;
+
+	error_code = _get_info(&req_msg, &resp_msg);
+
+	if (error_code) {
+		slurm_perror("Couldn't get priority factors from controller");
+		exit(error_code);
+	}
+
+	if (params.format == NULL) {
+		if (params.normalized) {
+			if (params.long_list)
+				params.format = "%.7i %.8u %10y %10a %10f %10j "
+					"%10p %10q";
+			else {
+				params.format = xstrdup("%.7i");
+				if (params.users)
+					xstrcat(params.format, " %.8u");
+				xstrcat(params.format, " %10y");
+				if (weight_age)
+					xstrcat(params.format, " %10a");
+				if (weight_fs)
+					xstrcat(params.format, " %10f");
+				if (weight_js)
+					xstrcat(params.format, " %10j");
+				if (weight_part)
+					xstrcat(params.format, " %10p");
+				if (weight_qos)
+					xstrcat(params.format, " %10q");
+			}
+		} else {
+			if (params.long_list)
+				params.format = "%.7i %.8u %.10Y %.10A %.10F "
+					"%.10J %.10P %.10Q %.6N";
+			else {
+				params.format = xstrdup("%.7i");
+				if (params.users)
+					xstrcat(params.format, " %.8u");
+				xstrcat(params.format, " %.10Y");
+				if (weight_age)
+					xstrcat(params.format, " %.10A");
+				if (weight_fs)
+					xstrcat(params.format, " %.10F");
+				if (weight_js)
+					xstrcat(params.format, " %.10J");
+				if (weight_part)
+					xstrcat(params.format, " %.10P");
+				if (weight_qos)
+					xstrcat(params.format, " %.10Q");
+			}
+		}
+	}
+
+	/* create the format list from the format */
+	parse_format(params.format);
+
+	if (params.jobs && (!resp_msg->priority_factors_list ||
+			    !list_count(resp_msg->priority_factors_list)))
+		printf("Unable to find jobs matching user/id(s) specified\n");
+	else
+		print_jobs_array(resp_msg->priority_factors_list,
+				 params.format_list);
+
+#if 0
+	/* Free storage here only if we want to verify that logic.
+	 * Since we exit next, this is not important. */
+ 	list_destroy(params.format_list);
+	slurm_free_priority_factors_response_msg(resp_msg);
+#endif
+
+	exit (error_code);
+}
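/*
 * A minimal, self-contained sketch of the default-format assembly above:
 * a column is appended only when its configured weight is nonzero.  It
 * uses plain libc instead of SLURM's xstrdup()/xstrcat(), and the weight
 * values passed in main() are made-up examples, not real slurm.conf
 * settings.
 */
#include <stdio.h>
#include <string.h>

static void build_default_format(char *buf, size_t size,
				 unsigned weight_age, unsigned weight_fs)
{
	snprintf(buf, size, "%%.7i");	/* the job id column is always shown */
	if (weight_age)			/* age column only if weighted */
		strncat(buf, " %.10A", size - strlen(buf) - 1);
	if (weight_fs)			/* fair-share column only if weighted */
		strncat(buf, " %.10F", size - strlen(buf) - 1);
}

int main(void)
{
	char fmt[64];
	build_default_format(fmt, sizeof(fmt), 1000, 0);
	printf("%s\n", fmt);		/* prints "%.7i %.10A" */
	return 0;
}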
+
+static int _get_info(priority_factors_request_msg_t *factors_req,
+		     priority_factors_response_msg_t **factors_resp)
+{
+	int rc;
+	slurm_msg_t req_msg;
+	slurm_msg_t resp_msg;
+
+	slurm_msg_t_init(&req_msg);
+	slurm_msg_t_init(&resp_msg);
+
+	req_msg.msg_type = REQUEST_PRIORITY_FACTORS;
+	req_msg.data     = factors_req;
+
+	if (slurm_send_recv_controller_msg(&req_msg, &resp_msg) < 0)
+		return SLURM_ERROR;
+
+	switch (resp_msg.msg_type) {
+	case RESPONSE_PRIORITY_FACTORS:
+		*factors_resp =
+			(priority_factors_response_msg_t *) resp_msg.data;
+		break;
+	case RESPONSE_SLURM_RC:
+		rc = ((return_code_msg_t *) resp_msg.data)->return_code;
+		slurm_free_return_code_msg(resp_msg.data);
+		if (rc)
+			slurm_seterrno_ret(rc);
+		*factors_resp = NULL;
+		break;
+	default:
+		slurm_seterrno_ret(SLURM_UNEXPECTED_MSG_ERROR);
+		break;
+	}
+
+	return SLURM_PROTOCOL_SUCCESS;
+}
diff --git a/src/sprio/sprio.h b/src/sprio/sprio.h
new file mode 100644
index 0000000000000000000000000000000000000000..d98503f60183e34e6181944f3bd639443c5f1214
--- /dev/null
+++ b/src/sprio/sprio.h
@@ -0,0 +1,94 @@
+/****************************************************************************\
+ *  sprio.h - definitions used for printing job queue state
+ *****************************************************************************
+ *  Copyright (C) 2009 Lawrence Livermore National Security.
+ *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+ *  Written by Don Lipari <lipari1@llnl.gov>
+ *  CODE-OCEC-09-009. All rights reserved.
+ *
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
+ *
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission 
+ *  to link the code of portions of this program with the OpenSSL library under 
+ *  certain conditions as described in each individual source file, and 
+ *  distribute linked combinations including the two. You must obey the GNU 
+ *  General Public License in all respects for all of the code used other than 
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this 
+ *  exception to your version of the file(s), but you are not obligated to do 
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in 
+ *  the program, then also delete it here.
+ *
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#ifndef __SPRIO_H__
+#define __SPRIO_H__
+
+#if HAVE_CONFIG_H
+#  include "config.h"
+#endif
+
+#include <ctype.h>
+#include <stdio.h>
+
+#if HAVE_INTTYPES_H
+#  include <inttypes.h>
+#else  /* !HAVE_INTTYPES_H */
+#  if HAVE_STDINT_H
+#    include <stdint.h>
+#  endif
+#endif  /* HAVE_INTTYPES_H */
+
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+#include <unistd.h>
+
+#include <slurm/slurm.h>
+
+#include "src/common/hostlist.h"
+#include "src/common/list.h"
+#include "src/common/log.h"
+#include "src/common/slurm_protocol_api.h"
+#include "src/common/xmalloc.h"
+#include "src/sprio/print.h"
+
+struct sprio_parameters {
+	bool job_flag;
+	bool long_list;
+	bool no_header;
+	bool normalized;
+	bool weights;
+
+	int  verbose;
+
+	char* format;
+	char* jobs;
+	char* users;
+
+	List  format_list;
+	List  job_list;
+	List  user_list;
+};
+
+extern struct sprio_parameters params;
+
+extern void parse_command_line( int argc, char* argv[] );
+extern int  parse_format( char* format );
+
+#endif
diff --git a/src/squeue/Makefile.in b/src/squeue/Makefile.in
index e085dcc4be34f55efb80d415ac66f216fbebac00..0e969cf23d5ca9fd7af8aae20dce7f4e3e39b8f2 100644
--- a/src/squeue/Makefile.in
+++ b/src/squeue/Makefile.in
@@ -47,14 +47,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -109,6 +113,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
diff --git a/src/squeue/opts.c b/src/squeue/opts.c
index 1d38df6c74dfe41cb3718ec5ba702686acb619de..e8552e8e706ed0d039fb98398a28ab4aef040e8e 100644
--- a/src/squeue/opts.c
+++ b/src/squeue/opts.c
@@ -1,15 +1,16 @@
 /****************************************************************************\
  *  opts.c - srun command line option parsing
  *
- *  $Id: opts.c 16350 2009-01-29 18:16:08Z jette $
+ *  $Id: opts.c 16616 2009-02-20 17:00:27Z jette $
  *****************************************************************************
  *  Copyright (C) 2002-2006 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Joey Ekstrom <ekstrom1@llnl.gov>, Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -635,6 +636,11 @@ extern int parse_format( char* format )
 				                          field_size, 
 				                          right_justify, 
 				                          suffix );
+			else if (field[0] == 'v')
+				job_format_add_reservation( params.format_list, 
+				                        field_size, 
+				                        right_justify, 
+				                        suffix );
 			else if (field[0] == 'w')
 				job_format_add_wckey( params.format_list, 
 						      field_size, 
diff --git a/src/squeue/print.c b/src/squeue/print.c
index 2d86c61e90802021be0d7f86115b7f6e08360805..2a222df3fc786f3fee5e8731ca226a19a8791f82 100644
--- a/src/squeue/print.c
+++ b/src/squeue/print.c
@@ -6,10 +6,11 @@
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Joey Ekstrom <ekstrom1@llnl.gov>, 
  *             Morris Jette <jette1@llnl.gov>, et. al.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *    
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -37,11 +38,11 @@
  *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
 \*****************************************************************************/
 
-#include <time.h>
+#include <grp.h>
+#include <pwd.h>
 #include <stdio.h>
 #include <string.h>
-#include <pwd.h>
-#include <grp.h>
+#include <time.h>
 #include <sys/types.h>
 
 #include "src/common/hostlist.h"
@@ -49,11 +50,11 @@
 #include "src/common/macros.h"
 #include "src/common/node_select.h"
 #include "src/common/parse_time.h"
+#include "src/squeue/print.h"
+#include "src/squeue/squeue.h"
 #include "src/common/uid.h"
 #include "src/common/xmalloc.h"
 #include "src/common/xstring.h"
-#include "src/squeue/print.h"
-#include "src/squeue/squeue.h"
 
 static int	_adjust_completing (job_info_t *j, node_info_msg_t **ni);
 static int	_filter_job(job_info_t * job);
@@ -333,9 +334,12 @@ int _print_job_reason(job_info_t * job, int width, bool right, char* suffix)
 	if (job == NULL)        /* Print the Header instead */
 		_print_str("REASON", width, right, true);
 	else {
-		char id[FORMAT_STRING_SIZE];
-		snprintf(id, FORMAT_STRING_SIZE, "%s", 
-			job_reason_string(job->state_reason));
+		char id[FORMAT_STRING_SIZE], *reason;
+		if (job->state_desc)
+			reason = job->state_desc;
+		else
+			reason = job_reason_string(job->state_reason);
+		snprintf(id, FORMAT_STRING_SIZE, "%s", reason);
 		_print_str(id, width, right, true);
 	}
 	if (suffix)
@@ -347,20 +351,9 @@ int _print_job_name(job_info_t * job, int width, bool right, char* suffix)
 {
 	if (job == NULL)	/* Print the Header instead */
 		_print_str("NAME", width, right, true);
-	else {
-		char *temp = NULL, *jname = NULL;
-		if (job->name) {
-			/* first set the jname to the job_ptr->name */
-			jname = xstrdup(job->name);
-			/* then grep for " since that is the delimiter
-			 * for the wckey and set to NULL */
-			if((temp = strchr(jname, '\"')))
-				temp[0] = '\0';
-		}
+	else
+		_print_str(job->name, width, right, true);
 		
-		_print_str(jname, width, right, true);
-		xfree(jname);
-	}
 	if (suffix)
 		printf("%s", suffix);
 	return SLURM_SUCCESS;
@@ -370,16 +363,9 @@ int _print_job_wckey(job_info_t * job, int width, bool right, char* suffix)
 {
 	if (job == NULL)	/* Print the Header instead */
 		_print_str("WCKEY", width, right, true);
-	else {
-		char *temp = NULL;
-		/* grep for " since that is the delimiter for
-		   the wckey */
-		temp = strchr(job->name, '\"');
-		if(temp) 
-			temp++;
-				
-		_print_str(temp, width, right, true);
-	}
+	else
+		_print_str(job->wckey, width, right, true);
+
 	if (suffix)
 		printf("%s", suffix);
 	return SLURM_SUCCESS;
@@ -602,9 +588,12 @@ int _print_job_reason_list(job_info_t * job, int width, bool right,
 	} else if ((job->job_state == JOB_PENDING)
 	||         (job->job_state == JOB_TIMEOUT)
 	||         (job->job_state == JOB_FAILED)) {
-		char id[FORMAT_STRING_SIZE];
-		snprintf(id, FORMAT_STRING_SIZE, "(%s)", 
-			job_reason_string(job->state_reason));
+		char id[FORMAT_STRING_SIZE], *reason;
+		if (job->state_desc)
+			reason = job->state_desc;
+		else
+			reason = job_reason_string(job->state_reason);
+		snprintf(id, FORMAT_STRING_SIZE, "(%s)", reason);
 		_print_str(id, width, right, true);
 	} else {
 #ifdef HAVE_BG
@@ -1081,6 +1070,18 @@ int _print_job_select_jobinfo(job_info_t * job, int width, bool right_justify,
 	return SLURM_SUCCESS;
 }
 
+int _print_job_reservation(job_info_t * job, int width, bool right_justify,
+			char* suffix)
+{
+	if (job == NULL)	 /* Print the Header instead */
+		_print_str("RESERVATION", width, right_justify, true);
+	else
+		_print_str(job->resv_name, width, right_justify, true);
+	if (suffix)
+		printf("%s", suffix);
+	return SLURM_SUCCESS;
+}
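/*
 * Each _print_job_* routine doubles as its own header printer: a NULL
 * job prints the column title, anything else prints the value.  A
 * self-contained toy of that convention in plain libc; the struct and
 * column width are made up for illustration.
 */
#include <stdio.h>

struct job { const char *resv_name; };

static void print_resv_col(const struct job *job, int width)
{
	/* NULL selects the header, mirroring the squeue convention */
	printf("%-*s\n", width, job ?
	       (job->resv_name ? job->resv_name : "") : "RESERVATION");
}

int main(void)
{
	struct job j = { "maint_window" };
	print_resv_col(NULL, 12);	/* header row */
	print_resv_col(&j, 12);		/* data row */
	return 0;
}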
+
 /*****************************************************************************
  * Job Step Print Functions
  *****************************************************************************/
diff --git a/src/squeue/print.h b/src/squeue/print.h
index d183b0085d8659315c912965208a4fb05a2793e0..932648872e0f8afb1171356d258437b6656864df 100644
--- a/src/squeue/print.h
+++ b/src/squeue/print.h
@@ -4,10 +4,11 @@
  *  Copyright (C) 2002-2006 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Joey Ekstrom <ekstrom1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -169,6 +170,8 @@ int job_format_add_function(List list, int width, bool right_justify,
 	job_format_add_function(list,wid,right,suffix,_print_job_select_jobinfo)
 #define job_format_add_comment(list,wid,right,suffix) \
 	job_format_add_function(list,wid,right,suffix,_print_job_comment)
+#define job_format_add_reservation(list,wid,right,suffix) \
+	job_format_add_function(list,wid,right,suffix,_print_job_reservation)
 
 /*****************************************************************************
  * Job Line Print Functions
@@ -263,6 +266,8 @@ int _print_job_select_jobinfo(job_info_t * job, int width, bool right_justify,
 			char* suffix);
 int _print_job_comment(job_info_t * job, int width, bool right_justify,
 			char* suffix);
+int _print_job_reservation(job_info_t * job, int width, bool right_justify,
+			char* suffix);
 
 /*****************************************************************************
  * Step Print Format Functions
diff --git a/src/squeue/sort.c b/src/squeue/sort.c
index cc52e0b3b2b0e0d5168bbd40765e5c7abbb48f96..1deeb837b44c86394d03d2351dd7b3fef4c12b51 100644
--- a/src/squeue/sort.c
+++ b/src/squeue/sort.c
@@ -4,10 +4,11 @@
  *  Copyright (C) 2002 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>, et. al.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -75,6 +76,7 @@ static int _sort_job_by_partition(void *void1, void *void2);
 static int _sort_job_by_priority(void *void1, void *void2);
 static int _sort_job_by_user_id(void *void1, void *void2);
 static int _sort_job_by_user_name(void *void1, void *void2);
+static int _sort_job_by_reservation(void *void1, void *void2);
 
 static int _sort_step_by_id(void *void1, void *void2);
 static int _sort_step_by_node_list(void *void1, void *void2);
@@ -157,6 +159,8 @@ void sort_job_list(List job_list)
 			list_sort(job_list, _sort_job_by_user_name);
 		else if (params.sort[i] == 'U')
 			list_sort(job_list, _sort_job_by_user_id);
+		else if (params.sort[i] == 'v')
+			list_sort(job_list, _sort_job_by_reservation);
 		else if (params.sort[i] == 'X')
 			list_sort(job_list, _sort_job_by_num_sockets);
 		else if (params.sort[i] == 'Y')
@@ -627,6 +631,24 @@ static int _sort_job_by_user_name(void *void1, void *void2)
 	return diff;
 }
 
+static int _sort_job_by_reservation(void *void1, void *void2)
+{
+	int diff;
+	job_info_t *job1 = (job_info_t *) void1;
+	job_info_t *job2 = (job_info_t *) void2;
+	char *val1 = "", *val2 = "";
+
+	if (job1->resv_name)
+		val1 = job1->resv_name;
+	if (job2->resv_name)
+		val2 = job2->resv_name;
+	diff = strcmp(val1, val2);
+
+	if (reverse_order)
+		diff = -diff;
+	return diff;
+}
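/*
 * resv_name is NULL for jobs that run outside any reservation, so the
 * comparator above substitutes "" before strcmp(); such jobs then sort
 * ahead of every named reservation.  A minimal sketch of the fallback:
 */
#include <stdio.h>
#include <string.h>

static int cmp_names(const char *a, const char *b)
{
	return strcmp(a ? a : "", b ? b : "");	/* NULL behaves like "" */
}

int main(void)
{
	printf("%d\n", cmp_names(NULL, "maint") < 0);	/* prints 1 */
	return 0;
}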
+
 /*****************************************************************************
  * Local Step Sort Functions
  *****************************************************************************/
diff --git a/src/squeue/squeue.c b/src/squeue/squeue.c
index 5bedfbaaac628075d975efeb177efe246d2a7511..9937c2d7ea3593e683caa327497bd0468dd5408a 100644
--- a/src/squeue/squeue.c
+++ b/src/squeue/squeue.c
@@ -6,10 +6,11 @@
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Joey Ekstrom <ekstrom1@llnl.gov>, 
  *             Morris Jette <jette1@llnl.gov>, et. al.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -210,7 +211,9 @@ _print_job_steps( void )
 		show_flags |= SHOW_ALL;
 
 	if (old_step_ptr) {
-		error_code = slurm_get_job_steps (old_step_ptr->last_update, 
+		/* Use a last_update time of 0 so that we get an updated
+		 * run_time for each job step rather than just its start_time */
+		error_code = slurm_get_job_steps ((time_t) 0, 
 				0, 0, &new_step_ptr, show_flags);
 		if (error_code ==  SLURM_SUCCESS)
 			slurm_free_job_step_info_response_msg( old_step_ptr );
@@ -220,7 +223,7 @@ _print_job_steps( void )
 		}
 	}
 	else
-		error_code = slurm_get_job_steps ((time_t) NULL, 0, 0, 
+		error_code = slurm_get_job_steps ((time_t) 0, 0, 0, 
 				&new_step_ptr, show_flags);
 	if (error_code) {
 		slurm_perror ("slurm_get_job_steps error");
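/*
 * Why (time_t) 0 rather than old_step_ptr->last_update: a nonzero
 * update time lets the controller answer "nothing changed" and skip
 * resending step data, so a cached run_time would never advance.  A
 * minimal sketch of the full-refetch idiom the patch adopts, assuming
 * only the slurm_get_job_steps() signature already used above:
 */
#include <stdint.h>
#include <time.h>
#include <slurm/slurm.h>

static int refetch_steps(job_step_info_response_msg_t **steps,
			 uint16_t show_flags)
{
	/* a last_update of 0 always forces fresh data, including an
	 * up-to-date run_time for every step */
	return slurm_get_job_steps((time_t) 0, 0, 0, steps, show_flags);
}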
diff --git a/src/squeue/squeue.h b/src/squeue/squeue.h
index b3aef322f7917bfcc860d730ce823f9f817099a4..a4c9bb285f009b263a88a040f59a19eab76f2e76 100644
--- a/src/squeue/squeue.h
+++ b/src/squeue/squeue.h
@@ -4,10 +4,11 @@
  *  Copyright (C) 2002 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Joey Ekstrom <ekstrom1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/sreport/Makefile.am b/src/sreport/Makefile.am
index fd6611ed7d4c8142ce3034177d7709171eab9681..e0bf3acf3b8c65e53a198d13c9db623d94b799f4 100644
--- a/src/sreport/Makefile.am
+++ b/src/sreport/Makefile.am
@@ -13,6 +13,7 @@ sreport_SOURCES =	\
 	assoc_reports.c assoc_reports.h	\
 	job_reports.c job_reports.h	\
 	user_reports.c user_reports.h	\
+	resv_reports.c resv_reports.h	\
 	common.c
 
 sreport_LDADD =  \
diff --git a/src/sreport/Makefile.in b/src/sreport/Makefile.in
index c5cf69888d7e466bf32b88f307441b6f4ddb07b4..daef919e3252097cbe5255a6d5642b612e6a9e6b 100644
--- a/src/sreport/Makefile.in
+++ b/src/sreport/Makefile.in
@@ -44,14 +44,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -73,7 +77,7 @@ binPROGRAMS_INSTALL = $(INSTALL_PROGRAM)
 PROGRAMS = $(bin_PROGRAMS)
 am_sreport_OBJECTS = sreport.$(OBJEXT) cluster_reports.$(OBJEXT) \
 	assoc_reports.$(OBJEXT) job_reports.$(OBJEXT) \
-	user_reports.$(OBJEXT) common.$(OBJEXT)
+	user_reports.$(OBJEXT) resv_reports.$(OBJEXT) common.$(OBJEXT)
 sreport_OBJECTS = $(am_sreport_OBJECTS)
 am__DEPENDENCIES_1 =
 sreport_DEPENDENCIES = $(top_builddir)/src/api/libslurm.o \
@@ -108,6 +112,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
@@ -276,6 +284,7 @@ sreport_SOURCES = \
 	assoc_reports.c assoc_reports.h	\
 	job_reports.c job_reports.h	\
 	user_reports.c user_reports.h	\
+	resv_reports.c resv_reports.h	\
 	common.c
 
 sreport_LDADD = \
@@ -358,6 +367,7 @@ distclean-compile:
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/cluster_reports.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/common.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/job_reports.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/resv_reports.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sreport.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/user_reports.Po@am__quote@
 
diff --git a/src/sreport/assoc_reports.c b/src/sreport/assoc_reports.c
index 40d718f1a97c475b93040897c4b8b9e95cf8767a..5cfa93f00b5d7cafb6160cc63bc9f18633039185 100644
--- a/src/sreport/assoc_reports.c
+++ b/src/sreport/assoc_reports.c
@@ -6,10 +6,11 @@
  *  Copyright (C) 2008 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Danny Auble <da@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/sreport/assoc_reports.h b/src/sreport/assoc_reports.h
index 702d96e9eb151e65b23df623007604e555fa88f6..7ba023cd546d4c9e6d63c7bfac1fd13d5684f26f 100644
--- a/src/sreport/assoc_reports.h
+++ b/src/sreport/assoc_reports.h
@@ -6,10 +6,11 @@
  *  Copyright (C) 2008 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Danny Auble <da@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/sreport/cluster_reports.c b/src/sreport/cluster_reports.c
index 54e9c067282f1c79a95c06131e4808b372afaca7..c38cabab46639ef7663d3acce815f70898b0d6d2 100644
--- a/src/sreport/cluster_reports.c
+++ b/src/sreport/cluster_reports.c
@@ -6,10 +6,11 @@
  *  Copyright (C) 2008 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Danny Auble <da@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -46,6 +47,7 @@ enum {
 	PRINT_CLUSTER_ACPU,
 	PRINT_CLUSTER_DCPU,
 	PRINT_CLUSTER_ICPU,
+	PRINT_CLUSTER_PDCPU,
 	PRINT_CLUSTER_OCPU,
 	PRINT_CLUSTER_RCPU,
 	PRINT_CLUSTER_TOTAL,
@@ -390,7 +392,7 @@ static int _setup_print_fields_list(List format_list)
 			field->type = PRINT_CLUSTER_ACCT;
 			field->name = xstrdup("Account");
 			if(tree_display)
-				field->len = 20;
+				field->len = -20;
 			else
 				field->len = 15;
 			field->print_routine = print_fields_str;
@@ -411,7 +413,7 @@ static int _setup_print_fields_list(List format_list)
 			field->name = xstrdup("Cluster");
 			field->len = 9;
 			field->print_routine = print_fields_str;
-		} else if(!strncasecmp("cpu_count", object, 
+		} else if(!strncasecmp("cpucount", object, 
 				       MAX(command_len, 2))) {
 			field->type = PRINT_CLUSTER_CPUS;
 			field->name = xstrdup("CPU count");
@@ -453,7 +455,18 @@ static int _setup_print_fields_list(List format_list)
 			else
 				field->len = 9;
 			field->print_routine = sreport_print_time;
-		} else if(!strncasecmp("Proper", object, MAX(command_len, 1))) {
+		} else if(!strncasecmp("PlannedDown", object,
+				       MAX(command_len, 2))) {
+			field->type = PRINT_CLUSTER_PDCPU;
+			field->name = xstrdup("PLND Down");
+			if(time_format == SREPORT_TIME_SECS_PER
+			   || time_format == SREPORT_TIME_MINS_PER
+			   || time_format == SREPORT_TIME_HOURS_PER)
+				field->len = 18;
+			else
+				field->len = 10;
+			field->print_routine = sreport_print_time;
+		} else if(!strncasecmp("Proper", object, MAX(command_len, 2))) {
 			field->type = PRINT_CLUSTER_USER_PROPER;
 			field->name = xstrdup("Proper Name");
 			field->len = 15;
@@ -505,7 +518,7 @@ static int _setup_print_fields_list(List format_list)
 			continue;
 		}
 
-		if(newlen > 0) 
+		if(newlen) 
 			field->len = newlen;
 		
 		list_append(print_fields_list, field);		
@@ -550,7 +563,8 @@ static List _get_cluster_list(int argc, char *argv[], uint32_t *total_time,
 		       "----------------------------------------\n");
 		printf("%s %s - %s (%d*cpus secs)\n", 
 		       report_name, start_char, end_char, 
-		       (cluster_cond->usage_end - cluster_cond->usage_start));
+		       (int)(cluster_cond->usage_end
+			     - cluster_cond->usage_start));
 		switch(time_format) {
 		case SREPORT_TIME_PERCENT:
 			printf("Time reported in %s\n", time_format_string);
@@ -596,14 +610,15 @@ extern int cluster_account_by_user(int argc, char *argv[])
 
 	print_fields_list = list_create(destroy_print_field);
 
-	bzero(&cluster_cond, sizeof(acct_cluster_cond_t));
+	memset(&cluster_cond, 0, sizeof(acct_cluster_cond_t));
 
 	assoc_cond->with_sub_accts = 1;
 
 	_set_assoc_cond(&i, argc, argv, assoc_cond, format_list);
 
 	if(!list_count(format_list)) 
-		slurm_addto_char_list(format_list, "Cluster,Ac,L,P,Used");
+		slurm_addto_char_list(format_list, 
+				      "Cluster,Ac,Login,Proper,Used");
 
 	_setup_print_fields_list(format_list);
 	list_destroy(format_list);
@@ -723,7 +738,7 @@ extern int cluster_account_by_user(int argc, char *argv[])
 		       "----------------------------------------\n");
 		printf("Cluster/Account/User Utilization %s - %s (%d secs)\n", 
 		       start_char, end_char, 
-		       (assoc_cond->usage_end - assoc_cond->usage_start));
+		       (int)(assoc_cond->usage_end - assoc_cond->usage_start));
 		
 		switch(time_format) {
 		case SREPORT_TIME_PERCENT:
@@ -782,7 +797,6 @@ extern int cluster_account_by_user(int argc, char *argv[])
 						print_acct = get_tree_acct_name(
 							local_acct,
 							parent_acct,
-							sreport_cluster->name,
 							tree_list);
 						xfree(local_acct);
 					} else {
@@ -900,12 +914,13 @@ extern int cluster_user_by_account(int argc, char *argv[])
 
 	print_fields_list = list_create(destroy_print_field);
 
-	bzero(&cluster_cond, sizeof(acct_cluster_cond_t));
+	memset(&cluster_cond, 0, sizeof(acct_cluster_cond_t));
 
 	_set_assoc_cond(&i, argc, argv, assoc_cond, format_list);
 
 	if(!list_count(format_list)) 
-		slurm_addto_char_list(format_list, "Cluster,L,P,Ac,Used");
+		slurm_addto_char_list(format_list,
+				      "Cluster,Login,Proper,Ac,Used");
 
 	_setup_print_fields_list(format_list);
 	list_destroy(format_list);
@@ -1047,7 +1062,7 @@ extern int cluster_user_by_account(int argc, char *argv[])
 		       "----------------------------------------\n");
 		printf("Cluster/User/Account Utilization %s - %s (%d secs)\n", 
 		       start_char, end_char, 
-		       (assoc_cond->usage_end - assoc_cond->usage_start));
+		       (int)(assoc_cond->usage_end - assoc_cond->usage_start));
 		
 		switch(time_format) {
 		case SREPORT_TIME_PERCENT:
@@ -1186,12 +1201,13 @@ extern int cluster_user_by_wckey(int argc, char *argv[])
 
 	print_fields_list = list_create(destroy_print_field);
 
-	bzero(&cluster_cond, sizeof(acct_cluster_cond_t));
+	memset(&cluster_cond, 0, sizeof(acct_cluster_cond_t));
 
 	_set_wckey_cond(&i, argc, argv, wckey_cond, format_list);
 
 	if(!list_count(format_list)) 
-		slurm_addto_char_list(format_list, "Cluster,L,P,WCkey,Used");
+		slurm_addto_char_list(format_list, 
+				      "Cluster,Login,Proper,WCkey,Used");
 
 	_setup_print_fields_list(format_list);
 	list_destroy(format_list);
@@ -1315,7 +1331,7 @@ extern int cluster_user_by_wckey(int argc, char *argv[])
 		       "----------------------------------------\n");
 		printf("Cluster/User/WCKey Utilization %s - %s (%d secs)\n", 
 		       start_char, end_char, 
-		       (wckey_cond->usage_end - wckey_cond->usage_start));
+		       (int)(wckey_cond->usage_end - wckey_cond->usage_start));
 		
 		switch(time_format) {
 		case SREPORT_TIME_PERCENT:
@@ -1455,7 +1471,7 @@ extern int cluster_utilization(int argc, char *argv[])
 		goto end_it;
 
 	if(!list_count(format_list)) 
-		slurm_addto_char_list(format_list, "Cl,al,d,i,res,rep");
+		slurm_addto_char_list(format_list, "Cl,al,d,planned,i,res,rep");
 
 	_setup_print_fields_list(format_list);
 	list_destroy(format_list);
@@ -1484,6 +1500,7 @@ extern int cluster_utilization(int argc, char *argv[])
 		while((accting = list_next(itr3))) {
 			total_acct.alloc_secs += accting->alloc_secs;
 			total_acct.down_secs += accting->down_secs;
+			total_acct.pdown_secs += accting->pdown_secs;
 			total_acct.idle_secs += accting->idle_secs;
 			total_acct.resv_secs += accting->resv_secs;
 			total_acct.over_secs += accting->over_secs;
@@ -1496,7 +1513,8 @@ extern int cluster_utilization(int argc, char *argv[])
 		local_total_time =
 			(uint64_t)total_time * (uint64_t)total_acct.cpu_count;
 		total_reported = total_acct.alloc_secs + total_acct.down_secs 
-			+ total_acct.idle_secs + total_acct.resv_secs;
+			+ total_acct.pdown_secs + total_acct.idle_secs
+			+ total_acct.resv_secs;
 
 		while((field = list_next(itr2))) {
 			switch(field->type) {
@@ -1547,6 +1565,13 @@ extern int cluster_utilization(int argc, char *argv[])
 						     (curr_inx == 
 						      field_count));
 				break;
+			case PRINT_CLUSTER_PDCPU:
+				field->print_routine(field,
+						     total_acct.pdown_secs,
+						     total_reported,
+						     (curr_inx == 
+						      field_count));
+				break;
 			case PRINT_CLUSTER_TOTAL:
 				field->print_routine(field,
 						     total_reported,
@@ -1605,7 +1630,6 @@ extern int cluster_wckey_by_user(int argc, char *argv[])
 	sreport_cluster_rec_t *sreport_cluster = NULL;
 	print_field_t *field = NULL;
 	int field_count = 0;
-	char *print_acct = NULL;
 
 	print_fields_list = list_create(destroy_print_field);
 
@@ -1614,7 +1638,8 @@ extern int cluster_wckey_by_user(int argc, char *argv[])
 	_set_wckey_cond(&i, argc, argv, wckey_cond, format_list);
 
 	if(!list_count(format_list)) 
-		slurm_addto_char_list(format_list, "Cluster,WCKey,L,P,Used");
+		slurm_addto_char_list(format_list, 
+				      "Cluster,WCKey,Login,Proper,Used");
 
 	_setup_print_fields_list(format_list);
 	list_destroy(format_list);
@@ -1748,7 +1773,7 @@ extern int cluster_wckey_by_user(int argc, char *argv[])
 		       "----------------------------------------\n");
 		printf("Cluster/WCKey/User Utilization %s - %s (%d secs)\n", 
 		       start_char, end_char, 
-		       (wckey_cond->usage_end - wckey_cond->usage_start));
+		       (int)(wckey_cond->usage_end - wckey_cond->usage_start));
 		
 		switch(time_format) {
 		case SREPORT_TIME_PERCENT:
@@ -1788,35 +1813,9 @@ extern int cluster_wckey_by_user(int argc, char *argv[])
 				struct passwd *pwd = NULL;
 				switch(field->type) {
 				case PRINT_CLUSTER_WCKEY:
-					if(tree_display) {
-						char *local_acct = NULL;
-						char *parent_acct = NULL;
-						if(sreport_assoc->user) {
-							local_acct =
-								xstrdup_printf(
-									"|%s", 
-									sreport_assoc->acct);
-							parent_acct =
-								sreport_assoc->acct;
-						} else {
-							local_acct = xstrdup(
-								sreport_assoc->acct);
-							parent_acct = sreport_assoc->
-								parent_acct;
-						}
-						print_acct = get_tree_acct_name(
-							local_acct,
-							parent_acct,
-							sreport_cluster->name,
-							tree_list);
-						xfree(local_acct);
-					} else {
-						print_acct =
-							sreport_assoc->acct;
-					}
 					field->print_routine(
 						field, 
-						print_acct,
+						sreport_assoc->acct,
 						(curr_inx == field_count));
 					
 					break;
diff --git a/src/sreport/cluster_reports.h b/src/sreport/cluster_reports.h
index 868cfe5f9af1ca0ba83a346f693b045be84b8ab8..2efbffe995723c84aca5310b4553e4b79aea9184 100644
--- a/src/sreport/cluster_reports.h
+++ b/src/sreport/cluster_reports.h
@@ -6,10 +6,11 @@
  *  Copyright (C) 2008 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Danny Auble <da@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/sreport/common.c b/src/sreport/common.c
index 4a3c691e4840cc0b5493a2ee3f6563c8663f1e78..faaf4c42336aacc3e3c2002f32a1879f3d0cbfad 100644
--- a/src/sreport/common.c
+++ b/src/sreport/common.c
@@ -6,10 +6,11 @@
  *  Copyright (C) 2008 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Danny Auble <da@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -42,6 +43,8 @@
 extern void sreport_print_time(print_field_t *field,
 			       uint64_t value, uint64_t total_time, int last)
 {
+	int abs_len = abs(field->len);
+
 	if(!total_time) 
 		total_time = 1;
 
@@ -54,7 +57,7 @@ extern void sreport_print_time(print_field_t *field,
 		else if(print_fields_parsable_print)
 			printf("|");	
 		else				
-			printf("%-*s ", field->len, " ");
+			printf("%-*s ", abs_len, " ");
 	} else {
 		char *output = NULL;
 		double percent = (double)value;
@@ -109,8 +112,11 @@ extern void sreport_print_time(print_field_t *field,
 			printf("%s", output);
 		else if(print_fields_parsable_print)
 			printf("%s|", output);	
+		else if(field->len == abs_len)
+			printf("%*.*s ", abs_len, abs_len, output);
 		else
-			printf("%*.*s ", field->len, field->len, output);
+			printf("%-*.*s ", abs_len, abs_len, output);
+
 		xfree(output);
 	}
 }
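/*
 * The sign convention introduced above mirrors printf itself: a
 * negative field->len (as now set for tree-displayed account columns)
 * selects left-justified output, while the width arithmetic always
 * uses abs().  A self-contained illustration in plain libc:
 */
#include <stdio.h>
#include <stdlib.h>

static void print_cell(const char *s, int len)
{
	int abs_len = abs(len);

	if (len == abs_len)
		printf("[%*.*s]\n", abs_len, abs_len, s);  /* right-justified */
	else
		printf("[%-*.*s]\n", abs_len, abs_len, s); /* left-justified */
}

int main(void)
{
	print_cell("acct", 10);		/* [      acct] */
	print_cell("acct", -10);	/* [acct      ] */
	return 0;
}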
@@ -433,6 +439,45 @@ extern int sort_assoc_dec(sreport_assoc_rec_t *assoc_a,
 	return 0;
 }
 
+/*
+ * Comparator used for sorting reservations by cluster name, then
+ * reservation name, then start time (most recent first).
+ *
+ * returns: 1: resv_a > resv_b   0: resv_a == resv_b   -1: resv_a < resv_b
+ */
+extern int sort_reservations_dec(acct_reservation_rec_t *resv_a, 
+				 acct_reservation_rec_t *resv_b)
+{
+	int diff = 0;
+
+	if(!resv_a->cluster || !resv_b->cluster)
+		return 0;
+
+	diff = strcmp(resv_a->cluster, resv_b->cluster);
+
+	if (diff > 0)
+		return 1;
+	else if (diff < 0)
+		return -1;
+
+	if(!resv_a->name || !resv_b->name)
+		return 0;
+
+	diff = strcmp(resv_a->name, resv_b->name);
+
+	if (diff > 0)
+		return 1;
+	else if (diff < 0)
+		return -1;
+	
+	if(resv_a->time_start < resv_b->time_start)
+		return 1;
+	else if(resv_a->time_start > resv_b->time_start)
+		return -1;
+
+	return 0;
+}
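/*
 * The comparator above short-circuits tier by tier: cluster, then
 * reservation name, then start time.  A self-contained toy of the same
 * cascade with qsort(); the struct and data are invented for
 * illustration.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct resv { const char *cluster, *name; long start; };

static int cmp_resv(const void *a, const void *b)
{
	const struct resv *x = a, *y = b;
	int diff = strcmp(x->cluster, y->cluster);
	if (diff)
		return diff;		/* tier 1: cluster name */
	diff = strcmp(x->name, y->name);
	if (diff)
		return diff;		/* tier 2: reservation name */
	/* tier 3: newest start first, as in sort_reservations_dec() */
	return (x->start < y->start) - (x->start > y->start);
}

int main(void)
{
	struct resv r[] = { { "tux", "maint", 100 },
			    { "tux", "maint", 200 } };
	qsort(r, 2, sizeof(r[0]), cmp_resv);
	printf("%ld %ld\n", r[0].start, r[1].start);	/* 200 100 */
	return 0;
}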
+
 extern int get_uint(char *in_value, uint32_t *out_value, char *type)
 {
 	char *ptr = NULL, *meat = NULL;
diff --git a/src/sreport/job_reports.c b/src/sreport/job_reports.c
index 0a2aac6e7d88d73a03371daaca6a6966460885d3..94a059dfa3c4936002111f7b2442a502bbad2e4f 100644
--- a/src/sreport/job_reports.c
+++ b/src/sreport/job_reports.c
@@ -6,10 +6,11 @@
  *  Copyright (C) 2008 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Danny Auble <da@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -376,6 +377,17 @@ static int _set_cond(int *start, int argc, char *argv[],
 				start_char = end_char + 1;
 			}
 			
+			set = 1;
+		} else if(!strncasecmp (argv[i], "Nodes", 
+					 MAX(command_len, 1))) {
+			if(job_cond->used_nodes) {
+				error("You already specified nodes '%s' "
+				      " combine your request into 1 nodes=.",
+				      job_cond->used_nodes);
+				exit_code = 1;
+				break;
+			}
+			job_cond->used_nodes = xstrdup(argv[i]+end);
 			set = 1;
 		} else if (!strncasecmp (argv[i], "Partitions",
 					 MAX(command_len, 2))) {
@@ -517,7 +529,7 @@ static int _setup_print_fields_list(List format_list)
 			continue;
 		}
 
-		if(newlen > 0) 
+		if(newlen) 
 			field->len = newlen;
 		
 		list_append(print_fields_list, field);		
@@ -571,7 +583,7 @@ static int _setup_grouping_print_fields_list(List grouping_list)
 		last_object = object;
 		if((tmp_char = strstr(object, "\%"))) {
 			int newlen = atoi(tmp_char+1);
-			if(newlen > 0) 
+			if(newlen) 
 				field->len = newlen;
 		}
 		list_append(grouping_print_fields_list, field);		
@@ -584,7 +596,7 @@ static int _setup_grouping_print_fields_list(List grouping_list)
 			field->type = PRINT_JOB_COUNT;
 		else
 			field->type = PRINT_JOB_SIZE;
-		field->name = xstrdup_printf("> %u cpus", last_size);
+		field->name = xstrdup_printf(">= %u cpus", last_size);
 		if(time_format == SREPORT_TIME_SECS_PER
 		   || time_format == SREPORT_TIME_MINS_PER
 		   || time_format == SREPORT_TIME_HOURS_PER)
@@ -597,7 +609,7 @@ static int _setup_grouping_print_fields_list(List grouping_list)
 			field->print_routine = sreport_print_time;
 		if((tmp_char = strstr(last_object, "\%"))) {
 			int newlen = atoi(tmp_char+1);
-			if(newlen > 0) 
+			if(newlen) 
 				field->len = newlen;
 		}
 		list_append(grouping_print_fields_list, field);		
@@ -688,7 +700,7 @@ extern int job_sizes_grouped_by_top_acct(int argc, char *argv[])
 		       "----------------------------------------\n");
 		printf("Job Sizes %s - %s (%d secs)\n", 
 		       start_char, end_char, 
-		       (job_cond->usage_end - job_cond->usage_start));
+		       (int)(job_cond->usage_end - job_cond->usage_start));
 		if(print_job_count)
 			printf("Units are in number of jobs ran\n");
 		else
@@ -1083,7 +1095,7 @@ extern int job_sizes_grouped_by_wckey(int argc, char *argv[])
 		       "----------------------------------------\n");
 		printf("Job Sizes by Wckey %s - %s (%d secs)\n", 
 		       start_char, end_char, 
-		       (job_cond->usage_end - job_cond->usage_start));
+		       (int)(job_cond->usage_end - job_cond->usage_start));
 		if(print_job_count)
 			printf("Units are in number of jobs ran\n");
 		else
diff --git a/src/sreport/job_reports.h b/src/sreport/job_reports.h
index 81bd1923505febbcca3f6953794c5fdd173a7878..249e9cd005414bd8374933773d3f6f31da6260c2 100644
--- a/src/sreport/job_reports.h
+++ b/src/sreport/job_reports.h
@@ -6,10 +6,11 @@
  *  Copyright (C) 2008 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Danny Auble <da@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/sreport/resv_reports.c b/src/sreport/resv_reports.c
new file mode 100644
index 0000000000000000000000000000000000000000..338bed0b5868c9ca5bf1e7051d1d7aae20dc28f0
--- /dev/null
+++ b/src/sreport/resv_reports.c
@@ -0,0 +1,562 @@
+/*****************************************************************************\
+ *  resv_reports.c - functions for generating reservation reports
+ *                       from accounting infrastructure.
+ *****************************************************************************
+ *
+ *  Copyright (C) 2008 Lawrence Livermore National Security.
+ *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+ *  Written by Danny Auble <da@llnl.gov>
+ *  CODE-OCEC-09-009. All rights reserved.
+ *  
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
+ *  
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission 
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and 
+ *  distribute linked combinations including the two. You must obey the GNU 
+ *  General Public License in all respects for all of the code used other than 
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this 
+ *  exception to your version of the file(s), but you are not obligated to do 
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in 
+ *  the program, then also delete it here.
+ *  
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *  
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#include "cluster_reports.h"
+
+enum {
+	PRINT_RESV_NAME,
+	PRINT_RESV_CLUSTER,
+	PRINT_RESV_CPUS,
+	PRINT_RESV_ACPU,
+	PRINT_RESV_DCPU,
+	PRINT_RESV_ICPU,
+	PRINT_RESV_NODES,
+	PRINT_RESV_ASSOCS,
+	PRINT_RESV_START,
+	PRINT_RESV_END,
+	PRINT_RESV_FLAGS,
+	PRINT_RESV_TIME,
+	PRINT_RESV_CPUTIME,
+};
+
+typedef enum {
+	GROUP_BY_ACCOUNT,
+	GROUP_BY_ACCOUNT_JOB_SIZE,
+	GROUP_BY_ACCOUNT_JOB_SIZE_DURATION,
+	GROUP_BY_USER,
+	GROUP_BY_USER_JOB_SIZE,
+	GROUP_BY_USER_JOB_SIZE_DURATION,
+	GROUP_BY_NONE
+} report_grouping_t;
+
+static List print_fields_list = NULL; /* list of print_field_t */
+
+static int _set_resv_cond(int *start, int argc, char *argv[],
+			  acct_reservation_cond_t *resv_cond,
+			  List format_list)
+{
+	int i;
+	int set = 0;
+	int end = 0;
+	int local_cluster_flag = all_clusters_flag;
+	time_t start_time, end_time;
+	int command_len = 0;
+	int option = 0;
+
+	if(!resv_cond) {
+		error("We need an acct_reservation_cond to call this");
+		return SLURM_ERROR;
+	}
+
+	resv_cond->with_usage = 1;
+
+	if(!resv_cond->cluster_list)
+		resv_cond->cluster_list = list_create(slurm_destroy_char);
+	for (i=(*start); i<argc; i++) {
+		end = parse_option_end(argv[i]);
+		if(!end)
+			command_len=strlen(argv[i]);
+		else {
+			command_len=end-1;
+			if(argv[i][end] == '=') {
+				option = (int)argv[i][end-1];
+				end++;
+			}
+		}
+
+		if(!end && !strncasecmp(argv[i], "all_clusters",
+					       MAX(command_len, 1))) {
+			local_cluster_flag = 1;
+		} else if(!end
+			  || !strncasecmp (argv[i], "Names", 
+					 MAX(command_len, 1))) {
+			if(!resv_cond->name_list)
+				resv_cond->name_list = 
+					list_create(slurm_destroy_char);
+			slurm_addto_char_list(resv_cond->name_list, 
+					      argv[i]+end);
+			set = 1;
+		} else if (!strncasecmp (argv[i], "Clusters",
+					 MAX(command_len, 1))) {
+			slurm_addto_char_list(resv_cond->cluster_list,
+					      argv[i]+end);
+			set = 1;
+		} else if (!strncasecmp (argv[i], "End", MAX(command_len, 1))) {
+			resv_cond->time_end = parse_time(argv[i]+end, 1);
+			set = 1;
+		} else if (!strncasecmp (argv[i], "Flags",
+					 MAX(command_len, 2))) {
+			/* FIX ME: make flags work here */
+			//resv_cond->flags = parse_resv_flags(argv[i]+end);
+			set = 1;
+		} else if (!strncasecmp (argv[i], "Format", 
+					 MAX(command_len, 2))) {
+			if(format_list)
+				slurm_addto_char_list(format_list,
+						      argv[i]+end);
+		} else if (!strncasecmp (argv[i], "Ids", 
+					 MAX(command_len, 1))) {
+			if(!resv_cond->id_list)
+				resv_cond->id_list = 
+					list_create(slurm_destroy_char);
+			slurm_addto_char_list(resv_cond->id_list, argv[i]+end);
+			set = 1;
+		} else if(!strncasecmp (argv[i], "Nodes", 
+					 MAX(command_len, 1))) {
+			if(resv_cond->nodes) {
+				error("You already specified nodes '%s' "
+				      " combine your request into 1 nodes=.",
+				      resv_cond->nodes);
+				exit_code = 1;
+				break;
+			}
+			resv_cond->nodes = xstrdup(argv[i]+end);
+			set = 1;
+		} else if (!strncasecmp (argv[i], "Start",
+					 MAX(command_len, 1))) {
+			resv_cond->time_start = parse_time(argv[i]+end, 1);
+			set = 1;
+		} else {
+			exit_code=1;
+			fprintf(stderr," Unknown condition: %s\n"
+			       "Use keyword set to modify value\n", argv[i]);
+		}
+	}
+	(*start) = i;
+
+	if(!local_cluster_flag && !list_count(resv_cond->cluster_list)) {
+		char *temp = slurm_get_cluster_name();
+		if(temp)
+			list_append(resv_cond->cluster_list, temp);
+	}
+
+	/* Copy the times through local time_t variables before calling
+	   set_start_end_time() to make sure resv_cond isn't corrupted.
+	   That has happened on some 64-bit machines, so this is here to
+	   be on the safe side.
+	*/
+	start_time = resv_cond->time_start;
+	end_time = resv_cond->time_end;
+	set_start_end_time(&start_time, &end_time);
+	resv_cond->time_start = start_time;
+	resv_cond->time_end = end_time;
+
+	return set;
+}
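/*
 * One plausible reading of the 64-bit corruption the comment above
 * mentions is a store-width mismatch: if a condition struct ever held
 * its times in 32-bit fields while a helper wrote through a time_t *,
 * an 8-byte store would clobber the neighbouring member on LP64 hosts.
 * A sketch of the safe copy-through-locals idiom with a hypothetical
 * packed struct:
 */
#include <stdio.h>
#include <stdint.h>
#include <time.h>

struct bad_cond { uint32_t time_start; uint32_t time_end; };

static void set_time(time_t *t) { *t = (time_t) 1; }

int main(void)
{
	struct bad_cond c = { 0, 0xdeadbeef };
	time_t start = c.time_start;	/* correctly typed local */
	set_time(&start);		/* 8-byte store lands on the local */
	c.time_start = (uint32_t) start;
	printf("time_end survives: %#x\n", (unsigned) c.time_end);
	return 0;
}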
+
+static int _setup_print_fields_list(List format_list)
+{
+	ListIterator itr = NULL;
+	print_field_t *field = NULL;
+	char *object = NULL;
+
+	if(!format_list || !list_count(format_list)) {
+		exit_code=1;
+			fprintf(stderr, " we need a format list "
+				"to set up the print.\n");
+		return SLURM_ERROR;
+	}
+
+	if(!print_fields_list)
+		print_fields_list = list_create(destroy_print_field);
+
+	itr = list_iterator_create(format_list);
+	while((object = list_next(itr))) {
+		char *tmp_char = NULL;
+		int command_len = 0;
+		int newlen = 0;
+		
+		if((tmp_char = strstr(object, "%"))) {
+			newlen = atoi(tmp_char+1);
+			tmp_char[0] = '\0';
+		} 
+
+		command_len = strlen(object);
+
+		field = xmalloc(sizeof(print_field_t));
+		if(!strncasecmp("allocated", object, 
+				MAX(command_len, 2))) {
+			field->type = PRINT_RESV_ACPU;
+			field->name = xstrdup("Allocated");
+			if(time_format == SREPORT_TIME_SECS_PER
+			   || time_format == SREPORT_TIME_MINS_PER
+			   || time_format == SREPORT_TIME_HOURS_PER)
+				field->len = 20;
+			else
+				field->len = 9;
+			field->print_routine = sreport_print_time;
+		} else if(!strncasecmp("Associations",
+				       object, MAX(command_len, 2))) {
+			field->type = PRINT_RESV_ASSOCS;
+			field->name = xstrdup("Associations");
+			field->len = 15;
+			field->print_routine = print_fields_str;
+		} else if(!strncasecmp("Cluster", object, 
+				       MAX(command_len, 2))) {
+			field->type = PRINT_RESV_CLUSTER;
+			field->name = xstrdup("Cluster");
+			field->len = 9;
+			field->print_routine = print_fields_str;
+		} else if(!strncasecmp("cpucount", object, 
+				       MAX(command_len, 2))) {
+			field->type = PRINT_RESV_CPUS;
+			field->name = xstrdup("CPU count");
+			field->len = 9;
+			field->print_routine = print_fields_uint;
+		} else if(!strncasecmp("down", object, MAX(command_len, 1))) {
+			field->type = PRINT_RESV_DCPU;
+			field->name = xstrdup("Down");
+			if(time_format == SREPORT_TIME_SECS_PER
+			   || time_format == SREPORT_TIME_MINS_PER
+			   || time_format == SREPORT_TIME_HOURS_PER)
+				field->len = 20;
+			else
+				field->len = 9;
+			field->print_routine = sreport_print_time;
+		} else if(!strncasecmp("idle", object, MAX(command_len, 1))) {
+			field->type = PRINT_RESV_ICPU;
+			field->name = xstrdup("Idle");
+			if(time_format == SREPORT_TIME_SECS_PER
+			   || time_format == SREPORT_TIME_MINS_PER
+			   || time_format == SREPORT_TIME_HOURS_PER)
+				field->len = 20;
+			else
+				field->len = 9;
+			field->print_routine = sreport_print_time;
+		} else if(!strncasecmp("Nodes", object, MAX(command_len, 2))) {
+			field->type = PRINT_RESV_NODES;
+			field->name = xstrdup("Nodes");
+			field->len = 15;
+			field->print_routine = print_fields_str;
+		} else if(!strncasecmp("Name", object, 
+				       MAX(command_len, 2))) {
+			field->type = PRINT_RESV_NAME;
+			field->name = xstrdup("Name");
+			field->len = 9;
+			field->print_routine = print_fields_str;
+		} else if(!strncasecmp("Start", object, 
+				       MAX(command_len, 2))) {
+			field->type = PRINT_RESV_START;
+			field->name = xstrdup("Start");
+			field->len = 19;
+			field->print_routine = print_fields_date;
+		} else if(!strncasecmp("End", object, 
+				       MAX(command_len, 2))) {
+			field->type = PRINT_RESV_END;
+			field->name = xstrdup("End");
+			field->len = 19;
+			field->print_routine = print_fields_date;
+		} else if(!strncasecmp("TotalTime", object, 
+				       MAX(command_len, 2))) {
+			field->type = PRINT_RESV_TIME;
+			field->name = xstrdup("TotalTime");
+			field->len = 9;
+			field->print_routine = print_fields_time_from_secs;
+		} else if(!strncasecmp("CPUTime", object, 
+				       MAX(command_len, 2))) {
+			field->type = PRINT_RESV_CPUTIME;
+			field->name = xstrdup("CPUTime");
+			field->len = 9;
+			field->print_routine = print_fields_time_from_secs;
+		} else {
+			exit_code=1;
+			fprintf(stderr, " Unknown field '%s'\n", object);
+			xfree(field);
+			continue;
+		}
+
+		if(newlen) 
+			field->len = newlen;
+		
+		list_append(print_fields_list, field);		
+	}
+	list_iterator_destroy(itr);
+
+	return SLURM_SUCCESS;
+}
+
+static List _get_resv_list(int argc, char *argv[],
+			   char *report_name, List format_list)
+{
+	acct_reservation_cond_t *resv_cond =
+		xmalloc(sizeof(acct_reservation_cond_t));
+	int i=0;
+	List resv_list = NULL;
+
+	resv_cond->with_usage = 1;
+
+	_set_resv_cond(&i, argc, argv, resv_cond, format_list);
+	
+	resv_list = acct_storage_g_get_reservations(db_conn, my_uid,
+						    resv_cond);
+	if(!resv_list) {
+		exit_code=1;
+		fprintf(stderr, " Problem with resv query.\n");
+		return NULL;
+	}
+
+	if(print_fields_have_header) {
+		char start_char[20];
+		char end_char[20];
+		time_t my_start = resv_cond->time_start;
+		time_t my_end = resv_cond->time_end-1;
+
+		slurm_make_time_str(&my_start, 
+				    start_char, sizeof(start_char));
+		slurm_make_time_str(&my_end,
+				    end_char, sizeof(end_char));
+		printf("----------------------------------------"
+		       "----------------------------------------\n");
+		printf("%s %s - %s\n", 
+		       report_name, start_char, end_char);
+		switch(time_format) {
+		case SREPORT_TIME_PERCENT:
+			printf("Time reported in %s\n", time_format_string);
+			break; 
+		default:
+			printf("Time reported in CPU %s\n", time_format_string);
+			break;
+		}
+		printf("----------------------------------------"
+		       "----------------------------------------\n");
+	}
+
+	destroy_acct_reservation_cond(resv_cond);
+	
+	return resv_list;
+}
+
+extern int resv_utilization(int argc, char *argv[])
+{
+	int rc = SLURM_SUCCESS;
+	ListIterator itr = NULL;
+	ListIterator tot_itr = NULL;
+	ListIterator itr2 = NULL;
+	acct_reservation_rec_t *resv = NULL;
+	acct_reservation_rec_t *tot_resv = NULL;
+
+	print_field_t *field = NULL;
+	int32_t total_time = 0;
+
+	List resv_list = NULL; 
+	List tot_resv_list = NULL; 
+
+	List format_list = list_create(slurm_destroy_char);
+	int field_count = 0;
+
+	print_fields_list = list_create(destroy_print_field);
+
+
+	if(!(resv_list = _get_resv_list(argc, argv,
+					"Reservation Utilization",
+					format_list))) 
+		goto end_it;
+
+	if(!list_count(format_list)) 
+		slurm_addto_char_list(format_list,
+				      "Cl,name,start,end,al,i");
+
+	_setup_print_fields_list(format_list);
+	list_destroy(format_list);
+
+	/* We will just reuse the pointers returned from _get_resv_list()
+	   here, so create this list without a delete function.
+	*/
+	tot_resv_list = list_create(NULL);
+
+	itr = list_iterator_create(resv_list);
+	tot_itr = list_iterator_create(tot_resv_list);
+	itr2 = list_iterator_create(print_fields_list);
+
+	print_fields_header(print_fields_list);
+
+	field_count = list_count(print_fields_list);
+
+	/* Compress each reservation into a single record.  A
+	   reservation can have multiple entries, e.g. when its node
+	   count changes after it starts, so here we collapse those
+	   entries into one record per reservation.
+	*/
+	while((resv = list_next(itr))) {
+		while((tot_resv = list_next(tot_itr))) {
+			if(tot_resv->id == resv->id) {
+				/* If the reservation changed, keep
+				   a running average of the CPU
+				   count across its entries.
+				*/
+				tot_resv->cpus += resv->cpus;
+				tot_resv->cpus /= 2;
+				tot_resv->alloc_secs += resv->alloc_secs;
+				tot_resv->down_secs += resv->down_secs;
+				if(resv->time_start < tot_resv->time_start)
+					tot_resv->time_start = resv->time_start;
+				if(resv->time_end > tot_resv->time_end)
+					tot_resv->time_end = resv->time_end;
+				break;
+			}
+		}
+		if(!tot_resv) 
+			list_append(tot_resv_list, resv);
+		
+		list_iterator_reset(tot_itr);
+	}
+	
+	list_sort(tot_resv_list, (ListCmpF)sort_reservations_dec);
+	list_iterator_reset(tot_itr);
+	while((tot_resv = list_next(tot_itr))) {
+		uint64_t idle_secs = 0, total_reported = 0;
+		int curr_inx = 1;
+	
+		total_time = tot_resv->time_end - tot_resv->time_start;
+		if(total_time <= 0)
+			continue;
+		total_reported = (uint64_t)total_time * tot_resv->cpus;
+
+		idle_secs = total_reported
+			- tot_resv->alloc_secs - tot_resv->down_secs;
+
+		while((field = list_next(itr2))) {
+			switch(field->type) {
+			case PRINT_RESV_NAME:
+				field->print_routine(field,
+						     tot_resv->name,
+						     (curr_inx == 
+						      field_count));
+				break;
+			case PRINT_RESV_CLUSTER:
+				field->print_routine(field,
+						     tot_resv->cluster,
+						     (curr_inx == 
+						      field_count));
+				break;
+			case PRINT_RESV_CPUS:
+				field->print_routine(field,
+						     tot_resv->cpus,
+						     (curr_inx == 
+						      field_count));
+				break;
+			case PRINT_RESV_ACPU:
+				field->print_routine(field,
+						     tot_resv->alloc_secs,
+						     total_reported,
+						     (curr_inx == 
+						      field_count));
+				break;
+			case PRINT_RESV_DCPU:
+				field->print_routine(field,
+						     tot_resv->down_secs,
+						     total_reported,
+						     (curr_inx == 
+						      field_count));
+				break;
+			case PRINT_RESV_ICPU:
+				field->print_routine(field,
+						     idle_secs,
+						     total_reported,
+						     (curr_inx == 
+						      field_count));
+				break;
+			case PRINT_RESV_NODES:
+				field->print_routine(field,
+						     tot_resv->nodes,
+						     (curr_inx == 
+						      field_count));
+				break;
+			case PRINT_RESV_ASSOCS:
+				field->print_routine(field,
+						     tot_resv->assocs,
+						     (curr_inx == 
+						      field_count));
+				break;
+			case PRINT_RESV_START:
+				field->print_routine(field,
+						     tot_resv->time_start,
+						     (curr_inx == 
+						      field_count));
+				break;
+			case PRINT_RESV_END:
+				field->print_routine(field,
+						     tot_resv->time_end,
+						     (curr_inx == 
+						      field_count));
+				break;
+			case PRINT_RESV_TIME:
+				field->print_routine(field,
+						     total_time,
+						     (curr_inx == 
+						      field_count));
+				break;
+			case PRINT_RESV_CPUTIME:
+				field->print_routine(field,
+						     total_reported,
+						     (curr_inx == 
+						      field_count));
+				break;
+			default:
+				field->print_routine(
+					field, NULL,
+					(curr_inx == field_count));
+				break;
+			}
+			curr_inx++;
+		}
+		list_iterator_reset(itr2);
+		printf("\n");
+	}
+
+	list_iterator_destroy(tot_itr);
+	list_iterator_destroy(itr2);
+	list_iterator_destroy(itr);
+
+end_it:
+	if(resv_list) {
+		list_destroy(resv_list);
+		resv_list = NULL;
+	}
+	if(tot_resv_list) {
+		list_destroy(tot_resv_list);
+		tot_resv_list = NULL;
+	}
+	if(print_fields_list) {
+		list_destroy(print_fields_list);
+		print_fields_list = NULL;
+	}
+
+	return rc;
+}
diff --git a/src/sreport/resv_reports.h b/src/sreport/resv_reports.h
new file mode 100644
index 0000000000000000000000000000000000000000..50d674a4735f761812fed2a605de686fdf6277dc
--- /dev/null
+++ b/src/sreport/resv_reports.h
@@ -0,0 +1,48 @@
+/*****************************************************************************\
+ *  resv_reports.h - functions for generating reservation reports
+ *                       from accounting infrastructure.
+ *****************************************************************************
+ *
+ *  Copyright (C) 2009 Lawrence Livermore National Security.
+ *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+ *  Written by Danny Auble <da@llnl.gov>
+ *  CODE-OCEC-09-009. All rights reserved.
+ *  
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
+ *  
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission 
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and 
+ *  distribute linked combinations including the two. You must obey the GNU 
+ *  General Public License in all respects for all of the code used other than 
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this 
+ *  exception to your version of the file(s), but you are not obligated to do 
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in 
+ *  the program, then also delete it here.
+ *  
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *  
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#ifndef __SREPORT_RESV_REPORTS_H
+#define __SREPORT_RESV_REPORTS_H
+
+#include "sreport.h"
+
+extern int resv_utilization(int argc, char *argv[]);
+
+#endif
diff --git a/src/sreport/sreport.c b/src/sreport/sreport.c
index 858be26d430fb76955854e2f6dd140334b029a16..f04d6025ac5651534c1250f4691f1dd652c80030 100644
--- a/src/sreport/sreport.c
+++ b/src/sreport/sreport.c
@@ -5,10 +5,11 @@
  *  Copyright (C) 2002-2007 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Danny Auble <da@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -40,6 +41,7 @@
 #include "src/sreport/assoc_reports.h"
 #include "src/sreport/cluster_reports.h"
 #include "src/sreport/job_reports.h"
+#include "src/sreport/resv_reports.h"
 #include "src/sreport/user_reports.h"
 #include "src/common/xsignal.h"
 
@@ -60,6 +62,7 @@ sreport_sort_t sort_flag = SREPORT_SORT_TIME;
 
 static void	_job_rep (int argc, char *argv[]);
 static void	_user_rep (int argc, char *argv[]);
+static void	_resv_rep (int argc, char *argv[]);
 static void	_cluster_rep (int argc, char *argv[]);
 static void	_assoc_rep (int argc, char *argv[]);
 static int	_get_command (int *argc, char *argv[]);
@@ -81,11 +84,11 @@ main (int argc, char *argv[])
 		{"all_clusters", 0, 0, 'a'},
 		{"help",     0, 0, 'h'},
 		{"immediate",0, 0, 'i'},
-		{"no_header", 0, 0, 'n'},
+		{"noheader", 0, 0, 'n'},
 		{"parsable", 0, 0, 'p'},
-		{"parsable2", 0, 0, 'P'},
+		{"parsable2",0, 0, 'P'},
 		{"quiet",    0, 0, 'q'},
-		{"sort",    0, 0, 's'},
+		{"sort",     0, 0, 's'},
 		{"usage",    0, 0, 'h'},
 		{"verbose",  0, 0, 'v'},
 		{"version",  0, 0, 'V'},
@@ -249,7 +252,7 @@ static void _job_rep (int argc, char *argv[])
 }
 
 /* 
- * _user_rep - Reports having to do with jobs 
+ * _user_rep - Reports having to do with users 
  * IN argc - count of arguments
  * IN argv - list of arguments
  */
@@ -272,7 +275,30 @@ static void _user_rep (int argc, char *argv[])
 }
 
 /* 
- * _cluster_rep - Reports having to do with jobs 
+ * _resv_rep - Reports having to do with reservations 
+ * IN argc - count of arguments
+ * IN argv - list of arguments
+ */
+static void _resv_rep (int argc, char *argv[]) 
+{
+	int error_code = SLURM_SUCCESS;
+
+	if (strncasecmp (argv[0], "Utilization", 1) == 0) {
+		error_code = resv_utilization((argc - 1), &argv[1]);
+	} else {
+		exit_code = 1;
+		fprintf(stderr, "Not valid report %s\n", argv[0]);
+		fprintf(stderr, "Valid reservation reports are, ");
+		fprintf(stderr, "\"Utilization\"\n");
+	}	
+	
+	if (error_code) {
+		exit_code = 1;
+	}
+}
+
+/* 
+ * _cluster_rep - Reports having to do with clusters 
  * IN argc - count of arguments
  * IN argv - list of arguments
  */
@@ -484,6 +510,18 @@ _process_command (int argc, char *argv[])
 				 argv[0]);
 		}
 		exit_flag = 1;
+	} else if ((strncasecmp (argv[0], "reservation",
+				 MAX(command_len, 2)) == 0)
+		   || (strncasecmp (argv[0], "resv",
+				    MAX(command_len, 2)) == 0)) {
+		if (argc < 2) {
+			exit_code = 1;
+			if (quiet_flag != 1)
+				fprintf(stderr, 
+				        "too few arguments for keyword:%s\n", 
+				        argv[0]);
+		} else 
+			_resv_rep((argc - 1), &argv[1]);
 	} else if (strncasecmp (argv[0], "sort", MAX(command_len, 1)) == 0) {
 		if (argc < 2) {
 			exit_code = 1;
@@ -590,7 +628,7 @@ sreport [<OPTION>] [<COMMAND>]                                             \n\
     Valid <OPTION> values are:                                             \n\
      -a or --all_clusters: Use all clusters instead of current             \n\
      -h or --help: equivalent to \"help\" command                          \n\
-     -n or --no_header: equivalent to \"no_header\" command                \n\
+     -n or --noheader: equivalent to \"noheader\" command                  \n\
      -p or --parsable: output will be '|' delimited with a '|' at the end  \n\
      -P or --parsable2: output will be '|' delimited without a '|' at the end\n\
      -q or --quiet: equivalent to \"quiet\" command                        \n\
@@ -623,6 +661,8 @@ sreport [<OPTION>] [<COMMAND>]                                             \n\
      cluster - AccountUtilizationByUser, UserUtilizationByAccount,         \n\
                UserUtilizationByWckey, Utilization, WCKeyUtilizationByUser \n\
      job     - SizesByAccount, SizesByWckey                                \n\
+     reservation                                                           \n\
+             - Utilization                                                 \n\
      user    - TopUsage                                                    \n\
                                                                            \n\
   <OPTIONS> are different for each report type.                            \n\
@@ -674,6 +714,8 @@ sreport [<OPTION>] [<COMMAND>]                                             \n\
                                    1-49, 50-99, 100-149, > 150).           \n\
              - Jobs=<OPT>       - List of jobs/steps to include in report. \n\
                                   Default is all.                          \n\
+             - Nodes=<OPT>      - Only show jobs that ran on these nodes.  \n\
+                                  Default is all.                          \n\
              - Partitions=<OPT> - List of partitions jobs ran on to include\n\
                                   in report.  Default is all.              \n\
              - PrintJobCount    - When used with the any Sizes report      \n\
@@ -687,6 +729,12 @@ sreport [<OPTION>] [<COMMAND>]                                             \n\
                                   you want only certain users specify them \n\
                                   them with the Users= option.             \n\
                                                                            \n\
+     reservation                                                           \n\
+             - Names=<OPT>      - List of reservations to use for the report\n\
+                                  Default is all.                          \n\
+             - Nodes=<OPT>      - Only show reservations that used these   \n\
+                                  nodes.  Default is all.                  \n\
+                                                                           \n\
      user    - Accounts=<OPT>   - List of accounts to use for the report   \n\
                                   Default is all.                          \n\
              - Group            - Group all accounts together for each user.\n\
@@ -697,7 +745,6 @@ sreport [<OPTION>] [<COMMAND>]                                             \n\
              - Users=<OPT>      - List of users jobs to include in report. \n\
                                   Default is all.                          \n\
                                                                            \n\
-                                                                           \n\
   Below are the format options for each report.                            \n\
                                                                            \n\
        Cluster                                                             \n\
@@ -709,22 +756,27 @@ sreport [<OPTION>] [<COMMAND>]                                             \n\
              - Cluster, CPUCount, Login, Proper, Used, Wckey               \n\
        - Utilization                                                       \n\
              - Allocated, Cluster, CPUCount, Down, Idle, Overcommited,     \n\
-               Reported, Reserved                                          \n\
+               PlannedDown, Reported, Reserved                             \n\
                                                                            \n\
        Job                                                                 \n\
        - Sizes                                                             \n\
              - Account, Cluster                                            \n\
                                                                            \n\
+       Reservation                                                         \n\
+       - Utilization                                                       \n\
+             - Allocated, Associations, Cluster, CPUCount, CPUTime,        \n\
+               End, Idle, Name, Nodes, Start, TotalTime                    \n\
+                                                                           \n\
        User                                                                \n\
        - TopUsage                                                          \n\
              - Account, Cluster, Login, Proper, Used                       \n\
                                                                            \n\
                                                                            \n\
-                                                                           \n\
   Note, valid start/end time formats are...                                \n\
        HH:MM[:SS] [AM|PM]                                                  \n\
        MMDD[YY] or MM/DD[/YY] or MM.DD[.YY]                                \n\
        MM/DD[/YY]-HH:MM[:SS]                                               \n\
+       YYYY-MM-DD[THH[:MM[:SS]]]                                           \n\
                                                                            \n\
                                                                            \n\
   All commands and options are case-insensitive.                         \n\n");
diff --git a/src/sreport/sreport.h b/src/sreport/sreport.h
index a1f2dc6177526560e66c9a3e68d9244849b13717..656f101765be5039e01f68a377abf5bca2fb608a 100644
--- a/src/sreport/sreport.h
+++ b/src/sreport/sreport.h
@@ -5,10 +5,11 @@
  *  Copyright (C) 2002-2007 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Danny Auble <da@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -152,6 +153,8 @@ extern int sort_cluster_dec(sreport_cluster_rec_t *cluster_a,
 			    sreport_cluster_rec_t *cluster_b);
 extern int sort_assoc_dec(sreport_assoc_rec_t *assoc_a,
 			  sreport_assoc_rec_t *assoc_b);
+extern int sort_reservations_dec(acct_reservation_rec_t *resv_a, 
+				 acct_reservation_rec_t *resv_b);
 
 extern int get_uint(char *in_value, uint32_t *out_value, char *type);
 
diff --git a/src/sreport/user_reports.c b/src/sreport/user_reports.c
index 09537f117f5a740a468d2eda27e29815dd5220fa..27d8e77e7cb72b1baaf110b5687cfc5f0a5d045c 100644
--- a/src/sreport/user_reports.c
+++ b/src/sreport/user_reports.c
@@ -6,10 +6,11 @@
  *  Copyright (C) 2008 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Danny Auble <da@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -225,7 +226,7 @@ static int _setup_print_fields_list(List format_list)
 			continue;
 		}
 
-		if(newlen > 0) 
+		if(newlen) 
 			field->len = newlen;
 		
 		list_append(print_fields_list, field);		
@@ -343,7 +344,7 @@ extern int user_top(int argc, char *argv[])
 		       "----------------------------------------\n");
 		printf("Top %u Users %s - %s (%d secs)\n", 
 		       top_limit, start_char, end_char, 
-		       (user_cond->assoc_cond->usage_end 
+		       (int)(user_cond->assoc_cond->usage_end 
 			- user_cond->assoc_cond->usage_start));
 		
 		switch(time_format) {
diff --git a/src/sreport/user_reports.h b/src/sreport/user_reports.h
index 534c4eca1c6e2d3a1fe6a5288d80b3aaa41ebb8b..a4288780a518c7f05f244b35ec30dbe25f950530 100644
--- a/src/sreport/user_reports.h
+++ b/src/sreport/user_reports.h
@@ -6,10 +6,11 @@
  *  Copyright (C) 2008 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Danny Auble <da@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/srun/Makefile.am b/src/srun/Makefile.am
index f16818e2a87d674ac7f74aa09bd64ca21457c46f..7a39a597db930be893e54e8d92bd440bf651ad7c 100644
--- a/src/srun/Makefile.am
+++ b/src/srun/Makefile.am
@@ -3,7 +3,7 @@
 AUTOMAKE_OPTIONS = foreign
 CLEANFILES = core.*
 
-INCLUDES = -I$(top_srcdir) 
+INCLUDES = -I$(top_srcdir) $(BG_INCLUDES)
 
 bin_PROGRAMS = srun
 
@@ -21,6 +21,7 @@ srun_SOURCES = \
 	core-format.c \
 	core-format.h \
 	multi_prog.c multi_prog.h \
+	task_state.c task_state.h \
 	srun.wrapper.c
 
 convenience_libs = \
diff --git a/src/srun/Makefile.in b/src/srun/Makefile.in
index 7eda34bdea94d4dea88e1eeb16036c9504cc79ca..7bb62883ca18fcf8b7fe96898a739f45d936c3bc 100644
--- a/src/srun/Makefile.in
+++ b/src/srun/Makefile.in
@@ -44,14 +44,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -74,7 +78,7 @@ PROGRAMS = $(bin_PROGRAMS)
 am_srun_OBJECTS = srun.$(OBJEXT) opt.$(OBJEXT) srun_job.$(OBJEXT) \
 	srun_pty.$(OBJEXT) debugger.$(OBJEXT) fname.$(OBJEXT) \
 	allocate.$(OBJEXT) core-format.$(OBJEXT) multi_prog.$(OBJEXT) \
-	srun.wrapper.$(OBJEXT)
+	task_state.$(OBJEXT) srun.wrapper.$(OBJEXT)
 srun_OBJECTS = $(am_srun_OBJECTS)
 am__DEPENDENCIES_1 = $(top_builddir)/src/api/libslurm.o
 srun_DEPENDENCIES = $(am__DEPENDENCIES_1)
@@ -108,6 +112,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
@@ -269,7 +277,7 @@ top_builddir = @top_builddir@
 top_srcdir = @top_srcdir@
 AUTOMAKE_OPTIONS = foreign
 CLEANFILES = core.*
-INCLUDES = -I$(top_srcdir) 
+INCLUDES = -I$(top_srcdir) $(BG_INCLUDES)
 srun_SOURCES = \
 	srun.c srun.h \
 	opt.c opt.h \
@@ -284,6 +292,7 @@ srun_SOURCES = \
 	core-format.c \
 	core-format.h \
 	multi_prog.c multi_prog.h \
+	task_state.c task_state.h \
 	srun.wrapper.c
 
 convenience_libs = \
@@ -374,6 +383,7 @@ distclean-compile:
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/srun.wrapper.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/srun_job.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/srun_pty.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/task_state.Po@am__quote@
 
 .c.o:
 @am__fastdepCC_TRUE@	$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
diff --git a/src/srun/allocate.c b/src/srun/allocate.c
index 70bfc6d63a7d384add12c068f51eb22ea9b6a43a..5a84a35b8db1853184c277e73385623c2f29dba5 100644
--- a/src/srun/allocate.c
+++ b/src/srun/allocate.c
@@ -1,14 +1,15 @@
 /*****************************************************************************\
- * src/srun/allocate.c - srun functions for managing node allocations
- * $Id: allocate.c 15808 2008-12-02 23:38:47Z da $
+ *  src/srun/allocate.c - srun functions for managing node allocations
  *****************************************************************************
- *  Copyright (C) 2002-2006 The Regents of the University of California.
+ *  Copyright (C) 2002-2007 The Regents of the University of California.
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Mark Grondona <mgrondona@llnl.gov>.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -61,6 +62,19 @@
 #include "src/srun/opt.h"
 #include "src/srun/debugger.h"
 
+#ifdef HAVE_BG
+#include "src/api/job_info.h"
+#include "src/api/node_select_info.h"
+#include "src/common/node_select.h"
+#include "src/plugins/select/bluegene/plugin/bg_boot_time.h"
+#include "src/plugins/select/bluegene/wrap_rm_api.h"
+#endif
+
+#ifdef HAVE_CRAY_XT
+#include "src/common/node_select.h"
+#endif
+
+
 #define MAX_ALLOC_WAIT 60	/* seconds */
 #define MIN_ALLOC_WAIT  5	/* seconds */
 #define MAX_RETRIES    10
@@ -84,6 +98,18 @@ static void _exit_on_signal(int signo);
 static void _signal_while_allocating(int signo);
 static void  _intr_handler(int signo);
 
+#ifdef HAVE_BG
+#define POLL_SLEEP 3			/* retry interval in seconds  */
+static int _wait_bluegene_block_ready(
+			resource_allocation_response_msg_t *alloc);
+static int _blocks_dealloc(void);
+#endif
+
+#ifdef HAVE_CRAY_XT
+static int  _claim_reservation(resource_allocation_response_msg_t *alloc);
+#endif
+
+
 static sig_atomic_t destroy_job = 0;
 
 static void _set_pending_job_id(uint32_t job_id)
@@ -165,6 +191,13 @@ static bool _retry()
 		else
 			return false;
 		sleep (++retries);
+	} else if (errno == EINTR) {
+		/* srun may be interrupted by the BLCR checkpoint signal.
+		 * XXX: this will cause the old job to be cancelled
+		 * and a new one to be allocated.
+		 */
+		debug("Syscall interrupted while allocating resources, retrying.");
+		return true;
 	} else {
 		error("Unable to allocate resources: %m");
 		return false;
@@ -182,6 +215,120 @@ _intr_handler(int signo)
 	destroy_job = 1;
 }
 
+#ifdef HAVE_BG
+/* returns 1 if job and nodes are ready for job to begin, 0 otherwise */
+static int _wait_bluegene_block_ready(resource_allocation_response_msg_t *alloc)
+{
+	int is_ready = 0, i, rc;
+	char *block_id = NULL;
+	int cur_delay = 0;
+	int max_delay = BG_FREE_PREVIOUS_BLOCK + BG_MIN_BLOCK_BOOT +
+		(BG_INCR_BLOCK_BOOT * alloc->node_cnt);
+
+	pending_job_id = alloc->job_id;
+	select_g_get_jobinfo(alloc->select_jobinfo, SELECT_DATA_BLOCK_ID,
+			     &block_id);
+
+	for (i=0; (cur_delay < max_delay); i++) {
+		if(i == 1)
+			debug("Waiting for block %s to become ready for job",
+			     block_id);
+		if (i) {
+			sleep(POLL_SLEEP);
+			rc = _blocks_dealloc();
+			if ((rc == 0) || (rc == -1)) 
+				cur_delay += POLL_SLEEP;
+			debug2("still waiting");
+		}
+
+		rc = slurm_job_node_ready(alloc->job_id);
+
+		if (rc == READY_JOB_FATAL)
+			break;				/* fatal error */
+		if (rc == READY_JOB_ERROR)		/* error */
+			continue;			/* retry */
+		if ((rc & READY_JOB_STATE) == 0)	/* job killed */
+			break;
+		if (rc & READY_NODE_STATE) {		/* job and node ready */
+			is_ready = 1;
+			break;
+		}
+	}
+	if (is_ready)
+     		debug("Block %s is ready for job", block_id);
+	else if(!destroy_job)
+		error("Block %s still not ready", block_id);
+	else /* this should never happen, but if destroy_job is
+		set, send back not ready */
+		is_ready = 0;
+
+	xfree(block_id);
+	pending_job_id = 0;
+
+	return is_ready;
+}
+
+/*
+ * Test if any BG blocks are in the deallocating state.  Since they
+ * are probably related to this job, we will want to sleep longer.
+ * RET	1:  deallocate in progress
+ *	0:  no deallocate in progress
+ *     -1: error occurred
+ */
+static int _blocks_dealloc(void)
+{
+	static node_select_info_msg_t *bg_info_ptr = NULL, *new_bg_ptr = NULL;
+	int rc = 0, error_code = 0, i;
+	
+	if (bg_info_ptr) {
+		error_code = slurm_load_node_select(bg_info_ptr->last_update, 
+						   &new_bg_ptr);
+		if (error_code == SLURM_SUCCESS)
+			select_g_free_node_info(&bg_info_ptr);
+		else if (slurm_get_errno() == SLURM_NO_CHANGE_IN_DATA) {
+			error_code = SLURM_SUCCESS;
+			new_bg_ptr = bg_info_ptr;
+		}
+	} else {
+		error_code = slurm_load_node_select((time_t) NULL, &new_bg_ptr);
+	}
+
+	if (error_code) {
+		error("slurm_load_partitions: %s\n",
+		      slurm_strerror(slurm_get_errno()));
+		return -1;
+	}
+	for (i=0; i<new_bg_ptr->record_count; i++) {
+		if(new_bg_ptr->bg_info_array[i].state 
+		   == RM_PARTITION_DEALLOCATING) {
+			rc = 1;
+			break;
+		}
+	}
+	bg_info_ptr = new_bg_ptr;
+	return rc;
+}
+#endif	/* HAVE_BG */
+
+#ifdef HAVE_CRAY_XT
+/* returns 1 if the ALPS reservation was successfully claimed, 0 otherwise */
+static int _claim_reservation(resource_allocation_response_msg_t *alloc)
+{
+	int rc = 0;
+	char *resv_id = NULL;
+
+	select_g_get_jobinfo(alloc->select_jobinfo, SELECT_DATA_RESV_ID,
+			     &resv_id);
+	if (resv_id == NULL)
+		return rc;
+	if (basil_resv_conf(resv_id, alloc->job_id) == SLURM_SUCCESS)
+		rc = 1;
+	xfree(resv_id);
+	return rc;
+}
+#endif
+
+
 int
 allocate_test(void)
 {
@@ -242,6 +389,30 @@ allocate_nodes(void)
 		}
 	}
 	
+	if(resp && !destroy_job) {
+		/*
+		 * Allocation granted!
+		 */
+#ifdef HAVE_BG
+		if (!_wait_bluegene_block_ready(resp)) {
+			if(!destroy_job)
+				error("Something is wrong with the "
+				      "boot of the block.");
+			goto relinquish;
+		}
+#endif
+#ifdef HAVE_CRAY_XT
+		if (!_claim_reservation(resp)) {
+			if(!destroy_job)
+				error("Something is wrong with the ALPS "
+				      "resource reservation.");
+			goto relinquish;
+		}
+#endif
+	} else if (destroy_job) {
+		goto relinquish;
+	}
+
 	xsignal(SIGHUP, _exit_on_signal);
 	xsignal(SIGINT, ignore_signal);
 	xsignal(SIGQUIT, ignore_signal);
@@ -253,6 +424,14 @@ allocate_nodes(void)
 	job_desc_msg_destroy(j);
 
 	return resp;
+
+relinquish:
+
+	if(!destroy_job)
+		slurm_complete_job(resp->job_id, 1);
+	slurm_free_resource_allocation_response_msg(resp);
+	exit(1);
+	return NULL;
 }
 
 void
@@ -287,7 +466,7 @@ existing_allocation(void)
                 else
                         error ("Unable to confirm allocation for job %u: %m",
                               old_job_id);
-                info ("Check SLURM_JOBID environment variable "
+                info ("Check SLURM_JOB_ID environment variable "
                       "for expired or invalid job.");
                 exit(1);
         }
@@ -307,7 +486,6 @@ slurmctld_msg_init(void)
 		return slurmctld_fd;
 
 	slurmctld_fd = -1;
-	slurmctld_comm_addr.hostname = NULL;
 	slurmctld_comm_addr.port = 0;
 
 	if ((slurmctld_fd = slurm_init_msg_engine_port(0)) < 0)
@@ -318,11 +496,8 @@ slurmctld_msg_init(void)
 	/* hostname is not set,  so slurm_get_addr fails
 	   slurm_get_addr(&slurm_address, &port, hostname, sizeof(hostname)); */
 	port = ntohs(slurm_address.sin_port);
-	slurmctld_comm_addr.hostname = xstrdup(opt.ctrl_comm_ifhn);
 	slurmctld_comm_addr.port     = port;
-	debug2("slurmctld messages to host=%s,port=%u", 
-	       slurmctld_comm_addr.hostname, 
-	       slurmctld_comm_addr.port);
+	debug2("srun PMI messages to port=%u", slurmctld_comm_addr.port);
 
 	return slurmctld_fd;
 }
@@ -347,9 +522,8 @@ job_desc_msg_create_from_opts ()
 		j->name   = xstrdup(opt.job_name);
 	else
 		j->name   = xstrdup(opt.cmd_name);
-
-	if (opt.wckey) 
-		xstrfmtcat(j->name, "\"%s", opt.wckey);
+	j->reservation    = xstrdup(opt.reservation);
+	j->wckey          = xstrdup(opt.wckey);
 	
 	j->req_nodes      = xstrdup(opt.nodelist);
 	
@@ -387,9 +561,19 @@ job_desc_msg_create_from_opts ()
 	j->dependency     = opt.dependency;
 	if (opt.nice)
 		j->nice   = NICE_OFFSET + opt.nice;
-	j->task_dist      = opt.distribution;
+
+	if (opt.cpu_bind)
+		j->cpu_bind       = opt.cpu_bind;
+	if (opt.cpu_bind_type)
+		j->cpu_bind_type  = opt.cpu_bind_type;
+	if (opt.mem_bind)
+		j->mem_bind       = opt.mem_bind;
+	if (opt.mem_bind_type)
+		j->mem_bind_type  = opt.mem_bind_type;
 	if (opt.plane_size != NO_VAL)
 		j->plane_size     = opt.plane_size;
+	j->task_dist      = opt.distribution;
+
 	j->group_id       = opt.gid;
 	j->mail_type      = opt.mail_type;
 
@@ -500,8 +684,8 @@ job_desc_msg_destroy(job_desc_msg_t *j)
 	}
 }
 
-int
-create_job_step(srun_job_t *job)
+extern int
+create_job_step(srun_job_t *job, bool use_all_cpus)
 {
 	int i, rc;
 	SigFunc *oquitf = NULL, *ointf = NULL, *otermf = NULL;
@@ -518,16 +702,23 @@ create_job_step(srun_job_t *job)
 	if (!opt.nprocs_set && (opt.ntasks_per_node != NO_VAL))
 		job->ntasks = opt.nprocs = job->nhosts * opt.ntasks_per_node;
 	job->ctx_params.task_count = opt.nprocs;
-	
-	job->ctx_params.cpu_count = opt.overcommit ? job->ctx_params.node_count
-		: (opt.nprocs*opt.cpus_per_task);
+
+	if (use_all_cpus)
+		job->ctx_params.cpu_count = job->cpu_count;
+	else if (opt.overcommit)
+		job->ctx_params.cpu_count = job->ctx_params.node_count;
+	else
+		job->ctx_params.cpu_count = opt.nprocs*opt.cpus_per_task;
 	
 	job->ctx_params.relative = (uint16_t)opt.relative;
 	job->ctx_params.ckpt_interval = (uint16_t)opt.ckpt_interval;
-	job->ctx_params.ckpt_path = opt.ckpt_path;
+	job->ctx_params.ckpt_dir = opt.ckpt_dir;
 	job->ctx_params.exclusive = (uint16_t)opt.exclusive;
 	job->ctx_params.immediate = (uint16_t)opt.immediate;
 	job->ctx_params.verbose_level = (uint16_t)_verbose;
+	if (opt.resv_port_cnt != NO_VAL)
+		job->ctx_params.resv_port_cnt = (uint16_t) opt.resv_port_cnt;
+
 	switch (opt.distribution) {
 	case SLURM_DIST_BLOCK:
 	case SLURM_DIST_ARBITRARY:
@@ -546,6 +737,7 @@ create_job_step(srun_job_t *job)
 		job->ctx_params.task_dist = (job->ctx_params.task_count <= 
 			job->ctx_params.node_count) 
 			? SLURM_DIST_CYCLIC : SLURM_DIST_BLOCK;
+		opt.distribution = job->ctx_params.task_dist;
 		break;
 
 	}
@@ -583,19 +775,22 @@ create_job_step(srun_job_t *job)
 		rc = slurm_get_errno();
 
 		if (opt.immediate ||
-		    ((rc != ESLURM_NODES_BUSY) && (rc != ESLURM_DISABLED))) {
+		    ((rc != ESLURM_NODES_BUSY) && (rc != ESLURM_PORTS_BUSY) &&
+		     (rc != ESLURM_PROLOG_RUNNING) && 
+		     (rc != ESLURM_DISABLED))) {
 			error ("Unable to create job step: %m");
 			return -1;
 		}
 		
 		if (i == 0) {
-			info("Job step creation temporarily disabled, retrying");
+			info("Job step creation temporarily disabled, "
+			     "retrying");
 			ointf  = xsignal(SIGINT,  _intr_handler);
 			otermf  = xsignal(SIGTERM, _intr_handler);
 			oquitf  = xsignal(SIGQUIT, _intr_handler);
 		} else
 			verbose("Job step creation still disabled, retrying");
-		sleep(MIN((i*10), 60));
+		sleep(MIN((i*10+1), 60));
 	}
 	if (i > 0) {
 		xsignal(SIGINT,  ointf);
@@ -609,7 +804,7 @@ create_job_step(srun_job_t *job)
 
 	slurm_step_ctx_get(job->step_ctx, SLURM_STEP_CTX_STEPID, &job->stepid);
 	/*  Number of hosts in job may not have been initialized yet if 
-	 *    --jobid was used or only SLURM_JOBID was set in user env.
+	 *    --jobid was used or only SLURM_JOB_ID was set in user env.
 	 *    Reset the value here just in case.
 	 */
 	slurm_step_ctx_get(job->step_ctx, SLURM_STEP_CTX_NUM_HOSTS,
@@ -623,3 +818,4 @@ create_job_step(srun_job_t *job)
 	return 0;
 }
 
+
diff --git a/src/srun/allocate.h b/src/srun/allocate.h
index 85672c5528384a10d90558f0541a10dbab120aa2..6b830e6e5e73c3ee2a058c9ce9dee1d4adb18961 100644
--- a/src/srun/allocate.h
+++ b/src/srun/allocate.h
@@ -1,14 +1,15 @@
 /*****************************************************************************\
  * src/srun/allocate.h - node allocation functions for srun
- * $Id: allocate.h 14570 2008-07-18 22:06:26Z da $
+ * $Id: allocate.h 17313 2009-04-21 20:28:06Z lipari $
  *****************************************************************************
  *  Copyright (C) 2002 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Mark Grondona <mgrondona@llnl.gov>.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -44,7 +45,6 @@
 #include "src/srun/srun_job.h"
 
 typedef struct slurmctld_communication_addr {
-	char *hostname;
 	uint16_t port;
 } slurmctld_comm_addr_t;
 
@@ -89,28 +89,21 @@ job_desc_msg_t * job_desc_msg_create_from_opts ();
 void job_desc_msg_destroy (job_desc_msg_t *j);
 
 /*
- * Check for SLURM_JOBID environment variable, and if it is a valid
+ * Check for SLURM_JOB_ID environment variable, and if it is a valid
  * jobid, return a pseudo allocation response pointer.
  *
- * Returns NULL if SLURM_JOBID is not present or is invalid.
+ * Returns NULL if SLURM_JOB_ID is not present or is invalid.
  */
 resource_allocation_response_msg_t * existing_allocation(void);
 
-/*
- * Return the jobid number stored in SLURM_JOBID env var
- *
- * Returns 0 if SLURM_JOBID is not set in current environment, or
- * is invalid.
- */
-uint32_t jobid_from_env(void);
-
 /*
  * Create a job step given the job information stored in 'j'
  * After returning, 'j' is filled in with information for job step.
+ * IN use_all_cpus - true to use every CPU allocated to the job
  *
  * Returns -1 if job step creation failure, 0 otherwise
  */
-int create_job_step(srun_job_t *j);
+int create_job_step(srun_job_t *j, bool use_all_cpus);
 
 /* set the job for debugging purpose */
 void set_allocate_job(srun_job_t *job);
diff --git a/src/srun/core-format.c b/src/srun/core-format.c
index 1edf3fe6a31067d9496bc36ad1412b61956d4bec..2677c903d12708d716c8cadb704600d3d2fac962 100644
--- a/src/srun/core-format.c
+++ b/src/srun/core-format.c
@@ -1,14 +1,15 @@
 /*****************************************************************************\
  * src/srun/core-format.c - Change corefile characteristics for job
- * $Id: core-format.c 13672 2008-03-19 23:10:58Z jette $
+ * $Id: core-format.c 16616 2009-02-20 17:00:27Z jette $
  *****************************************************************************
  *  Copyright (C) 2002 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Mark Grondona <mgrondona@llnl.gov>.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/srun/core-format.h b/src/srun/core-format.h
index 696e522f39bd4fbbac17735bdfb753243f66e044..f4cf5c9fe7a86525156205aca5a7abc8b8ad7b1f 100644
--- a/src/srun/core-format.h
+++ b/src/srun/core-format.h
@@ -1,14 +1,15 @@
 /*****************************************************************************\
  * src/srun/core-format.h - Change corefile characteristics for job
- * $Id: core-format.h 13672 2008-03-19 23:10:58Z jette $
+ * $Id: core-format.h 16616 2009-02-20 17:00:27Z jette $
  *****************************************************************************
  *  Copyright (C) 2002 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Mark Grondona <mgrondona@llnl.gov>.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/srun/debugger.c b/src/srun/debugger.c
index e27b37e3c11b3d3e0d0ead0fb84efc7a3c89d480..77adfd68eb8b43226167826e4107917d9afb9055 100644
--- a/src/srun/debugger.c
+++ b/src/srun/debugger.c
@@ -5,10 +5,11 @@
  *  Copyright (C) 2002 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Mark Grondona <grondona1@llnl.gov>, et. al.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/srun/debugger.h b/src/srun/debugger.h
index 6c34800df32dba673cfa9c17e034e332bd06fa6a..d401d6851d1c73c0ab29640e0552cb46faf23ec3 100644
--- a/src/srun/debugger.h
+++ b/src/srun/debugger.h
@@ -29,6 +29,11 @@
 #define VOLATILE
 #endif
 #endif
+
+#if HAVE_CONFIG_H
+#  include "config.h"
+#endif
+
 #include "src/srun/srun_job.h"
 /*****************************************************************************
 *                                DEBUGGING SUPPORT                           *
@@ -42,11 +47,20 @@
  * DO NOT change the name of this structure or its fields. The debugger knows
  * them, and will be confused if you change them.
  */
+
+#ifdef HAVE_BG_FILES
+/* On bluegene systems the structure below is already defined in
+ * wrap_rm_api.h.  To avoid a conflict when allocate.c includes this
+ * file, we use that definition instead of redefining it here.
+ */
+# include "src/plugins/select/bluegene/wrap_rm_api.h"
+#else
 typedef struct {
   char * host_name;           /* Something we can pass to inet_addr */
   char * executable_name;     /* The name of the image */
   int    pid;		      /* The pid of the process */
 } MPIR_PROCDESC;
+#endif
 
 /* Array of procdescs for debugging purposes */
 extern MPIR_PROCDESC *MPIR_proctable;
diff --git a/src/srun/fname.c b/src/srun/fname.c
index 2b4093234fe0cd9da268a732abf719873e5a5e02..47e6ecc5a83ab57f81256275ad59b0c675072aad 100644
--- a/src/srun/fname.c
+++ b/src/srun/fname.c
@@ -4,10 +4,11 @@
  *  Copyright (C) 2002 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Mark Grondona <mgrondona@llnl.gov>.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/srun/fname.h b/src/srun/fname.h
index ff51145536480fe640e9d9897f199a4dca3e2e7d..574fb6852596e971b6e8b11dbf4d8f5a283ed7fa 100644
--- a/src/srun/fname.h
+++ b/src/srun/fname.h
@@ -4,10 +4,11 @@
  *  Copyright (C) 2002 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Mark Grondona <mgrondona@llnl.gov>.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/srun/multi_prog.c b/src/srun/multi_prog.c
index 91d09ce0ec0ac8eaf31a8b54e80029b1e2a3fbc5..4c3e6fbcac54362883d1c1de2575446117016b2f 100644
--- a/src/srun/multi_prog.c
+++ b/src/srun/multi_prog.c
@@ -11,10 +11,11 @@
  *  and
  *  Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/srun/multi_prog.h b/src/srun/multi_prog.h
index e32d98efbd345ff0556cf517ede1e05317d30e7a..1405a14dad86ab7a900d95507e991d2a85ee521b 100644
--- a/src/srun/multi_prog.h
+++ b/src/srun/multi_prog.h
@@ -7,10 +7,11 @@
  *  and
  *  Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/srun/opt.c b/src/srun/opt.c
index 9a0f044600824c81a17c23ee62d887493ccb3eec..c7ae97a64c5f6c2f80ceaa42ea7ff8a58ba0c548 100644
--- a/src/srun/opt.c
+++ b/src/srun/opt.c
@@ -2,13 +2,14 @@
  *  opt.c - options processing for srun
  *****************************************************************************
  *  Copyright (C) 2002-2007 The Regents of the University of California.
- *  Copyright (C) 2008 Lawrence Livermore National Security.
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Mark Grondona <grondona1@llnl.gov>, et. al.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -61,7 +62,6 @@
 #include <stdio.h>
 #include <stdlib.h>		/* getenv     */
 #include <pwd.h>		/* getpwuid   */
-#include <ctype.h>		/* isdigit    */
 #include <sys/param.h>		/* MAXPATHLEN */
 #include <sys/stat.h>
 #include <unistd.h>
@@ -71,22 +71,24 @@
 
 #include "src/common/list.h"
 #include "src/common/log.h"
+#include "src/common/mpi.h"
+#include "src/common/optz.h"
 #include "src/common/parse_time.h"
+#include "src/common/plugstack.h"
 #include "src/common/proc_args.h"
 #include "src/common/slurm_protocol_api.h"
 #include "src/common/slurm_protocol_interface.h"
+#include "src/common/slurm_rlimits_info.h"
+#include "src/common/slurm_resource_info.h"
 #include "src/common/uid.h"
 #include "src/common/xmalloc.h"
 #include "src/common/xstring.h"
-#include "src/common/slurm_rlimits_info.h"
-#include "src/common/plugstack.h"
-#include "src/common/optz.h"
+
 #include "src/api/pmi_server.h"
 
 #include "src/srun/multi_prog.h"
 #include "src/srun/opt.h"
 #include "src/srun/debugger.h"
-#include "src/common/mpi.h"
 
 /* generic OPT_ definitions -- mainly for use with env vars  */
 #define OPT_NONE        0x00
@@ -97,6 +99,7 @@
 #define OPT_OVERCOMMIT  0x06
 #define OPT_CORE        0x07
 #define OPT_CONN_TYPE	0x08
+#define OPT_RESV_PORTS	0x09
 #define OPT_NO_ROTATE	0x0a
 #define OPT_GEOMETRY	0x0b
 #define OPT_MPI         0x0c
@@ -125,7 +128,8 @@
 #define LONG_OPT_UID         0x10a
 #define LONG_OPT_GID         0x10b
 #define LONG_OPT_MPI         0x10c
-#define LONG_OPT_CORE	     0x10e
+#define LONG_OPT_RESV_PORTS  0x10d
+#define LONG_OPT_CORE        0x10e
 #define LONG_OPT_DEBUG_TS    0x110
 #define LONG_OPT_CONNTYPE    0x111
 #define LONG_OPT_TEST_ONLY   0x113
@@ -142,7 +146,6 @@
 #define LONG_OPT_NICE        0x11e
 #define LONG_OPT_CPU_BIND    0x11f
 #define LONG_OPT_MEM_BIND    0x120
-#define LONG_OPT_CTRL_COMM_IFHN 0x121
 #define LONG_OPT_MULTI       0x122
 #define LONG_OPT_COMMENT     0x124
 #define LONG_OPT_SOCKETSPERNODE  0x130
@@ -164,10 +167,12 @@
 #define LONG_OPT_GET_USER_ENV    0x145
 #define LONG_OPT_PTY             0x146
 #define LONG_OPT_CHECKPOINT      0x147
-#define LONG_OPT_CHECKPOINT_PATH 0x148
+#define LONG_OPT_CHECKPOINT_DIR  0x148
 #define LONG_OPT_OPEN_MODE       0x149
 #define LONG_OPT_ACCTG_FREQ      0x14a
 #define LONG_OPT_WCKEY           0x14b
+#define LONG_OPT_RESERVATION     0x14c
+#define LONG_OPT_RESTART_DIR     0x14d
 
 /*---- global variables, defined in opt.h ----*/
 int _verbose;
@@ -203,13 +208,8 @@ static bool _opt_verify(void);
 static void _process_env_var(env_vars_t *e, const char *val);
 
 static bool  _under_parallel_debugger(void);
-
 static void  _usage(void);
 static bool  _valid_node_list(char **node_list_pptr);
-static int   _verify_cpu_bind(const char *arg, char **cpu_bind,
-			      cpu_bind_type_t *flags);
-static int   _verify_mem_bind(const char *arg, char **mem_bind,
-			      mem_bind_type_t *flags);
 
 /*---[ end forward declarations of static functions ]---------------------*/
 
@@ -271,273 +271,6 @@ static bool _valid_node_list(char **node_list_pptr)
 	return true;
 }
 
-/*
- * _isvalue
- * returns 1 is the argument appears to be a value, 0 otherwise
- */
-static int _isvalue(char *arg) {
-    	if (isdigit(*arg)) {	 /* decimal values and 0x... hex values */
-	    	return 1;
-	}
-
-	while (isxdigit(*arg)) { /* hex values not preceded by 0x */
-		arg++;
-	}
-	if (*arg == ',' || *arg == '\0') { /* end of field or string */
-	    	return 1;
-	}
-
-	return 0;	/* not a value */
-}
-
-/*
- * First clear all of the bits in "*data" which are set in "clear_mask".
- * Then set all of the bits in "*data" that are set in "set_mask".
- */
-static void clear_then_set(int *data, int clear_mask, int set_mask)
-{
-	*data &= ~clear_mask;
-	*data |= set_mask;
-}
-
-static void _print_cpu_bind_help()
-{
-	printf(
-"CPU bind options:\n"
-"    --cpu_bind=         Bind tasks to CPUs\n"
-"        q[uiet]         quietly bind before task runs (default)\n"
-"        v[erbose]       verbosely report binding before task runs\n"
-"        no[ne]          don't bind tasks to CPUs (default)\n"
-"        rank            bind by task rank\n"
-"        map_cpu:<list>  specify a CPU ID binding for each task\n"
-"                        where <list> is <cpuid1>,<cpuid2>,...<cpuidN>\n"
-"        mask_cpu:<list> specify a CPU ID binding mask for each task\n"
-"                        where <list> is <mask1>,<mask2>,...<maskN>\n"
-"        sockets         auto-generated masks bind to sockets\n"
-"        cores           auto-generated masks bind to cores\n"
-"        threads         auto-generated masks bind to threads\n"
-"        help            show this help message\n");
-}
-
-/*
- * verify cpu_bind arguments
- *
- * we support different launch policy names
- * we also allow a verbose setting to be specified
- *     --cpu_bind=threads
- *     --cpu_bind=cores
- *     --cpu_bind=sockets
- *     --cpu_bind=v
- *     --cpu_bind=rank,v
- *     --cpu_bind=rank
- *     --cpu_bind={MAP_CPU|MASK_CPU}:0,1,2,3,4
- *
- *
- * returns -1 on error, 0 otherwise
- */
-static int _verify_cpu_bind(const char *arg, char **cpu_bind, 
-			    cpu_bind_type_t *flags)
-{
-	char *buf, *p, *tok;
-	int bind_bits =
-		CPU_BIND_NONE|CPU_BIND_RANK|CPU_BIND_MAP|CPU_BIND_MASK;
-	int bind_to_bits =
-		CPU_BIND_TO_SOCKETS|CPU_BIND_TO_CORES|CPU_BIND_TO_THREADS;
-
-	if (arg == NULL) {
-	    	return 0;
-	}
-
-    	buf = xstrdup(arg);
-    	p = buf;
-	/* change all ',' delimiters not followed by a digit to ';'  */
-	/* simplifies parsing tokens while keeping map/mask together */
-	while (p[0] != '\0') {
-	    	if ((p[0] == ',') && (!_isvalue(&(p[1]))))
-			p[0] = ';';
-		p++;
-	}
-
-	p = buf;
-	while ((tok = strsep(&p, ";"))) {
-		if (strcasecmp(tok, "help") == 0) {
-			_print_cpu_bind_help();
-			return 1;
-		} else if ((strcasecmp(tok, "q") == 0) ||
-			   (strcasecmp(tok, "quiet") == 0)) {
-		        *flags &= ~CPU_BIND_VERBOSE;
-		} else if ((strcasecmp(tok, "v") == 0) ||
-			   (strcasecmp(tok, "verbose") == 0)) {
-		        *flags |= CPU_BIND_VERBOSE;
-		} else if ((strcasecmp(tok, "no") == 0) ||
-			   (strcasecmp(tok, "none") == 0)) {
-			clear_then_set((int *)flags, bind_bits, CPU_BIND_NONE);
-			xfree(*cpu_bind);
-		} else if (strcasecmp(tok, "rank") == 0) {
-			clear_then_set((int *)flags, bind_bits, CPU_BIND_RANK);
-			xfree(*cpu_bind);
-		} else if ((strncasecmp(tok, "map_cpu", 7) == 0) ||
-		           (strncasecmp(tok, "mapcpu", 6) == 0)) {
-			char *list;
-			list = strsep(&tok, ":=");
-			list = strsep(&tok, ":=");
-			clear_then_set((int *)flags, bind_bits, CPU_BIND_MAP);
-			xfree(*cpu_bind);
-			if (list && *list) {
-				*cpu_bind = xstrdup(list);
-			} else {
-				error("missing list for \"--cpu_bind=map_cpu:<list>\"");
-				xfree(buf);
-				return 1;
-			}
-		} else if ((strncasecmp(tok, "mask_cpu", 8) == 0) ||
-		           (strncasecmp(tok, "maskcpu", 7) == 0)) {
-			char *list;
-			list = strsep(&tok, ":=");
-			list = strsep(&tok, ":=");
-			clear_then_set((int *)flags, bind_bits, CPU_BIND_MASK);
-			xfree(*cpu_bind);
-			if (list && *list) {
-				*cpu_bind = xstrdup(list);
-			} else {
-				error("missing list for \"--cpu_bind=mask_cpu:<list>\"");
-				xfree(buf);
-				return 1;
-			}
-		} else if ((strcasecmp(tok, "socket") == 0) ||
-		           (strcasecmp(tok, "sockets") == 0)) {
-			clear_then_set((int *)flags, bind_to_bits,
-				       CPU_BIND_TO_SOCKETS);
-		} else if ((strcasecmp(tok, "core") == 0) ||
-		           (strcasecmp(tok, "cores") == 0)) {
-			clear_then_set((int *)flags, bind_to_bits,
-				       CPU_BIND_TO_CORES);
-		} else if ((strcasecmp(tok, "thread") == 0) ||
-		           (strcasecmp(tok, "threads") == 0)) {
-			clear_then_set((int *)flags, bind_to_bits,
-				       CPU_BIND_TO_THREADS);
-		} else {
-			error("unrecognized --cpu_bind argument \"%s\"", tok);
-			xfree(buf);
-			return 1;
-		}
-	}
-
-	xfree(buf);
-	return 0;
-}
-
-static void _print_mem_bind_help()
-{
-			printf(
-"Memory bind options:\n"
-"    --mem_bind=         Bind memory to locality domains (ldom)\n"
-"        q[uiet]         quietly bind before task runs (default)\n"
-"        v[erbose]       verbosely report binding before task runs\n"
-"        no[ne]          don't bind tasks to memory (default)\n"
-"        rank            bind by task rank\n"
-"        local           bind to memory local to processor\n"
-"        map_mem:<list>  specify a memory binding for each task\n"
-"                        where <list> is <cpuid1>,<cpuid2>,...<cpuidN>\n"
-"        mask_mem:<list> specify a memory binding mask for each tasks\n"
-"                        where <list> is <mask1>,<mask2>,...<maskN>\n"
-"        help            show this help message\n");
-}
-
-/*
- * verify mem_bind arguments
- *
- * we support different memory binding names
- * we also allow a verbose setting to be specified
- *     --mem_bind=v
- *     --mem_bind=rank,v
- *     --mem_bind=rank
- *     --mem_bind={MAP_MEM|MASK_MEM}:0,1,2,3,4
- *
- * returns -1 on error, 0 otherwise
- */
-static int _verify_mem_bind(const char *arg, char **mem_bind, 
-			    mem_bind_type_t *flags)
-{
-	char *buf, *p, *tok;
-	int bind_bits = MEM_BIND_NONE|MEM_BIND_RANK|MEM_BIND_LOCAL|
-		MEM_BIND_MAP|MEM_BIND_MASK;
-
-	if (arg == NULL) {
-	    	return 0;
-	}
-
-    	buf = xstrdup(arg);
-    	p = buf;
-	/* change all ',' delimiters not followed by a digit to ';'  */
-	/* simplifies parsing tokens while keeping map/mask together */
-	while (p[0] != '\0') {
-	    	if ((p[0] == ',') && (!_isvalue(&(p[1]))))
-			p[0] = ';';
-		p++;
-	}
-
-	p = buf;
-	while ((tok = strsep(&p, ";"))) {
-		if (strcasecmp(tok, "help") == 0) {
-			_print_mem_bind_help();
-			return 1;
-			
-		} else if ((strcasecmp(tok, "q") == 0) ||
-			   (strcasecmp(tok, "quiet") == 0)) {
-		        *flags &= ~MEM_BIND_VERBOSE;
-		} else if ((strcasecmp(tok, "v") == 0) ||
-			   (strcasecmp(tok, "verbose") == 0)) {
-		        *flags |= MEM_BIND_VERBOSE;
-		} else if ((strcasecmp(tok, "no") == 0) ||
-			   (strcasecmp(tok, "none") == 0)) {
-			clear_then_set((int *)flags, bind_bits, MEM_BIND_NONE);
-			xfree(*mem_bind);
-		} else if (strcasecmp(tok, "rank") == 0) {
-			clear_then_set((int *)flags, bind_bits, MEM_BIND_RANK);
-			xfree(*mem_bind);
-		} else if (strcasecmp(tok, "local") == 0) {
-			clear_then_set((int *)flags, bind_bits, MEM_BIND_LOCAL);
-			xfree(*mem_bind);
-		} else if ((strncasecmp(tok, "map_mem", 7) == 0) ||
-		           (strncasecmp(tok, "mapmem", 6) == 0)) {
-			char *list;
-			list = strsep(&tok, ":=");
-			list = strsep(&tok, ":=");
-			clear_then_set((int *)flags, bind_bits, MEM_BIND_MAP);
-			xfree(*mem_bind);
-			if (list && *list) {
-				*mem_bind = xstrdup(list);
-			} else {
-				error("missing list for \"--mem_bind=map_mem:<list>\"");
-				xfree(buf);
-				return 1;
-			}
-		} else if ((strncasecmp(tok, "mask_mem", 8) == 0) ||
-		           (strncasecmp(tok, "maskmem", 7) == 0)) {
-			char *list;
-			list = strsep(&tok, ":=");
-			list = strsep(&tok, ":=");
-			clear_then_set((int *)flags, bind_bits, MEM_BIND_MASK);
-			xfree(*mem_bind);
-			if (list && *list) {
-				*mem_bind = xstrdup(list);
-			} else {
-				error("missing list for \"--mem_bind=mask_mem:<list>\"");
-				xfree(buf);
-				return 1;
-			}
-		} else {
-			error("unrecognized --mem_bind argument \"%s\"", tok);
-			xfree(buf);
-			return 1;
-		}
-	}
-
-	xfree(buf);
-	return 0;
-}
-
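The verifiers removed above (srun now calls the shared slurm_verify_cpu_bind() and slurm_verify_mem_bind() instead, per the hunks below) rest on a small tokenizing trick: before splitting, every ',' not followed by a digit is rewritten to ';', so a map/mask list such as "map_cpu:0,1,2" survives as one token while "v,map_cpu:0,1,2" still splits into "v" and "map_cpu:0,1,2". A minimal, self-contained sketch of that idea, with isdigit() standing in for the _isvalue() helper (which also accepts hex mask digits):

#include <ctype.h>
#include <stdio.h>
#include <string.h>

/* Split "v,map_cpu:0,1,2" into "v" / "map_cpu:0,1,2".
 * Commas followed by a digit are list separators and are kept;
 * all other commas become ';' token delimiters. */
static void tokenize_bind_arg(char *buf)
{
	char *p, *tok;

	for (p = buf; *p != '\0'; p++) {
		if ((p[0] == ',') && !isdigit((unsigned char) p[1]))
			p[0] = ';';
	}
	p = buf;
	while ((tok = strsep(&p, ";")))
		printf("token: %s\n", tok);
}

int main(void)
{
	char arg[] = "v,map_cpu:0,1,2";
	tokenize_bind_arg(arg);	/* token: v / token: map_cpu:0,1,2 */
	return 0;
}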
 /*
  * print error message to stderr with opt.progname prepended
  */
@@ -607,13 +340,15 @@ static void _opt_default()
 	opt.time_limit_str = NULL;
 	opt.ckpt_interval = 0;
 	opt.ckpt_interval_str = NULL;
-	opt.ckpt_path = NULL;
+	opt.ckpt_dir = NULL;
+	opt.restart_dir = NULL;
 	opt.partition = NULL;
 	opt.max_threads = MAX_THREADS;
 	pmi_server_max_threads(opt.max_threads);
 
 	opt.relative = NO_VAL;
 	opt.relative_set = false;
+	opt.resv_port_cnt = NO_VAL;
 	opt.cmd_name = NULL;
 	opt.job_name = NULL;
 	opt.job_name_set_cmd = false;
@@ -649,6 +384,7 @@ static void _opt_default()
 	opt.quit_on_intr = false;
 	opt.disable_status = false;
 	opt.test_only   = false;
+	opt.preserve_env = false;
 
 	opt.quiet = 0;
 	_verbose = 0;
@@ -689,12 +425,11 @@ static void _opt_default()
 
 	opt.prolog = slurm_get_srun_prolog();
 	opt.epilog = slurm_get_srun_epilog();
+	opt.begin = (time_t)0;
 
 	opt.task_prolog     = NULL;
 	opt.task_epilog     = NULL;
 
-	opt.ctrl_comm_ifhn  = NULL;
-
 	/*
 	 * Reset some default values if running under a parallel debugger
 	 */
@@ -708,6 +443,7 @@ static void _opt_default()
 	opt.pty = false;
 	opt.open_mode = 0;
 	opt.acctg_freq = -1;
+	opt.reservation = NULL;
 	opt.wckey = NULL;
 }
 
@@ -744,7 +480,7 @@ env_vars_t env_vars[] = {
 {"SLURM_IMMEDIATE",     OPT_INT,        &opt.immediate,     NULL             },
 {"SLURM_JOB_NAME",      OPT_STRING,     &opt.job_name,      
 					&opt.job_name_set_env},
-{"SLURM_JOBID",         OPT_INT,        &opt.jobid,         NULL             },
+{"SLURM_JOB_ID",        OPT_INT,        &opt.jobid,         NULL             },
 {"SLURM_KILL_BAD_EXIT", OPT_INT,        &opt.kill_bad_exit, NULL             },
 {"SLURM_LABELIO",       OPT_INT,        &opt.labelio,       NULL             },
 {"SLURM_LINUX_IMAGE",   OPT_STRING,     &opt.linuximage,    NULL             },
@@ -754,6 +490,7 @@ env_vars_t env_vars[] = {
 {"SLURM_NSOCKETS_PER_NODE",OPT_NSOCKETS,NULL,               NULL             },
 {"SLURM_NCORES_PER_SOCKET",OPT_NCORES,  NULL,               NULL             },
 {"SLURM_NTHREADS_PER_CORE",OPT_NTHREADS,NULL,               NULL             },
+{"SLURM_NTASKS_PER_NODE", OPT_INT,      &opt.ntasks_per_node, NULL           },
 {"SLURM_NO_ROTATE",     OPT_NO_ROTATE,  NULL,               NULL             },
 {"SLURM_NPROCS",        OPT_INT,        &opt.nprocs,        &opt.nprocs_set  },
 {"SLURM_OVERCOMMIT",    OPT_OVERCOMMIT, NULL,               NULL             },
@@ -761,17 +498,18 @@ env_vars_t env_vars[] = {
 {"SLURM_RAMDISK_IMAGE", OPT_STRING,     &opt.ramdiskimage,  NULL             },
 {"SLURM_IOLOAD_IMAGE",  OPT_STRING,     &opt.ramdiskimage,  NULL             },
 {"SLURM_REMOTE_CWD",    OPT_STRING,     &opt.cwd,           NULL             },
+{"SLURM_RESV_PORTS",    OPT_RESV_PORTS, NULL,               NULL             },
 {"SLURM_STDERRMODE",    OPT_STRING,     &opt.efname,        NULL             },
 {"SLURM_STDINMODE",     OPT_STRING,     &opt.ifname,        NULL             },
 {"SLURM_STDOUTMODE",    OPT_STRING,     &opt.ofname,        NULL             },
 {"SLURM_THREADS",       OPT_INT,        &opt.max_threads,   NULL             },
 {"SLURM_TIMELIMIT",     OPT_STRING,     &opt.time_limit_str,NULL             },
 {"SLURM_CHECKPOINT",    OPT_STRING,     &opt.ckpt_interval_str, NULL         },
-{"SLURM_CHECKPOINT_PATH",OPT_STRING,    &opt.ckpt_path,     NULL             },
+{"SLURM_CHECKPOINT_DIR",OPT_STRING,     &opt.ckpt_dir,      NULL             },
+{"SLURM_RESTART_DIR",   OPT_STRING,     &opt.restart_dir ,  NULL             },
 {"SLURM_WAIT",          OPT_INT,        &opt.max_wait,      NULL             },
 {"SLURM_DISABLE_STATUS",OPT_INT,        &opt.disable_status,NULL             },
 {"SLURM_MPI_TYPE",      OPT_MPI,        NULL,               NULL             },
-{"SLURM_SRUN_COMM_IFHN",OPT_STRING,     &opt.ctrl_comm_ifhn,NULL             },
 {"SLURM_SRUN_MULTI",    OPT_MULTI,      NULL,               NULL             },
 {"SLURM_UNBUFFEREDIO",  OPT_INT,        &opt.unbuffered,    NULL             },
 {"SLURM_NODELIST",      OPT_STRING,     &opt.alloc_nodelist,NULL             },
@@ -826,8 +564,10 @@ _process_env_var(env_vars_t *e, const char *val)
 	case OPT_INT:
 		if (val != NULL) {
 			*((int *) e->arg) = (int) strtol(val, &end, 10);
-			if (!(end && *end == '\0')) 
-				error("%s=%s invalid. ignoring...", e->var, val);
+			if (!(end && *end == '\0')) {
+				error("%s=%s invalid. ignoring...", 
+				      e->var, val);
+			}
 		}
 		break;
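The OPT_INT case above validates with strtol()'s end pointer rather than atoi(), so a value with trailing garbage ("42x") is reported and ignored instead of being silently truncated to 42. The check in isolation:

#include <stdio.h>
#include <stdlib.h>

/* Parse a string as a whole integer; return 0 and leave *out
 * untouched if anything follows the number. */
static int parse_int_strict(const char *val, int *out)
{
	char *end = NULL;
	long  n;

	if (val == NULL)
		return 0;
	n = strtol(val, &end, 10);
	if (!end || *end != '\0')	/* trailing garbage, e.g. "42x" */
		return 0;
	*out = (int) n;
	return 1;
}

int main(void)
{
	int v = -1;
	printf("%d %d\n", parse_int_strict("42", &v), v);	/* 1 42 */
	printf("%d %d\n", parse_int_strict("42x", &v), v);	/* 0 42 */
	return 0;
}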
 
@@ -843,14 +583,14 @@ _process_env_var(env_vars_t *e, const char *val)
 		break;
 
 	case OPT_CPU_BIND:
-		if (_verify_cpu_bind(val, &opt.cpu_bind,
-				     &opt.cpu_bind_type))
+		if (slurm_verify_cpu_bind(val, &opt.cpu_bind,
+					  &opt.cpu_bind_type))
 			exit(1);
 		break;
 
 	case OPT_MEM_BIND:
-		if (_verify_mem_bind(val, &opt.mem_bind,
-				     &opt.mem_bind_type))
+		if (slurm_verify_mem_bind(val, &opt.mem_bind,
+					  &opt.mem_bind_type))
 			exit(1);
 		break;
 
@@ -873,6 +613,13 @@ _process_env_var(env_vars_t *e, const char *val)
 		opt.shared = 0;
 		break;
 
+	case OPT_RESV_PORTS:
+		if (val)
+			opt.resv_port_cnt = strtol(val, NULL, 10);
+		else
+			opt.resv_port_cnt = 0;
+		break;
+
 	case OPT_OPEN_MODE:
 		if ((val[0] == 'a') || (val[0] == 'A'))
 			opt.open_mode = OPEN_MODE_APPEND;
@@ -954,6 +701,8 @@ static void set_options(const int argc, char **argv)
 		{"slurmd-debug",  required_argument, 0, 'd'},
 		{"chdir",         required_argument, 0, 'D'},
 		{"error",         required_argument, 0, 'e'},
+		{"preserve-env",  no_argument,       0, 'E'},
+		{"preserve-slurm-env", no_argument,  0, 'E'},
 		{"geometry",      required_argument, 0, 'g'},
 		{"hold",          no_argument,       0, 'H'},
 		{"input",         required_argument, 0, 'i'},
@@ -997,11 +746,10 @@ static void set_options(const int argc, char **argv)
 		{"mincores",         required_argument, 0, LONG_OPT_MINCORES},
 		{"minthreads",       required_argument, 0, LONG_OPT_MINTHREADS},
 		{"mem",              required_argument, 0, LONG_OPT_MEM},
-		{"job-mem",          required_argument, 0, LONG_OPT_MEM_PER_CPU},
-		{"task-mem",         required_argument, 0, LONG_OPT_MEM_PER_CPU},
 		{"mem-per-cpu",      required_argument, 0, LONG_OPT_MEM_PER_CPU},
 		{"hint",             required_argument, 0, LONG_OPT_HINT},
 		{"mpi",              required_argument, 0, LONG_OPT_MPI},
+		{"resv-ports",       optional_argument, 0, LONG_OPT_RESV_PORTS},
 		{"tmp",              required_argument, 0, LONG_OPT_TMP},
 		{"jobid",            required_argument, 0, LONG_OPT_JOBID},
 		{"msg-timeout",      required_argument, 0, LONG_OPT_TIMEO},
@@ -1024,7 +772,6 @@ static void set_options(const int argc, char **argv)
 		{"task-prolog",      required_argument, 0, LONG_OPT_TASK_PROLOG},
 		{"task-epilog",      required_argument, 0, LONG_OPT_TASK_EPILOG},
 		{"nice",             optional_argument, 0, LONG_OPT_NICE},
-		{"ctrl-comm-ifhn",   required_argument, 0, LONG_OPT_CTRL_COMM_IFHN},
 		{"multi-prog",       no_argument,       0, LONG_OPT_MULTI},
 		{"comment",          required_argument, 0, LONG_OPT_COMMENT},
 		{"sockets-per-node", required_argument, 0, LONG_OPT_SOCKETSPERNODE},
@@ -1044,14 +791,16 @@ static void set_options(const int argc, char **argv)
 		{"get-user-env",     optional_argument, 0, LONG_OPT_GET_USER_ENV},
 		{"pty",              no_argument,       0, LONG_OPT_PTY},
 		{"checkpoint",       required_argument, 0, LONG_OPT_CHECKPOINT},
-		{"checkpoint-path",  required_argument, 0, LONG_OPT_CHECKPOINT_PATH},
+		{"checkpoint-dir",   required_argument, 0, LONG_OPT_CHECKPOINT_DIR},
 		{"open-mode",        required_argument, 0, LONG_OPT_OPEN_MODE},
 		{"acctg-freq",       required_argument, 0, LONG_OPT_ACCTG_FREQ},
 		{"wckey",            required_argument, 0, LONG_OPT_WCKEY},
+		{"reservation",      required_argument, 0, LONG_OPT_RESERVATION},
+		{"restart-dir",      required_argument, 0, LONG_OPT_RESTART_DIR},
 		{NULL,               0,                 0, 0}
 	};
-	char *opt_string = "+aAbB:c:C:d:D:e:g:Hi:IjJ:kKlL:m:n:N:"
-		"o:Op:P:qQr:R:st:T:uU:vVw:W:x:XZ";
+	char *opt_string = "+aAbB:c:C:d:D:e:Eg:Hi:IjJ:kKlL:m:n:N:"
+		"o:Op:P:qQr:Rst:T:uU:vVw:W:x:XZ";
 
 	struct option *optz = spank_option_table_create (long_options);
 
@@ -1131,6 +880,9 @@ static void set_options(const int argc, char **argv)
 			else
 				opt.efname = xstrdup(optarg);
 			break;
+		case (int)'E':
+			opt.preserve_env = true;
+			break;
 		case (int)'g':
 			if (verify_geometry(optarg, opt.geometry))
 				exit(1);
@@ -1287,13 +1039,13 @@ static void set_options(const int argc, char **argv)
                         opt.shared = 0;
                         break;
                 case LONG_OPT_CPU_BIND:
-			if (_verify_cpu_bind(optarg, &opt.cpu_bind,
-					     &opt.cpu_bind_type))
+			if (slurm_verify_cpu_bind(optarg, &opt.cpu_bind,
+						  &opt.cpu_bind_type))
 				exit(1);
 			break;
 		case LONG_OPT_MEM_BIND:
-			if (_verify_mem_bind(optarg, &opt.mem_bind,
-					     &opt.mem_bind_type))
+			if (slurm_verify_mem_bind(optarg, &opt.mem_bind,
+						  &opt.mem_bind_type))
 				exit(1);
 			break;
 		case LONG_OPT_CORE:
@@ -1340,6 +1092,12 @@ static void set_options(const int argc, char **argv)
 				      optarg);
 			}
 			break;
+		case LONG_OPT_RESV_PORTS:
+			if (optarg)
+				opt.resv_port_cnt = strtol(optarg, NULL, 10);
+			else
+				opt.resv_port_cnt = 0;
+			break;
 		case LONG_OPT_TMP:
 			opt.job_min_tmp_disk = str_to_bytes(optarg);
 			if (opt.job_min_tmp_disk < 0) {
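Both --resv-ports (added above) and --nice are declared optional_argument, which carries a GNU getopt_long() quirk: the value reaches optarg only when attached with '=' (--resv-ports=4); a space-separated "--resv-ports 4" leaves optarg NULL and "4" becomes a positional argument. Hence the NULL check with a fall-back of 0. A minimal demonstration (hypothetical option table, not srun's full one):

#include <getopt.h>
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
	int resv_port_cnt = -1;	/* -1 == option not given */
	int c;
	static struct option long_options[] = {
		{"resv-ports", optional_argument, 0, 'r'},
		{NULL, 0, 0, 0}
	};

	while ((c = getopt_long(argc, argv, "", long_options, NULL)) != -1) {
		if (c == 'r') {
			if (optarg)	/* --resv-ports=4 */
				resv_port_cnt = strtol(optarg, NULL, 10);
			else		/* bare --resv-ports */
				resv_port_cnt = 0;
		}
	}
	printf("resv_port_cnt = %d\n", resv_port_cnt);
	return 0;
}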
@@ -1464,10 +1222,6 @@ static void set_options(const int argc, char **argv)
 				}
 			}
 			break;
-		case LONG_OPT_CTRL_COMM_IFHN:
-			xfree(opt.ctrl_comm_ifhn);
-			opt.ctrl_comm_ifhn = xstrdup(optarg);
-			break;
 		case LONG_OPT_MULTI:
 			opt.multi_prog = true;
 			break;
@@ -1537,7 +1291,8 @@ static void set_options(const int argc, char **argv)
 			opt.reboot = true;
 			break;
 		case LONG_OPT_GET_USER_ENV:
-			error("--get-user-env is no longer supported in srun, use sbatch");
+			error("--get-user-env is no longer supported in srun, "
+			      "use sbatch");
 			break;
 		case LONG_OPT_PTY:
 #ifdef HAVE_PTY_H
@@ -1575,9 +1330,17 @@ static void set_options(const int argc, char **argv)
 			xfree(opt.wckey);
 			opt.wckey = xstrdup(optarg);
 			break;
-		case LONG_OPT_CHECKPOINT_PATH:
-			xfree(opt.ckpt_path);
-			opt.ckpt_path = xstrdup(optarg);
+		case LONG_OPT_RESERVATION:
+			xfree(opt.reservation);
+			opt.reservation = xstrdup(optarg);
+			break;
+		case LONG_OPT_CHECKPOINT_DIR:
+			xfree(opt.ckpt_dir);
+			opt.ckpt_dir = xstrdup(optarg);
+			break;
+		case LONG_OPT_RESTART_DIR:
+			xfree(opt.restart_dir);
+			opt.restart_dir = xstrdup(optarg);
 			break;
 		default:
 			if (spank_process_option (opt_char, optarg) < 0) {
@@ -1741,8 +1504,10 @@ static bool _opt_verify(void)
 	 *   these debug messages cause the generation of more
 	 *   debug messages ad infinitum)
 	 */
-	if (opt.slurmd_debug + LOG_LEVEL_ERROR > LOG_LEVEL_DEBUG2)
+	if (opt.slurmd_debug + LOG_LEVEL_ERROR > LOG_LEVEL_DEBUG2) {
 		opt.slurmd_debug = LOG_LEVEL_DEBUG2 - LOG_LEVEL_ERROR;
+		info("Using srun's max debug increment of %d", opt.slurmd_debug);
+	}
 
 	if (opt.quiet && _verbose) {
 		error ("don't specify both --verbose (-v) and --quiet (-Q)");
@@ -1837,21 +1602,50 @@ static bool _opt_verify(void)
 
 	/* check for realistic arguments */
 	if (opt.nprocs <= 0) {
-		error("%s: invalid number of processes (-n %d)",
-		      opt.progname, opt.nprocs);
+		error("invalid number of processes (-n %d)", opt.nprocs);
 		verified = false;
 	}
 
 	if (opt.cpus_per_task < 0) {
-		error("%s: invalid number of cpus per task (-c %d)\n",
-		      opt.progname, opt.cpus_per_task);
+		error("invalid number of cpus per task (-c %d)\n",
+		      opt.cpus_per_task);
 		verified = false;
 	}
 
 	if ((opt.min_nodes <= 0) || (opt.max_nodes < 0) || 
 	    (opt.max_nodes && (opt.min_nodes > opt.max_nodes))) {
-		error("%s: invalid number of nodes (-N %d-%d)\n",
-		      opt.progname, opt.min_nodes, opt.max_nodes);
+		error("invalid number of nodes (-N %d-%d)\n",
+		      opt.min_nodes, opt.max_nodes);
+		verified = false;
+	}
+
+#ifdef HAVE_BGL
+	if (opt.blrtsimage && strchr(opt.blrtsimage, ' ')) {
+		error("invalid BlrtsImage given '%s'", opt.blrtsimage);
+		verified = false;
+	}
+#endif
+
+	if (opt.linuximage && strchr(opt.linuximage, ' ')) {
+#ifdef HAVE_BGL
+		error("invalid LinuxImage given '%s'", opt.linuximage);
+#else
+		error("invalid CnloadImage given '%s'", opt.linuximage);
+#endif
+		verified = false;
+	}
+
+	if (opt.mloaderimage && strchr(opt.mloaderimage, ' ')) {
+		error("invalid MloaderImage given '%s'", opt.mloaderimage);
+		verified = false;
+	}
+
+	if (opt.ramdiskimage && strchr(opt.ramdiskimage, ' ')) {
+#ifdef HAVE_BGL
+		error("invalid RamDiskImage given '%s'", opt.ramdiskimage);
+#else
+		error("invalid IoloadImage given '%s'", opt.ramdiskimage);
+#endif
 		verified = false;
 	}
 
@@ -1864,7 +1658,8 @@ static bool _opt_verify(void)
 		 */
 		if (!(opt.cpu_bind_type & (CPU_BIND_TO_SOCKETS |
 					   CPU_BIND_TO_CORES |
-					   CPU_BIND_TO_THREADS))) {
+					   CPU_BIND_TO_THREADS |
+					   CPU_BIND_TO_LDOMS))) {
 			opt.cpu_bind_type |= CPU_BIND_TO_CORES;
 		}
 	}
@@ -1876,7 +1671,8 @@ static bool _opt_verify(void)
 		 */
 		if (!(opt.cpu_bind_type & (CPU_BIND_TO_SOCKETS |
 					   CPU_BIND_TO_CORES |
-					   CPU_BIND_TO_THREADS))) {
+					   CPU_BIND_TO_THREADS |
+					   CPU_BIND_TO_LDOMS))) {
 			opt.cpu_bind_type |= CPU_BIND_TO_SOCKETS;
 		}
 	}
@@ -2018,8 +1814,8 @@ static bool _opt_verify(void)
 		}
 	}
 
-	if (! opt.ckpt_path)
-		opt.ckpt_path = xstrdup(opt.cwd);
+	if (! opt.ckpt_dir)
+		opt.ckpt_dir = xstrdup(opt.cwd);
 
 	if ((opt.euid != (uid_t) -1) && (opt.euid != opt.uid)) 
 		opt.uid = opt.euid;
@@ -2037,6 +1833,10 @@ static bool _opt_verify(void)
 		xfree(sched_name);
 	}
 
+	 if (slurm_verify_cpu_bind(NULL, &opt.cpu_bind,
+				   &opt.cpu_bind_type))
+		exit(1);
+
 	return verified;
 }
 
@@ -2112,6 +1912,7 @@ static void _opt_list()
 	info("partition      : %s",
 	     opt.partition == NULL ? "default" : opt.partition);
 	info("job name       : `%s'", opt.job_name);
+	info("reservation    : `%s'", opt.reservation);
 	info("wckey          : `%s'", opt.wckey);
 	info("distribution   : %s", format_task_dist_states(opt.distribution));
 	if(opt.distribution == SLURM_DIST_PLANE)
@@ -2134,7 +1935,9 @@ static void _opt_list()
 		info("time_limit     : %d", opt.time_limit);
 	if (opt.ckpt_interval)
 		info("checkpoint     : %d secs", opt.ckpt_interval);
-	info("checkpoint_path: %s", opt.ckpt_path);
+	info("checkpoint_dir : %s", opt.ckpt_dir);
+	if (opt.restart_dir)
+		info("restart_dir    : %s", opt.restart_dir);
 	info("wait           : %d", opt.max_wait);
 	if (opt.nice)
 		info("nice           : %d", opt.nice);
@@ -2155,6 +1958,7 @@ static void _opt_list()
 	xfree(str);
 	info("reboot         : %s", opt.reboot ? "no" : "yes");
 	info("rotate         : %s", opt.no_rotate ? "yes" : "no");
+	info("preserve_env   : %s", tf_(opt.preserve_env));
 	
 #ifdef HAVE_BGL
 	if (opt.blrtsimage)
@@ -2189,7 +1993,6 @@ static void _opt_list()
 	info("mail_user      : %s", opt.mail_user);
 	info("task_prolog    : %s", opt.task_prolog);
 	info("task_epilog    : %s", opt.task_epilog);
-	info("ctrl_comm_ifhn : %s", opt.ctrl_comm_ifhn);
 	info("multi_prog     : %s", opt.multi_prog ? "yes" : "no");
 	info("sockets-per-node  : %d - %d", opt.min_sockets_per_node,
 					    opt.max_sockets_per_node);
@@ -2201,6 +2004,8 @@ static void _opt_list()
 	info("ntasks-per-socket : %d", opt.ntasks_per_socket);
 	info("ntasks-per-core   : %d", opt.ntasks_per_core);
 	info("plane_size        : %u", opt.plane_size);
+	if (opt.resv_port_cnt != NO_VAL)
+		info("resv_port_cnt     : %d", opt.resv_port_cnt);
 	str = print_commandline(opt.argc, opt.argv);
 	info("remote command    : `%s'", str);
 	xfree(str);
@@ -2213,6 +2018,7 @@ static bool _under_parallel_debugger (void)
 	return (MPIR_being_debugged != 0);
 }
 
+
 static void _usage(void)
 {
  	printf(
@@ -2222,13 +2028,14 @@ static void _usage(void)
 "            [--share] [--label] [--unbuffered] [-m dist] [-J jobname]\n"
 "            [--jobid=id] [--verbose] [--slurmd_debug=#]\n"
 "            [--core=type] [-T threads] [-W sec] [--checkpoint=time]\n"
-"            [--checkpoint-path=dir]  [--licenses=names]\n"
+"            [--checkpoint-dir=dir]  [--licenses=names]\n"
+"            [--restart-dir=dir]\n"
 "            [--contiguous] [--mincpus=n] [--mem=MB] [--tmp=MB] [-C list]\n"
 "            [--mpi=type] [--account=name] [--dependency=type:jobid]\n"
 "            [--kill-on-bad-exit] [--propagate[=rlimits] [--comment=name]\n"
 "            [--cpu_bind=...] [--mem_bind=...] [--network=type]\n"
-"            [--ntasks-per-node=n] [--ntasks-per-socket=n]\n"
-"            [--ntasks-per-core=n] [--mem-per-cpu=MB]\n"
+"            [--ntasks-per-node=n] [--ntasks-per-socket=n] [reservation=name]\n"
+"            [--ntasks-per-core=n] [--mem-per-cpu=MB] [--preserve-env]\n"
 #ifdef HAVE_BG		/* Blue gene specific options */
 "            [--geometry=XxYxZ] [--conn-type=type] [--no-rotate] [--reboot]\n"
 #ifdef HAVE_BGL
@@ -2254,75 +2061,80 @@ static void _help(void)
 "Usage: srun [OPTIONS...] executable [args...]\n"
 "\n"
 "Parallel run options:\n"
-"  -n, --ntasks=ntasks         number of tasks to run\n"
-"  -N, --nodes=N               number of nodes on which to run (N = min[-max])\n"
+"  -b, --batch                 submit as batch job for later execution\n"
+"      --begin=time            defer job until HH:MM DD/MM/YY\n"
 "  -c, --cpus-per-task=ncpus   number of cpus required per task\n"
-"      --ntasks-per-node=n     number of tasks to invoke on each node\n"
-"  -i, --input=in              location of stdin redirection\n"
-"  -o, --output=out            location of stdout redirection\n"
+"      --checkpoint=time       job step checkpoint interval\n"
+"      --checkpoint-dir=dir    directory to store job step checkpoint image \n"
+"                              files\n"
+"      --comment=name          arbitrary comment\n"
+"      --core=type             change default corefile format type\n"
+"                              (type=\"list\" to list of valid formats)\n"
+"  -d, --slurmd-debug=level    slurmd debug level\n"
+"  -D, --chdir=path            change remote current working directory\n"
 "  -e, --error=err             location of stderr redirection\n"
-"  -r, --relative=n            run job step relative to node n of allocation\n"
-"  -p, --partition=partition   partition requested\n"
+"      --epilog=program        run \"program\" after launching job step\n"
+"  -E, --preserve-env          env vars for node and task counts override\n"
+"                              command-line flags\n"
+"      --get-user-env          used by Moab.  See srun man page.\n"
 "  -H, --hold                  submit job in held state\n"
-"  -t, --time=minutes          time limit\n"
-"  -D, --chdir=path            change remote current working directory\n"
+"  -i, --input=in              location of stdin redirection\n"
 "  -I, --immediate             exit if resources are not immediately available\n"
-"  -O, --overcommit            overcommit resources\n"
+"      --jobid=id              run under already allocated job\n"
+"  -J, --job-name=jobname      name of job\n"
 "  -k, --no-kill               do not kill job on node failure\n"
 "  -K, --kill-on-bad-exit      kill the job if any task terminates with a\n"
 "                              non-zero exit code\n"
-"  -s, --share                 share nodes with other jobs\n"
 "  -l, --label                 prepend task number to lines of stdout/err\n"
-"  -u, --unbuffered            do not line-buffer stdout/err\n"
+"  -L, --licenses=names        required license, comma separated\n"
 "  -m, --distribution=type     distribution method for processes to nodes\n"
 "                              (type = block|cyclic|arbitrary)\n"
-"  -J, --job-name=jobname      name of job\n"
-"      --jobid=id              run under already allocated job\n"
-"      --mpi=type              type of MPI being used\n"
-"  -b, --batch                 submit as batch job for later execution\n"
-"  -T, --threads=threads       set srun launch fanout\n"
-"  -W, --wait=sec              seconds to wait after first task exits\n"
-"                              before killing job\n"
-"  -q, --quit-on-interrupt     quit on single Ctrl-C\n"
-"  -X, --disable-status        Disable Ctrl-C status feature\n"
-"  -v, --verbose               verbose mode (multiple -v's increase verbosity)\n"
-"  -Q, --quiet                 quiet mode (suppress informational messages)\n"
-"  -d, --slurmd-debug=level    slurmd debug level\n"
-"      --core=type             change default corefile format type\n"
-"                              (type=\"list\" to list of valid formats)\n"
-"  -P, --dependency=type:jobid defer job until condition on jobid is satisfied\n"
-"      --nice[=value]          decrease secheduling priority by value\n"
-"  -U, --account=name          charge job to specified account\n"
-"      --comment=name          arbitrary comment\n"
-"      --propagate[=rlimits]   propagate all [or specific list of] rlimits\n"
-"      --mpi=type              specifies version of MPI to use\n"
-"      --prolog=program        run \"program\" before launching job step\n"
-"      --epilog=program        run \"program\" after launching job step\n"
-"      --task-prolog=program   run \"program\" before launching task\n"
-"      --task-epilog=program   run \"program\" after launching task\n"
-"      --begin=time            defer job until HH:MM DD/MM/YY\n"
 "      --mail-type=type        notify on state change: BEGIN, END, FAIL or ALL\n"
-"      --mail-user=user        who to send email notification for job state changes\n"
-"      --ctrl-comm-ifhn=addr   interface hostname for PMI communications from srun\n"
+"      --mail-user=user        who to send email notification for job state\n"
+"                              changes\n"
+"      --mpi=type              type of MPI being used\n"
 "      --multi-prog            if set the program name specified is the\n"
 "                              configuration specification for multiple programs\n"
-"      --get-user-env          used by Moab.  See srun man page.\n"
-"  -L, --licenses=names        required license, comma separated\n"
-"      --checkpoint=time       job step checkpoint interval\n"
-"      --checkpoint-path=dir   path to store job step checkpoint image files\n"
+"  -n, --ntasks=ntasks         number of tasks to run\n"
+"      --nice[=value]          decrease secheduling priority by value\n"
+"      --ntasks-per-node=n     number of tasks to invoke on each node\n"
+"  -N, --nodes=N               number of nodes on which to run (N = min[-max])\n"
+"  -o, --output=out            location of stdout redirection\n"
+"  -O, --overcommit            overcommit resources\n"
+"  -p, --partition=partition   partition requested\n"
+"      --prolog=program        run \"program\" before launching job step\n"
+"      --propagate[=rlimits]   propagate all [or specific list of] rlimits\n"
 #ifdef HAVE_PTY_H
 "      --pty                   run task zero in pseudo terminal\n"
 #endif
+"  -P, --dependency=type:jobid defer job until condition on jobid is satisfied\n"
+"  -q, --quit-on-interrupt     quit on single Ctrl-C\n"
+"  -Q, --quiet                 quiet mode (suppress informational messages)\n"
+"  -r, --relative=n            run job step relative to node n of allocation\n"
+"      --restart-dir=dir       directory of checkpoint image files to restart\n"
+"                              from\n"
+"  -s, --share                 share nodes with other jobs\n"
+"  -t, --time=minutes          time limit\n"
+"      --task-epilog=program   run \"program\" after launching task\n"
+"      --task-prolog=program   run \"program\" before launching task\n"
+"  -T, --threads=threads       set srun launch fanout\n"
+"  -u, --unbuffered            do not line-buffer stdout/err\n"
+"  -U, --account=name          charge job to specified account\n"
+"  -v, --verbose               verbose mode (multiple -v's increase verbosity)\n"
+"  -W, --wait=sec              seconds to wait after first task exits\n"
+"                              before killing job\n"
+"  -X, --disable-status        Disable Ctrl-C status feature\n"
 "\n"
 "Constraint options:\n"
+"  -C, --constraint=list       specify a list of constraints\n"
+"      --contiguous            demand a contiguous range of nodes\n"
 "      --mincpus=n             minimum number of cpus per node\n"
-"      --minsockets=n          minimum number of sockets per node\n"
 "      --mincores=n            minimum number of cores per cpu\n"
+"      --minsockets=n          minimum number of sockets per node\n"
 "      --minthreads=n          minimum number of threads per core\n"
 "      --mem=MB                minimum amount of real memory\n"
+"      --reservation=name      allocate resources from named reservation\n"
 "      --tmp=MB                minimum amount of temporary disk\n"
-"      --contiguous            demand a contiguous range of nodes\n"
-"  -C, --constraint=list       specify a list of constraints\n"
 "  -w, --nodelist=hosts...     request a specific list of hosts\n"
 "  -x, --exclude=hosts...      exclude a specific list of hosts\n"
 "  -Z, --no-allocate           don't allocate nodes (must supply -w)\n"
@@ -2333,74 +2145,73 @@ static void _help(void)
 "                              or don't share CPUs for job steps\n"
 "      --mem-per-cpu=MB        maximum amount of real memory per allocated\n"
 "                              CPU required by the job.\n" 
-"                              --mem >= --job-mem if --mem is specified.\n" 
+"                              --mem >= --mem-per-cpu if --mem is specified.\n" 
+"      --resv-ports            reserve communication ports\n" 
 "\n"
 "Affinity/Multi-core options: (when the task/affinity plugin is enabled)\n" 
-"  -B --extra-node-info=S[:C[:T]]            Expands to:\n"
-"      --sockets-per-node=S    number of sockets per node to allocate\n"
-"      --cores-per-socket=C    number of cores per socket to allocate\n"
-"      --threads-per-core=T    number of threads per core to allocate\n"
+"  -B  --extra-node-info=S[:C[:T]]            Expands to:\n"
+"       --sockets-per-node=S   number of sockets per node to allocate\n"
+"       --cores-per-socket=C   number of cores per socket to allocate\n"
+"       --threads-per-core=T   number of threads per core to allocate\n"
 "                              each field can be 'min[-max]' or wildcard '*'\n"
 "                              total cpus requested = (N x S x C x T)\n"
-"\n"
 "      --ntasks-per-socket=n   number of tasks to invoke on each socket\n"
 "      --ntasks-per-core=n     number of tasks to invoke on each core\n"
+"\n"
 "\n");
 	conf = slurm_conf_lock();
 	if (conf->task_plugin != NULL
 	    && strcasecmp(conf->task_plugin, "task/affinity") == 0) {
 		printf(
-"      --hint=                 Bind tasks according to application hints\n"
-"                              (see \"--hint=help\" for options)\n"
 "      --cpu_bind=             Bind tasks to CPUs\n"
 "                              (see \"--cpu_bind=help\" for options)\n"
+"      --hint=                 Bind tasks according to application hints\n"
+"                              (see \"--hint=help\" for options)\n"
 "      --mem_bind=             Bind memory to locality domains (ldom)\n"
 "                              (see \"--mem_bind=help\" for options)\n"
 			);
 	}
 	slurm_conf_unlock();
-	printf("\n");
 	spank_print_options (stdout, 6, 30);
-	printf("\n");
 
-        printf(
+	printf("\n"
 #ifdef HAVE_AIX				/* AIX/Federation specific options */
-		"AIX related options:\n"
-		"  --network=type              communication protocol to be used\n"
-		"\n"
+"AIX related options:\n"
+"  --network=type              communication protocol to be used\n"
+"\n"
 #endif
 
 #ifdef HAVE_BG				/* Blue gene specific options */
-		"Blue Gene related options:\n"
-		"  -g, --geometry=XxYxZ        geometry constraints of the job\n"
-		"  -R, --no-rotate             disable geometry rotation\n"
-		"      --reboot                reboot block before starting job\n"
-		"      --conn-type=type        constraint on type of connection, MESH or TORUS\n"
-		"                              if not set, then tries to fit TORUS else MESH\n"
+"Blue Gene related options:\n"
+"  -g, --geometry=XxYxZ        geometry constraints of the job\n"
+"  -R, --no-rotate             disable geometry rotation\n"
+"      --reboot                reboot block before starting job\n"
+"      --conn-type=type        constraint on type of connection, MESH or TORUS\n"
+"                              if not set, then tries to fit TORUS else MESH\n"
 #ifndef HAVE_BGL
-		"                              If wanting to run in HTC mode (only for 1\n"
-		"                              midplane and below).  You can use HTC_S for\n"
-		"                              SMP, HTC_D for Dual, HTC_V for\n"
-		"                              virtual node mode, and HTC_L for Linux mode.\n" 
-                "      --cnload-image=path     path to compute node image for bluegene block.  Default if not set\n"
-                "      --mloader-image=path    path to mloader image for bluegene block.  Default if not set\n"
-                "      --ioload-image=path     path to ioload image for bluegene block.  Default if not set\n"
+"                              If wanting to run in HTC mode (only for 1\n"
+"                              midplane and below).  You can use HTC_S for\n"
+"                              SMP, HTC_D for Dual, HTC_V for\n"
+"                              virtual node mode, and HTC_L for Linux mode.\n" 
+"      --cnload-image=path     path to compute node image for bluegene block.  Default if not set\n"
+"      --mloader-image=path    path to mloader image for bluegene block.  Default if not set\n"
+"      --ioload-image=path     path to ioload image for bluegene block.  Default if not set\n"
 #else
-                "      --blrts-image=path      path to blrts image for bluegene block.  Default if not set\n"
-                "      --linux-image=path      path to linux image for bluegene block.  Default if not set\n"
-                "      --mloader-image=path    path to mloader image for bluegene block.  Default if not set\n"
-                "      --ramdisk-image=path    path to ramdisk image for bluegene block.  Default if not set\n"
+"      --blrts-image=path      path to blrts image for bluegene block.  Default if not set\n"
+"      --linux-image=path      path to linux image for bluegene block.  Default if not set\n"
+"      --mloader-image=path    path to mloader image for bluegene block.  Default if not set\n"
+"      --ramdisk-image=path    path to ramdisk image for bluegene block.  Default if not set\n"
 #endif
 #endif
-		"\n"
-		"Help options:\n"
-		"      --help                  show this help message\n"
-		"      --usage                 display brief usage message\n"
-		"      --print-request         Display job's layout without scheduling it\n"
-		"\n"
-		"Other options:\n"
-		"  -V, --version               output version information and exit\n"
-		"\n"
+"\n"
+"Help options:\n"
+"      --help                  show this help message\n"
+"      --usage                 display brief usage message\n"
+"      --print-request         Display job's layout without scheduling it\n"
+"\n"
+"Other options:\n"
+"  -V, --version               output version information and exit\n"
+"\n"
 		);
 
 }
diff --git a/src/srun/opt.h b/src/srun/opt.h
index daca2b85cb2fb67c4454345c042637b200f1d56e..01dcfde4480ec9f0f00a93b539b2b944d9b146a4 100644
--- a/src/srun/opt.h
+++ b/src/srun/opt.h
@@ -1,14 +1,16 @@
 /*****************************************************************************\
  *  opt.h - definitions for srun option processing
- *  $Id: opt.h 15808 2008-12-02 23:38:47Z da $
+ *  $Id: opt.h 16867 2009-03-12 16:35:42Z jette $
  *****************************************************************************
- *  Copyright (C) 2002-2006 The Regents of the University of California.
+ *  Copyright (C) 2002-2007 The Regents of the University of California.
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Mark Grondona <grondona1@llnl.gov>, et. al.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -111,8 +113,9 @@ typedef struct srun_options {
 	char *time_limit_str;	/* --time,   -t (string)	*/
 	int  ckpt_interval;	/* --checkpoint (int minutes)	*/
 	char *ckpt_interval_str;/* --checkpoint (string)	*/
-	char *ckpt_path;	/* --checkpoint-path (string)   */
+	char *ckpt_dir;  	/* --checkpoint-dir (string)   */
 	bool exclusive;		/* --exclusive			*/
+	int  resv_port_cnt;	/* --resv-ports			*/
 	char *partition;	/* --partition=n,   -p n   	*/
 	enum task_dist_states
 	        distribution;	/* --distribution=, -m dist	*/
@@ -165,6 +168,7 @@ typedef struct srun_options {
 	char *task_epilog;	/* --task-epilog=		*/
 	char *task_prolog;	/* --task-prolog=		*/
 	char *licenses;		/* --licenses, -L		*/
+	bool preserve_env;	/* --preserve-env		*/
 
 	/* constraint options */
 	int32_t job_min_cpus;	/* --mincpus=n			*/
@@ -203,13 +207,14 @@ typedef struct srun_options {
 	time_t begin;		/* --begin			*/
 	uint16_t mail_type;	/* --mail-type			*/
 	char *mail_user;	/* --mail-user			*/
-	char *ctrl_comm_ifhn;	/* --ctrl-comm-ifhn		*/
 	uint8_t open_mode;	/* --open-mode=append|truncate	*/
 	int acctg_freq;		/* --acctg-freq=secs		*/
 	bool pty;		/* --pty			*/
+	char *restart_dir;	/* --restart-dir                */
 	int argc;		/* length of argv array		*/
 	char **argv;		/* left over on command line	*/
 	char *wckey;            /* --wckey workload characterization key */
+	char *reservation;      /* --reservation		*/
 } opt_t;
 
 extern opt_t opt;
diff --git a/src/srun/srun.c b/src/srun/srun.c
index 1f0fc7f1b716ca64aa8a7f96fe28eda81388d540..b168cf8399d4b96b3750db1ec83e8e8ca3e49727 100644
--- a/src/srun/srun.c
+++ b/src/srun/srun.c
@@ -3,13 +3,14 @@
  *	parallel jobs.
  *****************************************************************************
  *  Copyright (C) 2002-2007 The Regents of the University of California.
- *  Copyright (C) 2008 Lawrence Livermore National Security.
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Mark Grondona <grondona@llnl.gov>, et. al.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -68,7 +69,6 @@
 #include <signal.h>
 #include <termios.h>
 #include <unistd.h>
-#include <fcntl.h>
 #include <grp.h>
 
 
@@ -93,6 +93,7 @@
 #include "src/srun/srun.h"
 #include "src/srun/srun_pty.h"
 #include "src/srun/multi_prog.h"
+#include "src/srun/task_state.h"
 #include "src/api/pmi_server.h"
 #include "src/api/step_launch.h"
 
@@ -102,6 +103,13 @@
 #  endif
 #endif /* defined HAVE_DECL_STRSIGNAL && !HAVE_DECL_STRSIGNAL */
 
+#ifndef OPEN_MPI_PORT_ERROR
+/* This exit code indicates that the launched Open MPI tasks could
+ *	not open their reserved port because it was already in use
+ *	by some other process. */
+#define OPEN_MPI_PORT_ERROR 108
+#endif
+
 #define MAX_RETRIES 20
 #define MAX_ENTRIES 50
 
@@ -113,13 +121,12 @@ mpi_plugin_client_info_t mpi_job_info[1];
 static struct termios termdefaults;
 uint32_t global_rc = 0;
 srun_job_t *job = NULL;
+task_state_t task_state;
 
-struct {
-	bitstr_t *start_success;
-	bitstr_t *start_failure;
-	bitstr_t *finish_normal;
-	bitstr_t *finish_abnormal;
-} task_state;
+#define MAX_STEP_RETRIES 4
+time_t launch_start_time;
+bool   retry_step_begin = false;
+int    retry_step_cnt = 0;
 
 /*
  * forward declaration of static funcs
@@ -145,9 +152,6 @@ static int   _set_umask_env(void);
 static int   _slurm_debug_env_val (void);
 static void  _task_start(launch_tasks_response_msg_t *msg);
 static void  _task_finish(task_exit_msg_t *msg);
-static void  _task_state_struct_init(int num_tasks);
-static void  _task_state_struct_print(void);
-static void  _task_state_struct_free(void);
 static char *_uint16_array_to_str(int count, const uint16_t *array);
 
 int srun(int ac, char **av)
@@ -167,7 +171,7 @@ int srun(int ac, char **av)
 	env->nodeid = -1;
 	env->cli = NULL;
 	env->env = NULL;
-	env->ckpt_path = NULL;
+	env->ckpt_dir = NULL;
 
 	debug_level = _slurm_debug_env_val();
 	logopt.stderr_level += debug_level;
@@ -198,6 +202,9 @@ int srun(int ac, char **av)
 		exit (1);
 	}
 	record_ppid();
+
+	if (spank_init_post_opt() < 0)
+		fatal("Plugin stack post-option processing failed.");
 	
 	/* reinit log with new verbosity (if changed by command line)
 	 */
@@ -234,7 +241,7 @@ int srun(int ac, char **av)
 	} else if (opt.no_alloc) {
 		info("do not allocate resources");
 		job = job_create_noalloc(); 
-		if (create_job_step(job) < 0) {
+		if (create_job_step(job, false) < 0) {
 			exit(1);
 		}
 	} else if ((resp = existing_allocation())) {
@@ -248,7 +255,11 @@ int srun(int ac, char **av)
 		job = job_step_create_allocation(resp);
 		slurm_free_resource_allocation_response_msg(resp);
 
-		if (!job || create_job_step(job) < 0)
+		if (opt.begin != 0)
+			error("--begin is ignored because nodes"
+				" are already allocated.");
+
+		if (!job || create_job_step(job, false) < 0)
 			exit(1);
 	} else {
 		/* Combined job allocation and job step launch */
@@ -273,8 +284,8 @@ int srun(int ac, char **av)
 			/* use SLURM_JOB_NAME env var */
 			opt.job_name_set_cmd = true;
 		}
-		if (!job || create_job_step(job) < 0) {
-			slurm_complete_job(job->jobid, 1);
+		if (!job || create_job_step(job, true) < 0) {
+			slurm_complete_job(resp->job_id, 1);
 			exit(1);
 		}
 		
@@ -290,7 +301,6 @@ int srun(int ac, char **av)
 	/*
 	 *  Enhance environment for job
 	 */
-	env->nprocs = opt.nprocs;
 	env->cpus_per_task = opt.cpus_per_task;
 	if (opt.ntasks_per_node != NO_VAL)
 		env->ntasks_per_node = opt.ntasks_per_node;
@@ -309,14 +319,13 @@ int srun(int ac, char **av)
 	env->slurmd_debug = opt.slurmd_debug;
 	env->labelio = opt.labelio;
 	env->comm_port = slurmctld_comm_addr.port;
-	env->comm_hostname = slurmctld_comm_addr.hostname;
+	env->batch_flag = 0;
 	if (job) {
 		uint16_t *tasks = NULL;
 		slurm_step_ctx_get(job->step_ctx, SLURM_STEP_CTX_TASKS, 
 				   &tasks);
 
 		env->select_jobinfo = job->select_jobinfo;
-		env->nhosts = job->nhosts;
 		env->nodelist = job->nodelist;
 		env->task_count = _uint16_array_to_str(
 			job->nhosts, tasks);
@@ -346,7 +355,8 @@ int srun(int ac, char **av)
 	xfree(env->task_count);
 	xfree(env);
 	
-	_task_state_struct_init(opt.nprocs);
+ re_launch:
+	task_state = task_state_create(opt.nprocs);
 	slurm_step_launch_params_t_init(&launch_params);
 	launch_params.gid = opt.gid;
 	launch_params.argc = opt.argc;
@@ -369,15 +379,14 @@ int srun(int ac, char **av)
 	if (opt.acctg_freq >= 0)
 		launch_params.acctg_freq = opt.acctg_freq;
 	launch_params.pty = opt.pty;
-	launch_params.max_sockets     = opt.max_sockets_per_node;
-	launch_params.max_cores       = opt.max_cores_per_socket;
-	launch_params.max_threads     = opt.max_threads_per_core;
-	launch_params.cpus_per_task = opt.cpus_per_task;
-	launch_params.ntasks_per_node   = opt.ntasks_per_node;
-	launch_params.ntasks_per_socket = opt.ntasks_per_socket;
-	launch_params.ntasks_per_core   = opt.ntasks_per_core;
-	launch_params.ckpt_path = xstrdup(opt.ckpt_path);
-
+	launch_params.max_sockets	= opt.max_sockets_per_node;
+	launch_params.max_cores		= opt.max_cores_per_socket;
+	launch_params.max_threads	= opt.max_threads_per_core;
+	launch_params.cpus_per_task	= opt.cpus_per_task;
+	launch_params.task_dist         = opt.distribution;
+	launch_params.ckpt_dir		= opt.ckpt_dir;
+	launch_params.restart_dir       = opt.restart_dir;
+	launch_params.preserve_env      = opt.preserve_env;
 	/* job structure should now be filled in */
 	_setup_signals();
 
@@ -403,8 +412,9 @@ int srun(int ac, char **av)
 	}
 
 	update_job_state(job, SRUN_JOB_LAUNCHING);
-	if (slurm_step_launch(job->step_ctx, slurmctld_comm_addr.hostname, 
-	    &launch_params, &callbacks) != SLURM_SUCCESS) {
+	launch_start_time = time(NULL);
+	if (slurm_step_launch(job->step_ctx, &launch_params, &callbacks) != 
+	    SLURM_SUCCESS) {
 		error("Application launch failed: %m");
 		global_rc = 1;
 		goto cleanup;
@@ -414,7 +424,7 @@ int srun(int ac, char **av)
 	if (slurm_step_launch_wait_start(job->step_ctx) == SLURM_SUCCESS) {
 		update_job_state(job, SRUN_JOB_RUNNING);
 		/* Only set up MPIR structures if the step launched
-		   correctly. */
+		 * correctly. */
 		if (opt.multi_prog)
 			mpir_set_multi_name(job->ctx_params.task_count,
 					    launch_params.argv[0]);
@@ -430,32 +440,65 @@ int srun(int ac, char **av)
 	}
 
 	slurm_step_launch_wait_finish(job->step_ctx);
+	if (retry_step_begin && (retry_step_cnt < MAX_STEP_RETRIES)) {
+		retry_step_begin = false;
+		slurm_step_ctx_destroy(job->step_ctx);
+		if (got_alloc) {
+			if (create_job_step(job, true) < 0)
+				exit(1);
+		} else {
+			if (create_job_step(job, false) < 0)
+				exit(1);
+		}
+		task_state_destroy(task_state);
+		goto re_launch;
+	}
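The re_launch label gives srun a bounded retry: when _task_finish() detects an Open MPI reserved-port collision (see _handle_openmpi_port_error() below), the step context is destroyed, the step is recreated, and the launch restarts, at most MAX_STEP_RETRIES times. The same control flow expressed without the goto, as a sketch in which launch_once() is a hypothetical stand-in for the create/launch/wait sequence:

#include <stdbool.h>
#include <stdio.h>

#define MAX_STEP_RETRIES 4

/* Hypothetical stand-in for create_job_step()/slurm_step_launch()/
 * wait; sets *retry when the step should be rebuilt and relaunched. */
static int launch_once(int attempt, bool *retry)
{
	printf("launch attempt %d\n", attempt);
	*retry = (attempt < 2);	/* pretend the first two attempts collide */
	return 0;
}

int main(void)
{
	int  retry_cnt = 0;
	bool retry;

	do {
		retry = false;
		if (launch_once(retry_cnt, &retry) < 0)
			return 1;
	} while (retry && (++retry_cnt < MAX_STEP_RETRIES));

	return 0;
}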
 
 cleanup:
 	if(got_alloc) {
 		cleanup_allocation();
-		slurm_complete_job(job->jobid, global_rc);
+
+		/* send the controller we were cancelled */
+		if (job->state >= SRUN_JOB_CANCELLED)
+			slurm_complete_job(job->jobid, NO_VAL);
+		else
+			slurm_complete_job(job->jobid, global_rc);
 	}
+
 	_run_srun_epilog(job);
 	slurm_step_ctx_destroy(job->step_ctx);
 	mpir_cleanup();
-	_task_state_struct_free();
+	task_state_destroy(task_state);
 	log_fini();
 
+	if (WIFEXITED(global_rc))
+		global_rc = WEXITSTATUS(global_rc);
 	return (int)global_rc;
 }
 
+static slurm_step_layout_t *
+_get_slurm_step_layout(srun_job_t *job)
+{
+	job_step_create_response_msg_t *resp;
+
+	if (!job || !job->step_ctx)
+		return (NULL);
+
+	slurm_step_ctx_get(job->step_ctx, SLURM_STEP_CTX_RESP, &resp);
+	if (!resp)
+		return (NULL);
+	return (resp->step_layout);
+}
+
 static int _call_spank_local_user (srun_job_t *job)
 {
 	struct spank_launcher_job_info info[1];
-	job_step_create_response_msg_t *step_resp;
 
 	info->uid = opt.uid;
 	info->gid = opt.gid;
 	info->jobid = job->jobid;
 	info->stepid = job->stepid;
-	slurm_step_ctx_get(job->step_ctx, SLURM_STEP_CTX_RESP, &step_resp);
-	info->step_layout = step_resp->step_layout;
+	info->step_layout = _get_slurm_step_layout(job);
 	info->argc = opt.argc;
 	info->argv = opt.argv;
 
@@ -601,7 +644,7 @@ static void _set_cpu_env_var(resource_allocation_response_msg_t *resp)
 	if (getenv("SLURM_JOB_CPUS_PER_NODE"))
 		return;
 
-	tmp = uint32_compressed_to_str((uint32_t)resp->num_cpu_groups,
+	tmp = uint32_compressed_to_str(resp->num_cpu_groups,
 				       resp->cpus_per_node,
 				       resp->cpu_count_reps);
 	if (setenvf(NULL, "SLURM_JOB_CPUS_PER_NODE", "%s", tmp) < 0)
@@ -746,7 +789,7 @@ static int _run_srun_script (srun_job_t *job, char *script)
 		if (waitpid(cpid, &status, 0) < 0) {
 			if (errno == EINTR)
 				continue;
-			error("waidpid: %m");
+			error("waitpid: %m");
 			return 0;
 		} else
 			return status;
@@ -799,14 +842,10 @@ _set_stdio_fds(srun_job_t *job, slurm_step_io_fds_t *cio_fds)
 				fatal("Could not open stdin file: %m");
 		}
 		if (job->ifname->type == IO_ONE) {
-			job_step_create_response_msg_t *step_resp = NULL;
-			
-			slurm_step_ctx_get(job->step_ctx, SLURM_STEP_CTX_RESP,
-					   &step_resp);
-		
 			cio_fds->in.taskid = job->ifname->taskid;
 			cio_fds->in.nodeid = slurm_step_layout_host_id(
-				step_resp->step_layout, job->ifname->taskid);
+				_get_slurm_step_layout(job),
+				job->ifname->taskid);
 		}
 	}
 
@@ -902,9 +941,9 @@ _task_start(launch_tasks_response_msg_t *msg)
 		table->pid = msg->local_pids[i];
 
 		if (msg->return_code == 0) {
-			bit_set(task_state.start_success, taskid);
+			task_state_update(task_state, taskid, TS_START_SUCCESS);
 		} else {
-			bit_set(task_state.start_failure, taskid);
+			task_state_update(task_state, taskid, TS_START_FAILURE);
 		}
 	}
 
@@ -918,6 +957,7 @@ _terminate_job_step(slurm_step_ctx_t *step_ctx)
 	slurm_step_ctx_get(step_ctx, SLURM_STEP_CTX_JOBID, &job_id);
 	slurm_step_ctx_get(step_ctx, SLURM_STEP_CTX_STEPID, &step_id);
 	info("Terminating job step %u.%u", job_id, step_id);
+	update_job_state(job, SRUN_JOB_CANCELLED);
 	slurm_kill_job_step(job_id, step_id, SIGKILL);
 }
 
@@ -925,199 +965,221 @@ static void
 _handle_max_wait(int signo)
 {
 	info("First task exited %ds ago", opt.max_wait);
-	_task_state_struct_print();
+	task_state_print(task_state, (log_f) info);
 	_terminate_job_step(job->step_ctx);
 }
 
 static char *
-_taskids_to_nodelist(bitstr_t *tasks_exited)
+_hostset_to_string(hostset_t hs)
+{
+	size_t n = 1024;
+	size_t maxsize = 1024*64;
+	char *str = NULL;
+
+	do {
+		n *= 2;		/* grow first, then format into n bytes */
+		str = xrealloc(str, n);
+	} while ((hostset_ranged_string(hs, n, str) < 0) && (n < maxsize));
+
+	/*
+	 *  If string was truncated, indicate this with a '+' suffix.
+	 */
+	if (n >= maxsize)
+		strcpy(str + (maxsize - 2), "+");
+
+	return str;
+}
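hostset_ranged_string() returns a negative value when the buffer is too small, so _hostset_to_string() doubles and retries up to a 64 KiB cap, marking any truncation with a trailing '+'. The same grow-until-it-fits pattern with plain libc, using vsnprintf()'s return value as the size probe (a generic sketch, not SLURM code):

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>

/* printf() into a heap buffer, doubling until the output fits.
 * Caller must free() the result; returns NULL on allocation failure. */
static char *alloc_printf(const char *fmt, ...)
{
	size_t  n = 64;
	char   *buf = NULL, *tmp;
	va_list ap;
	int     len;

	for (;;) {
		if ((tmp = realloc(buf, n)) == NULL) {
			free(buf);
			return NULL;
		}
		buf = tmp;
		va_start(ap, fmt);
		len = vsnprintf(buf, n, fmt, ap);
		va_end(ap);
		if ((len >= 0) && ((size_t) len < n))
			return buf;	/* output fit, including the NUL */
		n *= 2;			/* too small: grow and retry */
	}
}

Note that n is doubled before the formatter is told the capacity, so the reported size never exceeds the allocation.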
+
+/* Convert an array of task IDs into a list of host names
+ * RET: the string, caller must xfree() this value */ 
+static char *
+_task_ids_to_host_list(int ntasks, uint32_t taskids[])
 {
 	int i;
-	char *hostname, *hostlist_str;
-	hostlist_t hostlist;
-	job_step_create_response_msg_t *step_resp;
-	slurm_step_layout_t *step_layout;
-
-	if (!job->step_ctx) {
-		error("No step_ctx");
-		hostlist_str = xstrdup("Unknown");
-		return hostlist_str;
+	hostset_t hs;
+	char *hosts;
+	slurm_step_layout_t *sl;
+
+	if ((sl = _get_slurm_step_layout(job)) == NULL)
+		return (xstrdup("Unknown"));
+
+	hs = hostset_create(NULL);
+	for (i = 0; i < ntasks; i++) {
+		char *host = slurm_step_layout_host_name(sl, taskids[i]);
+		if (host) {
+			hostset_insert(hs, host);
+			free(host);
+		} else {
+			error("Could not identify host name for task %u",
+			      taskids[i]);
+		}
 	}
 
-	slurm_step_ctx_get(job->step_ctx, SLURM_STEP_CTX_RESP, &step_resp);
-	step_layout = step_resp->step_layout;
-	hostlist = hostlist_create(NULL);
-	for (i=0; i<job->ntasks; i++) {
-		if (!bit_test(tasks_exited, i))
-			continue;
-		hostname = slurm_step_layout_host_name(step_layout, i);
-		hostlist_push(hostlist, hostname);
+	hosts = _hostset_to_string(hs);
+	hostset_destroy(hs);
+
+	return (hosts);
+}
+
+/* Convert an array of task IDs into a string.
+ * RET: the string, caller must xfree() this value
+ * NOTE: the taskids array is not necessarily in numeric order, 
+ *       so we use existing bitmap functions to format */
+static char *
+_task_array_to_string(int ntasks, uint32_t taskids[])
+{
+	bitstr_t *tasks_bitmap = NULL;
+	char *str;
+	int i;
+
+	tasks_bitmap = bit_alloc(job->ntasks);
+	if (!tasks_bitmap)
+		fatal("bit_alloc: memory allocation failure");
+	for (i=0; i<ntasks; i++)
+		bit_set(tasks_bitmap, taskids[i]);
+	str = xmalloc(2048);
+	bit_fmt(str, 2048, tasks_bitmap);
+	bit_free(tasks_bitmap);
+
+	return str;
+}
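bit_fmt() renders the set bits in SLURM's ranged-list notation, so an unordered task_id_list such as {5,0,2,1} prints as "0-2,5"; that is why the IDs are round-tripped through a bitmap first. A standalone sketch of the same range compression over a plain flag array (illustrative; not SLURM's bitstring implementation):

#include <stdio.h>

/* Print indexes whose flag is set as a ranged list, e.g. "0-2,5". */
static void print_ranges(const int *set, int n)
{
	int i = 0, first = 1;

	while (i < n) {
		if (!set[i]) { i++; continue; }
		int lo = i;
		while ((i < n) && set[i])
			i++;
		printf("%s", first ? "" : ",");
		if (i - 1 > lo)
			printf("%d-%d", lo, i - 1);
		else
			printf("%d", lo);
		first = 0;
	}
	printf("\n");
}

int main(void)
{
	int set[8] = {0};
	int ids[] = {5, 0, 2, 1};	/* arrives unordered */
	for (int k = 0; k < 4; k++)
		set[ids[k]] = 1;
	print_ranges(set, 8);		/* prints "0-2,5" */
	return 0;
}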
+
+static void
+_update_task_exit_state(uint32_t ntasks, uint32_t taskids[], int abnormal)
+{
+	int i;
+	task_state_type_t t = abnormal ? TS_ABNORMAL_EXIT : TS_NORMAL_EXIT;
+
+	for (i = 0; i < ntasks; i++)
+		task_state_update(task_state, taskids[i], t);
+}
+
+static int _kill_on_bad_exit(void)
+{
+	return (opt.kill_bad_exit || slurm_get_kill_on_bad_exit());
+}
+
+static void _setup_max_wait_timer(void)
+{
+	/*  If these are the first tasks to finish we need to
+	 *   start a timer to kill off the job step if the other
+	 *   tasks don't finish within opt.max_wait seconds.
+	 */
+	verbose("First task exited. Terminating job in %ds.", opt.max_wait);
+	xsignal(SIGALRM, _handle_max_wait);
+	alarm(opt.max_wait);
+}
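_setup_max_wait_timer() uses the classic one-shot SIGALRM pattern: arm alarm() when the first task exits, and let the handler kill the step if it fires before the remaining tasks finish (xsignal() is SLURM's sigaction wrapper). The bare-bones shape, as a sketch whose handler only sets an async-signal-safe flag:

#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t expired = 0;

static void on_alarm(int signo)
{
	(void) signo;
	expired = 1;	/* async-signal-safe: just set a flag */
}

int main(void)
{
	signal(SIGALRM, on_alarm);
	alarm(3);		/* one-shot timer: 3 seconds */

	while (!expired)
		pause();	/* sleep until a signal arrives */

	printf("max-wait expired, would terminate the job step here\n");
	return 0;
}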
+
+static const char *
+_taskstr(int n)
+{
+	if (n == 1)
+		return "task";
+	else
+		return "tasks";
+}
+
+static int
+_is_openmpi_port_error(int errcode)
+{
+	if (errcode != OPEN_MPI_PORT_ERROR)
+		return 0;
+	if (opt.resv_port_cnt == NO_VAL)
+		return 0;
+	if (difftime(time(NULL), launch_start_time) > slurm_get_msg_timeout())
+		return 0;
+	return 1;
+}
+
+static void
+_handle_openmpi_port_error(const char *tasks, const char *hosts)
+{
+	char *msg = "retrying";
+
+	if (!retry_step_begin) {
+		retry_step_begin = true;
+		retry_step_cnt++;
+	}
+	if (retry_step_cnt >= MAX_STEP_RETRIES) {
+		msg = "aborting";
+		opt.kill_bad_exit = true;
 	}
-	hostlist_uniq(hostlist);
-	hostlist_str = xmalloc(2048);
-	hostlist_ranged_string(hostlist, 2048, hostlist_str);
-	hostlist_destroy(hostlist);
-	return hostlist_str;
+	error("%s: tasks %s unable to claim reserved port, %s.",
+	      hosts, tasks, msg);
 }
 
 static void
 _task_finish(task_exit_msg_t *msg)
 {
-	bitstr_t *tasks_exited = NULL;
-	char buf[65536], *core_str = "", *msg_str, *node_list = NULL;
-	static bool first_done = true;
-	static bool first_error = true;
+	char *tasks;
+	char *hosts;
 	uint32_t rc = 0;
-	int i;
+	int normal_exit = 0;
+
+	const char *task_str = _taskstr(msg->num_tasks);
+
+	verbose("Received task exit notification for %d %s (status=0x%04x).",
+	      msg->num_tasks, task_str, msg->return_code);
+
+	tasks = _task_array_to_string(msg->num_tasks, msg->task_id_list);
+	hosts = _task_ids_to_host_list(msg->num_tasks, msg->task_id_list);
 
-	verbose("%u tasks finished (rc=%u)",
-		msg->num_tasks, msg->return_code);
-	tasks_exited = bit_alloc(job->ntasks);
-	for (i=0; i<msg->num_tasks; i++)
-		bit_set(tasks_exited,  msg->task_id_list[i]);
-	bit_fmt(buf, sizeof(buf), tasks_exited);
 	if (WIFEXITED(msg->return_code)) {
-		rc = WEXITSTATUS(msg->return_code);
-		if (rc != 0) {
-			bit_or(task_state.finish_abnormal, tasks_exited);
-			node_list = _taskids_to_nodelist(tasks_exited);
-			error("%s: task %s: Exited with exit code %d", 
-			      node_list, buf, rc);
-		} else {
-			bit_or(task_state.finish_normal, tasks_exited);
-			verbose("task %s: Completed", buf);
+		if ((rc = WEXITSTATUS(msg->return_code)) == 0) {
+			verbose("%s: %s %s: Completed", hosts, task_str, tasks);
+			normal_exit = 1;
 		}
-	} else if (WIFSIGNALED(msg->return_code)) {
-		bit_or(task_state.finish_abnormal, tasks_exited);
-		msg_str = strsignal(WTERMSIG(msg->return_code));
+		else if (_is_openmpi_port_error(rc))
+			_handle_openmpi_port_error(tasks, hosts);
+		else
+			error("%s: %s %s: Exited with exit code %d",
+			      hosts, task_str, tasks, rc);
+		if (!WIFEXITED(global_rc) || (rc > WEXITSTATUS(global_rc)))
+			global_rc = msg->return_code;
+	}
+	else if (WIFSIGNALED(msg->return_code)) {
+		const char *signal_str = strsignal(WTERMSIG(msg->return_code));
+		char * core_str = "";
 #ifdef WCOREDUMP
 		if (WCOREDUMP(msg->return_code))
 			core_str = " (core dumped)";
 #endif
-		node_list = _taskids_to_nodelist(tasks_exited);
 		if (job->state >= SRUN_JOB_CANCELLED) {
-			rc = NO_VAL;
-			verbose("%s: task %s: %s%s", 
-				node_list, buf, msg_str, core_str);
+			verbose("%s: %s %s: %s%s",
+				hosts, task_str, tasks, signal_str, core_str);
 		} else {
 			rc = msg->return_code;
-			error("%s: task %s: %s%s", 
-			      node_list, buf, msg_str, core_str);
+			error("%s: %s %s: %s%s",
+			      hosts, task_str, tasks, signal_str, core_str);
 		}
+		if (global_rc == 0)
+			global_rc = msg->return_code;
 	}
-	xfree(node_list);
-	bit_free(tasks_exited);
-	global_rc = MAX(global_rc, rc);
-
-	if (first_error && rc > 0 && opt.kill_bad_exit) {
-		first_error = false;
-		_terminate_job_step(job->step_ctx);
-	} else if (first_done && opt.max_wait > 0) {
-		/* If these are the first tasks to finish we need to
-		 * start a timer to kill off the job step if the other
-		 * tasks don't finish within opt.max_wait seconds.
-		 */
-		first_done = false;
-		debug2("First task has exited");
-		xsignal(SIGALRM, _handle_max_wait);
-		verbose("starting alarm of %d seconds", opt.max_wait);
-		alarm(opt.max_wait);
-	}
-}
 
-static void
-_task_state_struct_init(int num_tasks)
-{
-	task_state.start_success = bit_alloc(num_tasks);
-	task_state.start_failure = bit_alloc(num_tasks);
-	task_state.finish_normal = bit_alloc(num_tasks);
-	task_state.finish_abnormal = bit_alloc(num_tasks);
-}
+	xfree(tasks);
+	xfree(hosts);
 
-/*
- * Tasks will most likely have bits set in multiple of the task_state
- * bit strings (e.g. a task can start normally and then later exit normally)
- * so we ensure that a task is only "seen" once.
- */
-static void
-_task_state_struct_print(void)
-{
-	bitstr_t *tmp, *seen, *not_seen;
-	char buf[65536];
-	int len;
-
-	len = bit_size(task_state.finish_abnormal); /* all the same length */
-	tmp = bit_alloc(len);
-	seen = bit_alloc(len);
-	not_seen = bit_alloc(len);
-	bit_not(not_seen);
-
-	if (bit_set_count(task_state.finish_abnormal) > 0) {
-		bit_copybits(tmp, task_state.finish_abnormal);
-		bit_and(tmp, not_seen);
-		bit_fmt(buf, sizeof(buf), tmp);
-		info("task %s: exited abnormally", buf);
-		bit_or(seen, tmp);
-		bit_copybits(not_seen, seen);
-		bit_not(not_seen);
-	}
-
-	if (bit_set_count(task_state.finish_normal) > 0) {
-		bit_copybits(tmp, task_state.finish_normal);
-		bit_and(tmp, not_seen);
-		bit_fmt(buf, sizeof(buf), tmp);
-		info("task %s: exited", buf);
-		bit_or(seen, tmp);
-		bit_copybits(not_seen, seen);
-		bit_not(not_seen);
-	}
+	_update_task_exit_state(msg->num_tasks, msg->task_id_list,
+			!normal_exit);
 
-	if (bit_set_count(task_state.start_failure) > 0) {
-		bit_copybits(tmp, task_state.start_failure);
-		bit_and(tmp, not_seen);
-		bit_fmt(buf, sizeof(buf), tmp);
-		info("task %s: failed to start", buf);
-		bit_or(seen, tmp);
-		bit_copybits(not_seen, seen);
-		bit_not(not_seen);
-	}
-
-	if (bit_set_count(task_state.start_success) > 0) {
-		bit_copybits(tmp, task_state.start_success);
-		bit_and(tmp, not_seen);
-		bit_fmt(buf, BUFSIZ, tmp);
-		info("task %s: running", buf);
-		bit_or(seen, tmp);
-		bit_copybits(not_seen, seen);
-		bit_not(not_seen);
-	}
-}
+	if (task_state_first_abnormal_exit(task_state) && _kill_on_bad_exit())
+		_terminate_job_step(job->step_ctx);
 
-static void
-_task_state_struct_free(void)
-{
-	bit_free(task_state.start_success);
-	bit_free(task_state.start_failure);
-	bit_free(task_state.finish_normal);
-	bit_free(task_state.finish_abnormal);
+	if (task_state_first_exit(task_state) && (opt.max_wait > 0))
+		_setup_max_wait_timer();
 }
 
 static void _handle_intr()
 {
 	static time_t last_intr      = 0;
 	static time_t last_intr_sent = 0;
-	if (opt.quit_on_intr) {
-		job_force_termination(job);
-		slurm_step_launch_abort(job->step_ctx);
-		return;
-	}
 
-	if (((time(NULL) - last_intr) > 1) && !opt.disable_status) {
+	if (!opt.quit_on_intr &&
+	    (((time(NULL) - last_intr) > 1) && !opt.disable_status)) {
 		if (job->state < SRUN_JOB_FORCETERM)
 			info("interrupt (one more within 1 sec to abort)");
 		else
 			info("interrupt (abort already in progress)");
-		_task_state_struct_print();
+		task_state_print(task_state, (log_f) info);
 		last_intr = time(NULL);
 	} else  { /* second Ctrl-C in half as many seconds */
 		update_job_state(job, SRUN_JOB_CANCELLED);
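
The reworked _handle_intr() above keeps srun's two-step abort: a first
Ctrl-C reports task states, and a second one arriving within a second
cancels the job. Below is a standalone sketch of the same pattern,
illustrative only and not part of this patch; like srun's handler it
calls stdio from a signal handler, which is acceptable for a demo but
not strictly async-signal-safe.

#include <signal.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static time_t last_intr = 0;

static void handle_intr(int sig)
{
	time_t now = time(NULL);

	(void) sig;
	if ((now - last_intr) > 1) {	/* first Ctrl-C: warn only */
		fprintf(stderr, "interrupt (one more within 1 sec to abort)\n");
		last_intr = now;
	} else {			/* second Ctrl-C within 1 sec */
		fprintf(stderr, "aborting\n");
		_exit(1);
	}
}

int main(void)
{
	signal(SIGINT, handle_intr);
	for (;;)
		pause();		/* handler runs on each SIGINT */
}
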
diff --git a/src/srun/srun.h b/src/srun/srun.h
index edf3da93b3b0582d4634e8b68ca1b0d0cb965d6b..7822092a5cdc4322505ce17332eb2bb014a59589 100644
--- a/src/srun/srun.h
+++ b/src/srun/srun.h
@@ -4,10 +4,11 @@
  *  Copyright (C) 2006 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Mark Grondona <mgrondona@llnl.gov>.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/srun/srun_job.c b/src/srun/srun_job.c
index 48f7d5a68e656f9e9971a7aa3f86d6d2d4272d1f..f810f0316997356b157aa1902236d48fe6bcfc22 100644
--- a/src/srun/srun_job.c
+++ b/src/srun/srun_job.c
@@ -5,10 +5,11 @@
  *  Copyright (C) 2008 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Mark Grondona <grondona@llnl.gov>.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -76,8 +77,8 @@ typedef struct allocation_info {
 	uint32_t                stepid;
 	char                   *nodelist;
 	uint32_t                nnodes;
-	uint16_t                num_cpu_groups;
-	uint32_t               *cpus_per_node;
+	uint32_t                num_cpu_groups;
+	uint16_t               *cpus_per_node;
 	uint32_t               *cpu_count_reps;
 	select_jobinfo_t select_jobinfo;
 } allocation_info_t;
@@ -101,7 +102,7 @@ job_create_noalloc(void)
 {
 	srun_job_t *job = NULL;
 	allocation_info_t *ai = xmalloc(sizeof(*ai));
-	uint32_t cpn = 1;
+	uint16_t cpn = 1;
 	hostlist_t  hl = hostlist_create(opt.nodelist);
 
 	if (!hl) {
@@ -448,7 +449,8 @@ static srun_job_t *
 _job_create_structure(allocation_info_t *ainfo)
 {
 	srun_job_t *job = xmalloc(sizeof(srun_job_t));
-	
+	int i;
+
 	_set_nprocs(ainfo);
 	debug2("creating job with %d tasks", opt.nprocs);
 
@@ -476,12 +478,21 @@ _job_create_structure(allocation_info_t *ainfo)
 			error("Are required nodes explicitly excluded?");
 		}
 		return NULL;
-	}	
+	}
+	if ((ainfo->cpus_per_node == NULL) || 
+	    (ainfo->cpu_count_reps == NULL)) {
+		error("cpus_per_node array is not set");
+		return NULL;
+	}
 #endif
 	job->select_jobinfo = ainfo->select_jobinfo;
 	job->jobid   = ainfo->jobid;
 	
 	job->ntasks  = opt.nprocs;
+	for (i=0; i<ainfo->num_cpu_groups; i++) {
+		job->cpu_count += ainfo->cpus_per_node[i] *
+				  ainfo->cpu_count_reps[i];
+	}
 
 	job->rc       = -1;
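
The loop added above computes job->cpu_count from the allocation's
run-length-encoded CPU description: cpus_per_node[i] holds a per-node
CPU count and cpu_count_reps[i] says how many consecutive nodes share
it. A self-contained illustration, with made-up values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* hypothetical allocation: 3 nodes with 8 CPUs, 1 node with 4 */
	uint16_t cpus_per_node[]  = { 8, 4 };
	uint32_t cpu_count_reps[] = { 3, 1 };
	uint32_t num_cpu_groups = 2, cpu_count = 0, i;

	for (i = 0; i < num_cpu_groups; i++)
		cpu_count += cpus_per_node[i] * cpu_count_reps[i];

	printf("cpu_count = %u\n", cpu_count);	/* prints 28 */
	return 0;
}
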
 	
diff --git a/src/srun/srun_job.h b/src/srun/srun_job.h
index 200bf98382d098bf317fcba0216eb6b605c3948a..a22ca8adab0703de7e39f936ef4d9b19d6a6af31 100644
--- a/src/srun/srun_job.h
+++ b/src/srun/srun_job.h
@@ -5,10 +5,11 @@
  *  Copyright (C) 2002 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Mark Grondona <mgrondona@llnl.gov>.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -89,6 +90,7 @@ typedef struct srun_job {
 	uint32_t jobid;		/* assigned job id 	                  */
 	uint32_t stepid;	/* assigned step id 	                  */
 
+	uint32_t cpu_count;	/* allocated CPUs */
 	uint32_t nhosts;	/* node count */
 	uint32_t ntasks;	/* task count */
 	srun_job_state_t state;	/* job state	   	                  */
diff --git a/src/srun/srun_pty.c b/src/srun/srun_pty.c
index 8de09059434ac684b5e5415c8084230cea67fd6f..34261377e8c9f2f9bb36df6d0b4bd847f1e6bac0 100644
--- a/src/srun/srun_pty.c
+++ b/src/srun/srun_pty.c
@@ -4,10 +4,11 @@
  *  Copyright (C) 2002-2006 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette  <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/srun/srun_pty.h b/src/srun/srun_pty.h
index 56e8ded1a25d5a11c0f1e91a1b7b978d468f0992..6f473ef5a053b730d582d12bdfcddab76f7c2507 100644
--- a/src/srun/srun_pty.h
+++ b/src/srun/srun_pty.h
@@ -4,10 +4,11 @@
  *  Copyright (C) 2002 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Moe Jette <jette@llnl.gov>.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/srun/task_state.c b/src/srun/task_state.c
new file mode 100644
index 0000000000000000000000000000000000000000..d2a1dd9d8fc3a9917a4112c3b3095feaa55dc3e4
--- /dev/null
+++ b/src/srun/task_state.c
@@ -0,0 +1,193 @@
+/*****************************************************************************\
+ * src/srun/task_state.c - task state container
+ * $Id$
+ *****************************************************************************
+ *  Copyright (C) 2002 The Regents of the University of California.
+ *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+ *  Written by Mark Grondona <mgrondona@llnl.gov>.
+ *  CODE-OCEC-09-009. All rights reserved.
+ *
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
+ *
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and
+ *  distribute linked combinations including the two. You must obey the GNU
+ *  General Public License in all respects for all of the code used other than
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this
+ *  exception to your version of the file(s), but you are not obligated to do
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in
+ *  the program, then also delete it here.
+ *
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#ifdef HAVE_CONFIG_H
+#  include "config.h"
+#endif
+
+#include <string.h>
+
+#include "src/common/xmalloc.h"
+#include "src/common/bitstring.h"
+#include "src/common/xassert.h"
+
+#include "src/srun/task_state.h"
+
+struct task_state_struct {
+	int n_tasks;
+	int n_started;
+	int n_abnormal;
+	int n_exited;
+	unsigned int first_exit:1;
+	unsigned int first_abnormal_exit:1;
+	bitstr_t *start_failed;
+	bitstr_t *running;
+	bitstr_t *normal_exit;
+	bitstr_t *abnormal_exit;
+};
+
+task_state_t task_state_create (int ntasks)
+{
+	task_state_t ts = xmalloc (sizeof (*ts));
+
+	/* ts is zero filled by xmalloc() */
+	ts->n_tasks = ntasks;
+	ts->running = bit_alloc (ntasks);
+	ts->start_failed = bit_alloc (ntasks);
+	ts->normal_exit = bit_alloc (ntasks);
+	ts->abnormal_exit = bit_alloc (ntasks);
+
+	return (ts);
+}
+
+void task_state_destroy (task_state_t ts)
+{
+	if (ts == NULL)
+		return;
+	if (ts->start_failed)
+		bit_free (ts->start_failed);
+	if (ts->running)
+		bit_free (ts->running);
+	if (ts->normal_exit)
+		bit_free (ts->normal_exit);
+	if (ts->abnormal_exit)
+		bit_free (ts->abnormal_exit);
+	xfree (ts);
+}
+
+static const char *_task_state_type_str (task_state_type_t t)
+{
+	switch (t) {
+	case TS_START_SUCCESS:
+		return ("TS_START_SUCCESS");
+	case TS_START_FAILURE:
+		return ("TS_START_FAILURE");
+	case TS_NORMAL_EXIT:
+		return ("TS_NORMAL_EXIT");
+	case TS_ABNORMAL_EXIT:
+		return ("TS_ABNORMAL_EXIT");
+	}
+	return ("Unknown");
+}
+
+void task_state_update (task_state_t ts, int taskid, task_state_type_t t)
+{
+	xassert (ts != NULL);
+	xassert (taskid >= 0);
+	xassert (taskid < ts->n_tasks);
+
+	debug3("task_state_update(taskid=%d, %s)",
+	       taskid, _task_state_type_str (t));
+
+	switch (t) {
+	case TS_START_SUCCESS:
+		bit_set (ts->running, taskid);
+		ts->n_started++;
+		break;
+	case TS_START_FAILURE:
+		bit_set (ts->start_failed, taskid);
+		break;
+	case TS_NORMAL_EXIT:
+		bit_set (ts->normal_exit, taskid);
+		bit_clear (ts->running, taskid);
+		ts->n_exited++;
+		break;
+	case TS_ABNORMAL_EXIT:
+		bit_clear (ts->running, taskid);
+		bit_set (ts->abnormal_exit, taskid);
+		ts->n_exited++;
+		ts->n_abnormal++;
+		break;
+	}
+
+	xassert ((bit_set_count(ts->abnormal_exit) +
+		  bit_set_count(ts->normal_exit)) == ts->n_exited);
+}
+
+int task_state_first_exit (task_state_t ts)
+{
+	if (!ts->first_exit && ts->n_exited) {
+		ts->first_exit = 1;
+		return (1);
+	}
+	return (0);
+}
+
+int task_state_first_abnormal_exit (task_state_t ts)
+{
+	if (!ts->first_abnormal_exit && ts->n_abnormal) {
+		ts->first_abnormal_exit = 1;
+		return (1);
+	}
+	return (0);
+}
+
+static void _do_log_msg (bitstr_t *b, log_f fn, const char *msg)
+{
+	char buf [65536];
+	char *s = bit_set_count (b) == 1 ? "" : "s";
+	(*fn) ("task%s %s: %s\n", s, bit_fmt (buf, sizeof(buf), b), msg);
+}
+
+void task_state_print (task_state_t ts, log_f fn)
+{
+	bitstr_t *unseen = bit_alloc (ts->n_tasks);
+
+	if (bit_set_count (ts->start_failed)) {
+		_do_log_msg (ts->start_failed, fn, "failed to start");
+		bit_or (unseen, ts->start_failed);
+	}
+	if (bit_set_count (ts->running)) {
+		_do_log_msg (ts->running, fn, "running");
+		bit_or (unseen, ts->running);
+	}
+	if (bit_set_count (ts->abnormal_exit)) {
+		_do_log_msg (ts->abnormal_exit, fn, "exited abnormally");
+		bit_or (unseen, ts->abnormal_exit);
+	}
+	if (bit_set_count (ts->normal_exit)) {
+		_do_log_msg (ts->normal_exit, fn, "exited");
+		bit_or (unseen, ts->normal_exit);
+	}
+	bit_not (unseen);
+	if (bit_set_count (unseen))
+		_do_log_msg (unseen, fn, "unknown");
+	bit_free (unseen);
+}
+
diff --git a/src/srun/task_state.h b/src/srun/task_state.h
new file mode 100644
index 0000000000000000000000000000000000000000..7309fc9dd75300458654f6cefd663d7dc0912ed6
--- /dev/null
+++ b/src/srun/task_state.h
@@ -0,0 +1,66 @@
+/*****************************************************************************\
+ * src/srun/task_state.h - task state container for srun
+ * $Id$
+ *****************************************************************************
+ *  Copyright (C) 2002 The Regents of the University of California.
+ *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+ *  Written by Mark Grondona <mgrondona@llnl.gov>.
+ *  CODE-OCEC-09-009. All rights reserved.
+ *
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
+ *
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and
+ *  distribute linked combinations including the two. You must obey the GNU
+ *  General Public License in all respects for all of the code used other than
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this
+ *  exception to your version of the file(s), but you are not obligated to do
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in
+ *  the program, then also delete it here.
+ *
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#ifndef _HAVE_TASK_STATE_H
+#define _HAVE_TASK_STATE_H
+
+typedef struct task_state_struct * task_state_t;
+
+typedef enum {
+	TS_START_SUCCESS,
+	TS_START_FAILURE,
+	TS_NORMAL_EXIT,
+	TS_ABNORMAL_EXIT
+} task_state_type_t;
+
+task_state_t task_state_create (int ntasks);
+
+void task_state_destroy (task_state_t ts);
+
+void task_state_update (task_state_t ts, int taskid, task_state_type_t t);
+
+int task_state_first_exit (task_state_t ts);
+
+int task_state_first_abnormal_exit (task_state_t ts);
+
+typedef void (*log_f) (const char *, ...);
+
+void task_state_print (task_state_t ts, log_f fn);
+
+#endif /* !_HAVE_TASK_STATE_H */
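
A minimal usage sketch of the task_state container declared above,
assuming it is compiled inside the SLURM source tree (task_state.c
depends on xmalloc and bitstring). The task ids, state transitions and
the my_log logger are invented for illustration.

#include <stdarg.h>
#include <stdio.h>

#include "src/srun/task_state.h"

static void my_log(const char *fmt, ...)	/* printf-style logger */
{
	va_list ap;
	va_start(ap, fmt);
	vfprintf(stderr, fmt, ap);
	va_end(ap);
}

void example(void)
{
	task_state_t ts = task_state_create(4);	/* 4 tasks */

	task_state_update(ts, 0, TS_START_SUCCESS);
	task_state_update(ts, 1, TS_START_SUCCESS);
	task_state_update(ts, 2, TS_START_FAILURE);
	task_state_update(ts, 0, TS_NORMAL_EXIT);
	task_state_update(ts, 1, TS_ABNORMAL_EXIT);

	if (task_state_first_abnormal_exit(ts))	/* true exactly once */
		my_log("first abnormal exit seen\n");

	task_state_print(ts, my_log);	/* summarizes all four tasks */
	task_state_destroy(ts);
}
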
diff --git a/src/srun_cr/Makefile.am b/src/srun_cr/Makefile.am
new file mode 100644
index 0000000000000000000000000000000000000000..153b01ec914954730bf1581a957d84c86490419c
--- /dev/null
+++ b/src/srun_cr/Makefile.am
@@ -0,0 +1,21 @@
+#
+
+AUTOMAKE_OPTIONS = foreign
+CLEANFILES = core.*
+
+INCLUDES = -I$(top_srcdir) $(BLCR_CPPFLAGS)
+
+bin_PROGRAMS = srun_cr
+
+srun_cr_SOURCES = srun_cr.c
+
+convenience_libs = $(top_builddir)/src/api/libslurm.o -ldl
+
+srun_cr_LDADD = $(convenience_libs) $(BLCR_LIBS)
+
+srun_cr_LDFLAGS = -export-dynamic $(CMD_LDFLAGS) $(BLCR_LDFLAGS)
+
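+# "force" has no prerequisites and no recipe, so any target that lists
+# it as a dependency is always rebuilt; the rule below uses that to
+# re-run make on libslurm.o in src/api whenever srun_cr is linked.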
+force:
+$(convenience_libs) : force
+	@cd `dirname $@` && $(MAKE) `basename $@`
+
diff --git a/src/srun_cr/Makefile.in b/src/srun_cr/Makefile.in
new file mode 100644
index 0000000000000000000000000000000000000000..1daf2da3f806501b4c1708861801fabe6b5c2ff6
--- /dev/null
+++ b/src/srun_cr/Makefile.in
@@ -0,0 +1,568 @@
+# Makefile.in generated by automake 1.10.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
+# 2003, 2004, 2005, 2006, 2007, 2008  Free Software Foundation, Inc.
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+#
+
+VPATH = @srcdir@
+pkgdatadir = $(datadir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+target_triplet = @target@
+bin_PROGRAMS = srun_cr$(EXEEXT)
+subdir = src/srun_cr
+DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
+	$(top_srcdir)/auxdir/slurm.m4 \
+	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
+	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
+	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
+	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
+	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_databases.m4 \
+	$(top_srcdir)/auxdir/x_ac_debug.m4 \
+	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
+	$(top_srcdir)/auxdir/x_ac_federation.m4 \
+	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
+	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_munge.m4 \
+	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_pam.m4 \
+	$(top_srcdir)/auxdir/x_ac_ptrace.m4 \
+	$(top_srcdir)/auxdir/x_ac_readline.m4 \
+	$(top_srcdir)/auxdir/x_ac_setpgrp.m4 \
+	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
+	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
+	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
+	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+	$(ACLOCAL_M4)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h $(top_builddir)/slurm/slurm.h
+CONFIG_CLEAN_FILES =
+am__installdirs = "$(DESTDIR)$(bindir)"
+binPROGRAMS_INSTALL = $(INSTALL_PROGRAM)
+PROGRAMS = $(bin_PROGRAMS)
+am_srun_cr_OBJECTS = srun_cr.$(OBJEXT)
+srun_cr_OBJECTS = $(am_srun_cr_OBJECTS)
+am__DEPENDENCIES_1 = $(top_builddir)/src/api/libslurm.o
+am__DEPENDENCIES_2 =
+srun_cr_DEPENDENCIES = $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_2)
+srun_cr_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
+	--mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(srun_cr_LDFLAGS) \
+	$(LDFLAGS) -o $@
+DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm
+depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp
+am__depfiles_maybe = depfiles
+COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \
+	$(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
+LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
+	--mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \
+	$(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
+CCLD = $(CC)
+LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
+	--mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \
+	$(LDFLAGS) -o $@
+SOURCES = $(srun_cr_SOURCES)
+DIST_SOURCES = $(srun_cr_SOURCES)
+ETAGS = etags
+CTAGS = ctags
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AR = @AR@
+AUTHD_CFLAGS = @AUTHD_CFLAGS@
+AUTHD_LIBS = @AUTHD_LIBS@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
+BLUEGENE_LOADED = @BLUEGENE_LOADED@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CMD_LDFLAGS = @CMD_LDFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CXX = @CXX@
+CXXCPP = @CXXCPP@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+DSYMUTIL = @DSYMUTIL@
+ECHO = @ECHO@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+ELAN_LIBS = @ELAN_LIBS@
+EXEEXT = @EXEEXT@
+F77 = @F77@
+FEDERATION_LDFLAGS = @FEDERATION_LDFLAGS@
+FFLAGS = @FFLAGS@
+GREP = @GREP@
+GTK2_CFLAGS = @GTK2_CFLAGS@
+GTK2_LIBS = @GTK2_LIBS@
+HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@
+HAVEPGCONFIG = @HAVEPGCONFIG@
+HAVEPKGCONFIG = @HAVEPKGCONFIG@
+HAVE_AIX = @HAVE_AIX@
+HAVE_ELAN = @HAVE_ELAN@
+HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_OPENSSL = @HAVE_OPENSSL@
+HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+LDFLAGS = @LDFLAGS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBTOOL = @LIBTOOL@
+LIB_LDFLAGS = @LIB_LDFLAGS@
+LN_S = @LN_S@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MKDIR_P = @MKDIR_P@
+MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@
+MUNGE_LDFLAGS = @MUNGE_LDFLAGS@
+MUNGE_LIBS = @MUNGE_LIBS@
+MYSQL_CFLAGS = @MYSQL_CFLAGS@
+MYSQL_LIBS = @MYSQL_LIBS@
+NCURSES = @NCURSES@
+NMEDIT = @NMEDIT@
+NUMA_LIBS = @NUMA_LIBS@
+OBJEXT = @OBJEXT@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PAM_LIBS = @PAM_LIBS@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PGSQL_CFLAGS = @PGSQL_CFLAGS@
+PGSQL_LIBS = @PGSQL_LIBS@
+PLPA_LIBS = @PLPA_LIBS@
+PROCTRACKDIR = @PROCTRACKDIR@
+PROJECT = @PROJECT@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+RANLIB = @RANLIB@
+READLINE_LIBS = @READLINE_LIBS@
+RELEASE = @RELEASE@
+SED = @SED@
+SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
+SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SLURMCTLD_PORT = @SLURMCTLD_PORT@
+SLURMDBD_PORT = @SLURMDBD_PORT@
+SLURMD_PORT = @SLURMD_PORT@
+SLURM_API_AGE = @SLURM_API_AGE@
+SLURM_API_CURRENT = @SLURM_API_CURRENT@
+SLURM_API_MAJOR = @SLURM_API_MAJOR@
+SLURM_API_REVISION = @SLURM_API_REVISION@
+SLURM_API_VERSION = @SLURM_API_VERSION@
+SLURM_MAJOR = @SLURM_MAJOR@
+SLURM_MICRO = @SLURM_MICRO@
+SLURM_MINOR = @SLURM_MINOR@
+SLURM_PREFIX = @SLURM_PREFIX@
+SLURM_VERSION = @SLURM_VERSION@
+SO_LDFLAGS = @SO_LDFLAGS@
+SSL_CPPFLAGS = @SSL_CPPFLAGS@
+SSL_LDFLAGS = @SSL_LDFLAGS@
+SSL_LIBS = @SSL_LIBS@
+STRIP = @STRIP@
+UTIL_LIBS = @UTIL_LIBS@
+VERSION = @VERSION@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+ac_ct_F77 = @ac_ct_F77@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_vendor = @build_vendor@
+builddir = @builddir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target = @target@
+target_alias = @target_alias@
+target_cpu = @target_cpu@
+target_os = @target_os@
+target_vendor = @target_vendor@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+AUTOMAKE_OPTIONS = foreign
+CLEANFILES = core.*
+INCLUDES = -I$(top_srcdir) $(BLCR_CPPFLAGS)
+srun_cr_SOURCES = srun_cr.c
+convenience_libs = $(top_builddir)/src/api/libslurm.o -ldl
+srun_cr_LDADD = $(convenience_libs) $(BLCR_LIBS)
+srun_cr_LDFLAGS = -export-dynamic $(CMD_LDFLAGS) $(BLCR_LDFLAGS)
+all: all-am
+
+.SUFFIXES:
+.SUFFIXES: .c .lo .o .obj
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am  $(am__configure_deps)
+	@for dep in $?; do \
+	  case '$(am__configure_deps)' in \
+	    *$$dep*) \
+	      cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh \
+		&& exit 0; \
+	      exit 1;; \
+	  esac; \
+	done; \
+	echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign  src/srun_cr/Makefile'; \
+	cd $(top_srcdir) && \
+	  $(AUTOMAKE) --foreign  src/srun_cr/Makefile
+.PRECIOUS: Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+	@case '$?' in \
+	  *config.status*) \
+	    cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+	  *) \
+	    echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+	    cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+	esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+install-binPROGRAMS: $(bin_PROGRAMS)
+	@$(NORMAL_INSTALL)
+	test -z "$(bindir)" || $(MKDIR_P) "$(DESTDIR)$(bindir)"
+	@list='$(bin_PROGRAMS)'; for p in $$list; do \
+	  p1=`echo $$p|sed 's/$(EXEEXT)$$//'`; \
+	  if test -f $$p \
+	     || test -f $$p1 \
+	  ; then \
+	    f=`echo "$$p1" | sed 's,^.*/,,;$(transform);s/$$/$(EXEEXT)/'`; \
+	   echo " $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(binPROGRAMS_INSTALL) '$$p' '$(DESTDIR)$(bindir)/$$f'"; \
+	   $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(binPROGRAMS_INSTALL) "$$p" "$(DESTDIR)$(bindir)/$$f" || exit 1; \
+	  else :; fi; \
+	done
+
+uninstall-binPROGRAMS:
+	@$(NORMAL_UNINSTALL)
+	@list='$(bin_PROGRAMS)'; for p in $$list; do \
+	  f=`echo "$$p" | sed 's,^.*/,,;s/$(EXEEXT)$$//;$(transform);s/$$/$(EXEEXT)/'`; \
+	  echo " rm -f '$(DESTDIR)$(bindir)/$$f'"; \
+	  rm -f "$(DESTDIR)$(bindir)/$$f"; \
+	done
+
+clean-binPROGRAMS:
+	@list='$(bin_PROGRAMS)'; for p in $$list; do \
+	  f=`echo $$p|sed 's/$(EXEEXT)$$//'`; \
+	  echo " rm -f $$p $$f"; \
+	  rm -f $$p $$f ; \
+	done
+srun_cr$(EXEEXT): $(srun_cr_OBJECTS) $(srun_cr_DEPENDENCIES) 
+	@rm -f srun_cr$(EXEEXT)
+	$(srun_cr_LINK) $(srun_cr_OBJECTS) $(srun_cr_LDADD) $(LIBS)
+
+mostlyclean-compile:
+	-rm -f *.$(OBJEXT)
+
+distclean-compile:
+	-rm -f *.tab.c
+
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/srun_cr.Po@am__quote@
+
+.c.o:
+@am__fastdepCC_TRUE@	$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCC_TRUE@	mv -f $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(COMPILE) -c $<
+
+.c.obj:
+@am__fastdepCC_TRUE@	$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'`
+@am__fastdepCC_TRUE@	mv -f $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(COMPILE) -c `$(CYGPATH_W) '$<'`
+
+.c.lo:
+@am__fastdepCC_TRUE@	$(LTCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCC_TRUE@	mv -f $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LTCOMPILE) -c -o $@ $<
+
+mostlyclean-libtool:
+	-rm -f *.lo
+
+clean-libtool:
+	-rm -rf .libs _libs
+
+ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES)
+	list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
+	unique=`for i in $$list; do \
+	    if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+	  done | \
+	  $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
+	      END { if (nonempty) { for (i in files) print i; }; }'`; \
+	mkid -fID $$unique
+tags: TAGS
+
+TAGS:  $(HEADERS) $(SOURCES)  $(TAGS_DEPENDENCIES) \
+		$(TAGS_FILES) $(LISP)
+	tags=; \
+	here=`pwd`; \
+	list='$(SOURCES) $(HEADERS)  $(LISP) $(TAGS_FILES)'; \
+	unique=`for i in $$list; do \
+	    if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+	  done | \
+	  $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
+	      END { if (nonempty) { for (i in files) print i; }; }'`; \
+	if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \
+	  test -n "$$unique" || unique=$$empty_fix; \
+	  $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+	    $$tags $$unique; \
+	fi
+ctags: CTAGS
+CTAGS:  $(HEADERS) $(SOURCES)  $(TAGS_DEPENDENCIES) \
+		$(TAGS_FILES) $(LISP)
+	tags=; \
+	list='$(SOURCES) $(HEADERS)  $(LISP) $(TAGS_FILES)'; \
+	unique=`for i in $$list; do \
+	    if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+	  done | \
+	  $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
+	      END { if (nonempty) { for (i in files) print i; }; }'`; \
+	test -z "$(CTAGS_ARGS)$$tags$$unique" \
+	  || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
+	     $$tags $$unique
+
+GTAGS:
+	here=`$(am__cd) $(top_builddir) && pwd` \
+	  && cd $(top_srcdir) \
+	  && gtags -i $(GTAGS_ARGS) $$here
+
+distclean-tags:
+	-rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
+
+distdir: $(DISTFILES)
+	@srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+	topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+	list='$(DISTFILES)'; \
+	  dist_files=`for file in $$list; do echo $$file; done | \
+	  sed -e "s|^$$srcdirstrip/||;t" \
+	      -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+	case $$dist_files in \
+	  */*) $(MKDIR_P) `echo "$$dist_files" | \
+			   sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+			   sort -u` ;; \
+	esac; \
+	for file in $$dist_files; do \
+	  if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+	  if test -d $$d/$$file; then \
+	    dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+	    if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+	      cp -pR $(srcdir)/$$file $(distdir)$$dir || exit 1; \
+	    fi; \
+	    cp -pR $$d/$$file $(distdir)$$dir || exit 1; \
+	  else \
+	    test -f $(distdir)/$$file \
+	    || cp -p $$d/$$file $(distdir)/$$file \
+	    || exit 1; \
+	  fi; \
+	done
+check-am: all-am
+check: check-am
+all-am: Makefile $(PROGRAMS)
+installdirs:
+	for dir in "$(DESTDIR)$(bindir)"; do \
+	  test -z "$$dir" || $(MKDIR_P) "$$dir"; \
+	done
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+	@$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+	$(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+	  install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+	  `test -z '$(STRIP)' || \
+	    echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install
+mostlyclean-generic:
+
+clean-generic:
+	-test -z "$(CLEANFILES)" || rm -f $(CLEANFILES)
+
+distclean-generic:
+	-test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+
+maintainer-clean-generic:
+	@echo "This command is intended for maintainers to use"
+	@echo "it deletes files that may require special tools to rebuild."
+clean: clean-am
+
+clean-am: clean-binPROGRAMS clean-generic clean-libtool mostlyclean-am
+
+distclean: distclean-am
+	-rm -rf ./$(DEPDIR)
+	-rm -f Makefile
+distclean-am: clean-am distclean-compile distclean-generic \
+	distclean-tags
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-exec-am: install-binPROGRAMS
+
+install-html: install-html-am
+
+install-info: install-info-am
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-ps: install-ps-am
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+	-rm -rf ./$(DEPDIR)
+	-rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-compile mostlyclean-generic \
+	mostlyclean-libtool
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am: uninstall-binPROGRAMS
+
+.MAKE: install-am install-strip
+
+.PHONY: CTAGS GTAGS all all-am check check-am clean clean-binPROGRAMS \
+	clean-generic clean-libtool ctags distclean distclean-compile \
+	distclean-generic distclean-libtool distclean-tags distdir dvi \
+	dvi-am html html-am info info-am install install-am \
+	install-binPROGRAMS install-data install-data-am install-dvi \
+	install-dvi-am install-exec install-exec-am install-html \
+	install-html-am install-info install-info-am install-man \
+	install-pdf install-pdf-am install-ps install-ps-am \
+	install-strip installcheck installcheck-am installdirs \
+	maintainer-clean maintainer-clean-generic mostlyclean \
+	mostlyclean-compile mostlyclean-generic mostlyclean-libtool \
+	pdf pdf-am ps ps-am tags uninstall uninstall-am \
+	uninstall-binPROGRAMS
+
+
+force:
+$(convenience_libs) : force
+	@cd `dirname $@` && $(MAKE) `basename $@`
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/src/srun_cr/srun_cr.c b/src/srun_cr/srun_cr.c
new file mode 100644
index 0000000000000000000000000000000000000000..a2a1b0663876af20e5f76f3d284db55ed17161ca
--- /dev/null
+++ b/src/srun_cr/srun_cr.c
@@ -0,0 +1,553 @@
+/*****************************************************************************\
+ *  srun_cr.c - Checkpoint/Restart wrapper for srun
+ *****************************************************************************
+ *  Copyright (C) 2009 National University of Defense Technology, China.
+ *  Written by Hongjia Cao.
+ *  CODE-OCEC-09-009. All rights reserved.
+ *  
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
+ *  
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission 
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and 
+ *  distribute linked combinations including the two. You must obey the GNU 
+ *  General Public License in all respects for all of the code used other than 
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this 
+ *  exception to your version of the file(s), but you are not obligated to do 
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in 
+ *  the program, then also delete it here.
+ *  
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *  
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+#ifdef HAVE_CONFIG_H
+#  include "config.h"
+#endif
+
+#include <stdint.h>
+#include <stdio.h>
+#include <signal.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <string.h>
+#include <errno.h>
+#include <time.h>
+#include <poll.h>
+#include <pthread.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <sys/un.h>
+#include <sys/time.h>
+#include <sys/resource.h>
+#include <sys/wait.h>
+
+#include <libcr.h>
+#include <slurm/slurm.h>
+
+#include "src/common/fd.h"
+#include "src/common/log.h"
+#include "src/common/xmalloc.h"
+#include "src/common/xstring.h"
+
+static char *cr_run_path = BLCR_HOME "/bin/cr_run";
+static char *srun_path = SLURM_PREFIX "/bin/srun";
+
+/* global variables */
+static char **srun_argv = NULL;
+static pid_t srun_pid = 0;
+
+static uint32_t jobid = 0;
+static uint32_t stepid = 0xFFFFFFFF;
+static char *nodelist = NULL;
+
+static char cr_sock_addr[32];
+static int listen_fd = -1;
+
+static int step_launched = 0;
+static pthread_mutex_t step_launch_mutex = PTHREAD_MUTEX_INITIALIZER;
+static pthread_cond_t step_launch_cond = PTHREAD_COND_INITIALIZER;
+
+static cr_client_id_t cr_id = -1;
+
+static void remove_listen_socket(void);
+static int  _wait_for_srun_connect(void);
+static void _read_info_from_srun(int srun_fd);
+
+/**************** copied and modified from cr_restart of BLCR ****************/
+static void signal_child (int, siginfo_t *, void *);
+
+static void
+signal_self(int sig)
+{
+	struct sigaction sa;
+
+	/* restore default (in kernel) handler */
+	sa.sa_handler = SIG_DFL;
+	sa.sa_flags = SA_RESTART | SA_NOMASK;
+	sigemptyset(&sa.sa_mask);
+	(void)sigaction(sig, &sa, NULL);
+
+	/* send to self */
+	raise(sig);
+
+	/* restore self as handler */
+	sa.sa_sigaction = &signal_child;
+	sa.sa_flags = SA_RESTART | SA_NOMASK | SA_SIGINFO;
+	(void)sigaction(sig, &sa, NULL);
+}
+
+static void
+signal_child (int sig, siginfo_t *siginfo, void *context)
+{
+	if (srun_pid == 0) {	/* srun not forked yet */
+		signal_self(sig);
+		return;
+	}
+	
+	if ((siginfo->si_code > 0) &&	/* si_code > 0 indicates sent by kernel */
+	    (sig == SIGILL || sig == SIGFPE || 
+	     sig == SIGBUS || sig == SIGSEGV )) {
+		/* This signal is OUR error, so we don't forward */
+		signal_self(sig);
+	} else if (sig == SIGTSTP || sig == SIGTTIN || sig == SIGTTOU) {
+		/* The catchable stop signals go to child AND self */
+		(void)kill(srun_pid, sig);
+		signal_self(sig);
+	} else {
+		/* default case */
+		kill(srun_pid, sig);
+	}
+}
+
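+/*
+ * mimic_exit - terminate srun_cr the same way the child srun did:
+ * exit with the child's exit code, or re-raise its fatal signal with
+ * core dumps disabled, so our caller observes an identical status.
+ */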
+static void
+mimic_exit(int status)
+{
+	if (WIFEXITED(status)) {
+		/* easy to mimic normal return */
+		exit(WEXITSTATUS(status));
+	} else if (WIFSIGNALED(status)) {
+		/* disable generation of a 'core' */
+		struct rlimit r;
+		r.rlim_cur = r.rlim_max = 0;
+		(void)setrlimit(RLIMIT_CORE, &r);
+		
+		/* now raise the signal */
+		signal_self(WTERMSIG(status));
+	} else {
+		error("Unexpected status from child\n");
+		exit(-1);
+	}
+}
+/****************************************************************/
+static void
+on_child_exit(int signum)
+{
+	int status;
+
+	/*
+	 * If srun_cr is checkpointed/restarted after srun exited,
+	 * srun_pid will be the pid of the new srun.
+	 */
+	cr_enter_cs(cr_id);
+	if (waitpid(srun_pid, &status, WNOHANG) == srun_pid) {
+		verbose("srun(%d) exited, status: %d", srun_pid, status);
+		mimic_exit(status);
+	}
+	kill(srun_pid, SIGKILL);
+	cr_leave_cs(cr_id);
+}
+
+static int
+_slurm_debug_env_val (void)
+{
+	long int level = 0;
+	const char *val;
+
+	if ((val = getenv ("SLURM_DEBUG"))) {
+		char *p;
+		if ((level = strtol (val, &p, 10)) < -LOG_LEVEL_INFO)
+			level = -LOG_LEVEL_INFO;
+		if (p && *p != '\0')
+			level = 0;
+	}
+	return ((int) level);
+}
+
+
+static void
+update_env(char *name, char *val)
+{
+	char *buf = NULL;
+
+	xstrfmtcat (buf, "%s=%s", name, val);
+	if (putenv(buf)) {
+		fatal("failed to update env: %m");
+	}
+}
+
+static int
+init_srun_argv(int argc, char **argv)
+{
+	int i;
+	
+	srun_argv = (char **)xmalloc(sizeof(char *) * (argc + 3));
+
+	srun_argv[0] = cr_run_path;
+	srun_argv[1] = "--omit";
+	srun_argv[2] = srun_path;
+	for (i = 1; i < argc; i ++) {
+		srun_argv[i + 2] = argv[i];
+	}
+	srun_argv[argc + 2] = NULL;
+
+	return  0;
+}
+
+/* remove the listen socket file */
+static void
+remove_listen_socket(void)
+{
+	unlink(cr_sock_addr);
+}
+
+/*
+ * create_listen_socket - create a listening UNIX domain socket
+ *     for srun to connect
+ * RETURN: the socket fd on success, -1 on error
+ */
+static int
+create_listen_socket(void)
+{
+	struct sockaddr_un sa;
+	unsigned int sa_len;
+	int re_use_addr = 1;
+
+
+	close (listen_fd);	/* close possible old socket */
+	
+	sprintf(cr_sock_addr, "/tmp/sock.srun_cr.%u", (unsigned int)getpid());
+
+	listen_fd = socket(AF_UNIX, SOCK_STREAM, 0);
+	if (listen_fd < 0) {
+		error("failed to create listen socket: %m");
+		return -1;
+	}
+	
+	sa.sun_family = AF_UNIX;
+	strcpy(sa.sun_path, cr_sock_addr);
+	sa_len = strlen(sa.sun_path) + sizeof(sa.sun_family);
+
+	unlink(sa.sun_path);	/* remove possible old socket */
+
+	setsockopt(listen_fd, SOL_SOCKET, SO_REUSEADDR, 
+		   (void*)&re_use_addr, sizeof(int));
+
+	if (bind(listen_fd, (struct sockaddr *)&sa, sa_len) < 0) {
+		error("failed to bind listen socket: %m");
+		unlink(sa.sun_path);
+		return -1;
+	}
+
+	if (listen(listen_fd, 2) < 0) {
+		error("failed to listen: %m");
+		unlink(sa.sun_path);
+		return -1;
+	}
+
+	fd_set_nonblocking(listen_fd);
+	
+	return listen_fd;
+}
+
+/*
+ * fork_exec_srun - fork and exec srun
+ * GLOBALS srun_argv: arguments for running srun
+ * RETURN: 0 on success, otherwise on error
+ */
+static int
+fork_exec_srun(void)
+{
+	int rc = 0;
+	sigset_t sigset;
+
+	listen_fd = create_listen_socket();
+	if (listen_fd < 0) {
+		return -1;
+	}
+
+	srun_pid = fork();
+	if (srun_pid < 0) {
+		error("failed to fork child process: %m");
+		return -1;
+	} else if (srun_pid == 0) {	/* child */
+		/*
+		 * remove srun from the foreground process group,
+		 * or a Ctrl-C would deliver a duplicate SIGINT to srun
+		 */
+		setpgrp();
+		
+		update_env("SLURM_SRUN_CR_SOCKET", cr_sock_addr);
+
+		/*
+		 * BLCR blocks all signals in thread-context callback functions
+		 */
+		sigemptyset(&sigset);
+		pthread_sigmask(SIG_SETMASK, &sigset, NULL);
+		
+		execv(srun_argv[0], srun_argv);
+		perror("failed execv srun");
+		exit(-1);
+	}
+
+	return rc;
+}
+
+/*
+ * get_step_image_dir - get the directory in which to store step task images
+ * IN cr: non-zero for checkpoint, zero for restart
+ * RET image dir on success, NULL on error
+ *
+ * NOTE: can only be called from a checkpoint/restart callback
+ */
+static char *
+get_step_image_dir(int cr)
+{
+	const struct cr_checkpoint_info *ckpt_info;
+	const struct cr_restart_info *rstrt_info;
+	const char *dest;
+	char *rchar, *dir = NULL;	/* dir must start NULL for xstrfmtcat */
+
+	if (cr) {		/* checkpoint */
+		ckpt_info = cr_get_checkpoint_info();
+		if (!ckpt_info) {
+			error("failed to get checkpoint info: %s", 
+			      cr_strerror(errno));
+			return NULL;
+		}
+		dest = ckpt_info->dest;
+	} else {		/* restart */
+		rstrt_info = cr_get_restart_info();
+		if (!rstrt_info) {
+			error("failed to get restart info: %s", 
+			      cr_strerror(errno));
+			return NULL;
+		}
+		dest = rstrt_info->src;
+	}
+
+	rchar = strrchr(dest, '/');
+	if (rchar) {
+		dir = xstrndup(dest, rchar - dest + 1);
+	}
+	xstrfmtcat(dir, "%u.%u", jobid, stepid);
+
+	return dir;
+}
+
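+/*
+ * cr_callback - BLCR checkpoint/restart callback, registered in thread
+ * context from main().  Checkpoints the launched step tasks, then acts
+ * on cr_checkpoint()'s return value: negative means the dump failed,
+ * zero means we are continuing after a checkpoint, and positive means
+ * we are running in a restarted process and srun must be re-launched.
+ */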
+static int
+cr_callback(void *unused)
+{
+	int rc;
+	char *step_image_dir = NULL;
+
+	rc = CR_CHECKPOINT_READY;
+	if (step_launched) {
+		step_image_dir = get_step_image_dir(1);
+		if (step_image_dir == NULL) {
+			error ("failed to get step image directory");
+			rc = CR_CHECKPOINT_PERM_FAILURE;
+		} else if (slurm_checkpoint_tasks(jobid,
+						  stepid,
+						  time(NULL), /* timestamp */
+						  step_image_dir,
+						  60, /* wait */
+						  nodelist) != SLURM_SUCCESS) {
+			error ("failed to checkpoint step tasks");
+			rc = CR_CHECKPOINT_PERM_FAILURE;
+		}
+		xfree(step_image_dir);
+	}
+	rc = cr_checkpoint(rc);	/* dump */
+	
+	if (rc < 0) {
+		fatal("checkpoint failed: %s", cr_strerror(errno));
+	} else if (rc == 0) {
+		/* continue, nothing to do */
+	} else {
+		/* restarted */
+		if (srun_pid) { /* srun forked */
+			if (step_launched) {
+				step_image_dir = get_step_image_dir(0);
+				if (step_image_dir == NULL) {
+					fatal("failed to get step image directory");
+				}
+				update_env("SLURM_RESTART_DIR", step_image_dir);
+				xfree(step_image_dir);
+			}
+
+			if (fork_exec_srun()) {
+				fatal("failed fork/exec srun");
+			}
+		}
+
+		/* XXX: step_launched => listen_fd valid */
+		step_launched = 0;
+		
+		debug2("step not launched.");
+
+		pthread_cond_broadcast(&step_launch_cond);
+	}
+
+	return 0;
+}
+
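+/*
+ * main - wrap srun for BLCR: fork/exec srun under cr_run, then loop
+ * accepting connections on a UNIX domain socket over which srun
+ * reports the jobid/stepid/nodelist to checkpoint.  Checkpoint and
+ * restart requests are serviced by cr_callback(); on restart, srun is
+ * re-forked with SLURM_RESTART_DIR pointing at the saved step images.
+ */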
+int 
+main(int argc, char **argv)
+{
+	int debug_level, sig, srun_fd;
+	struct sigaction sa;
+	log_options_t logopt = LOG_OPTS_STDERR_ONLY;
+	struct sockaddr_un ca;
+	unsigned int ca_len = sizeof(ca);
+
+	atexit(remove_listen_socket);
+	
+	/* copied from srun */
+	debug_level = _slurm_debug_env_val();
+	logopt.stderr_level += debug_level;
+	log_init(xbasename(argv[0]), logopt, 0, NULL);
+
+	if (init_srun_argv(argc, argv)) {
+		fatal("failed to initialize arguments for running srun");
+	}
+	
+	if ((cr_id = cr_init()) < 0) {
+		fatal("failed to initialize libcr: %s", cr_strerror(errno));
+	}
+	(void)cr_register_callback(cr_callback, NULL, CR_THREAD_CONTEXT);
+	
+	/* forward signals. copied from cr_restart */
+	sa.sa_sigaction = signal_child;
+	sa.sa_flags = SA_RESTART | SA_NODEFER | SA_SIGINFO;
+	sigemptyset(&sa.sa_mask);
+	for (sig = 0;  sig < _NSIG; sig ++) {
+		if (sig == SIGSTOP ||
+		    sig == SIGKILL ||
+		    sig == SIGCHLD)
+			continue;
+		sigaction(sig, &sa, NULL);
+	}
+	sa.sa_sigaction = on_child_exit;
+	sa.sa_flags = SA_RESTART | SA_SIGINFO | SA_NOCLDSTOP;
+	sigaction(SIGCHLD, &sa, NULL);
+
+	cr_enter_cs(cr_id); /* BEGIN CS: avoid race condition of whether srun is forked */
+	if ( fork_exec_srun() ) {
+		fatal("failed fork/exec/wait srun");
+	}
+	cr_leave_cs(cr_id); /* END CS */
+
+	while (1) {
+		pthread_mutex_lock(&step_launch_mutex);
+		while (step_launched) {
+			/* just avoid busy waiting */
+			pthread_cond_wait(&step_launch_cond, 
+					  &step_launch_mutex);
+		}
+		pthread_mutex_unlock(&step_launch_mutex);
+
+		if (_wait_for_srun_connect() < 0)
+			continue;
+
+		cr_enter_cs(cr_id); /* BEGIN CS: checkpoint(callback) will be delayed */
+
+		srun_fd = accept(listen_fd, (struct sockaddr*)&ca, &ca_len);
+		if (srun_fd < 0) {
+			/* restarted before entering the CS; the socket was not restored */
+			if (errno == EBADF) { 
+				cr_leave_cs(cr_id);
+				continue;
+			} else {
+				fatal("failed to accept socket: %m");
+			}
+		}
+
+		_read_info_from_srun(srun_fd);
+		close(srun_fd);
+		
+		step_launched = 1;
+		debug2("step launched");
+
+		cr_leave_cs(cr_id); /* END CS */
+	}
+
+	return 0;
+}
+
+static int
+_wait_for_srun_connect(void)
+{
+	struct pollfd fds[1];
+	int rc;
+
+	fds[0].fd = listen_fd;
+	fds[0].events = POLLIN;
+
+	while ((rc = poll(fds, 1, -1)) < 0) {
+		switch (errno) {
+		case EAGAIN:
+		case EINTR:
+			continue;
+		case EBADF:	/* restarted */
+			return -1;
+		case ENOMEM:
+		case EINVAL:
+		case EFAULT:
+			fatal("poll: %m");
+		default:
+			error("poll: %m. Continuing...");
+		}
+	}
+	return 0;
+}
+
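+/*
+ * _read_info_from_srun - read the job description srun sends after
+ * connecting.  Wire format: jobid (uint32_t), stepid (uint32_t),
+ * nodelist length (int, excluding the terminating NUL), then the
+ * NUL-terminated nodelist string itself.
+ */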
+static void
+_read_info_from_srun(int srun_fd)
+{
+	int len;
+	
+	if (read(srun_fd, &jobid, sizeof(uint32_t)) != sizeof(uint32_t)) {
+		fatal("failed to read jobid: %m");
+	}
+
+	if (read(srun_fd, &stepid, sizeof(uint32_t)) != sizeof(uint32_t)) {
+		fatal("failed to read stepid: %m");
+	}
+
+	if (read(srun_fd, &len, sizeof(int)) != sizeof(int)) {
+		fatal("failed to read nodelist length: %m");
+	}
+
+	xfree(nodelist);
+	nodelist = (char *)xmalloc(len + 1);
+	if (!nodelist) {
+		fatal("failed to malloc nodelist: %m");
+	}
+	if (read(srun_fd, nodelist, len + 1) != len + 1) {
+		fatal("failed to read nodelist: %m");
+	}
+}
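
For context, a sketch of the peer side of this handshake: what a sender
matching _read_info_from_srun() above would write to the socket named
by SLURM_SRUN_CR_SOCKET. The real sender lives in srun and is not part
of this hunk; send_cr_info() is hypothetical and error handling on the
write() calls is omitted for brevity.

#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/un.h>

static int send_cr_info(uint32_t jobid, uint32_t stepid,
			const char *nodelist)
{
	struct sockaddr_un sa;
	int fd;
	int len = (int) strlen(nodelist);
	const char *addr = getenv("SLURM_SRUN_CR_SOCKET");

	if ((addr == NULL) ||
	    ((fd = socket(AF_UNIX, SOCK_STREAM, 0)) < 0))
		return -1;
	memset(&sa, 0, sizeof(sa));
	sa.sun_family = AF_UNIX;
	strncpy(sa.sun_path, addr, sizeof(sa.sun_path) - 1);
	if (connect(fd, (struct sockaddr *) &sa, sizeof(sa)) < 0) {
		close(fd);
		return -1;
	}
	write(fd, &jobid,  sizeof(jobid));	/* uint32_t */
	write(fd, &stepid, sizeof(stepid));	/* uint32_t */
	write(fd, &len,    sizeof(len));	/* int, without the NUL */
	write(fd, nodelist, len + 1);		/* string incl. its NUL */
	close(fd);
	return 0;
}
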
diff --git a/src/sshare/Makefile.am b/src/sshare/Makefile.am
new file mode 100644
index 0000000000000000000000000000000000000000..cb7a8feff1f4c792ca4e50ccf1fed874450dc8e5
--- /dev/null
+++ b/src/sshare/Makefile.am
@@ -0,0 +1,22 @@
+# Makefile for sshare
+
+AUTOMAKE_OPTIONS = foreign
+CLEANFILES = core.*
+
+INCLUDES = -I$(top_srcdir)
+
+bin_PROGRAMS = sshare
+
+sshare_LDADD =  \
+	$(top_builddir)/src/api/libslurm.o -ldl\
+	$(READLINE_LIBS)
+
+sshare_SOURCES =	\
+	process.c               \
+	sshare.c sshare.h	
+
+sshare_LDFLAGS = -export-dynamic $(CMD_LDFLAGS)
+
+force:
+$(sshare_LDADD) : force
+	@cd `dirname $@` && $(MAKE) `basename $@`
diff --git a/src/sshare/Makefile.in b/src/sshare/Makefile.in
new file mode 100644
index 0000000000000000000000000000000000000000..cd9fbba6f7c72c7e8b374b8abbc13d8db5be41b3
--- /dev/null
+++ b/src/sshare/Makefile.in
@@ -0,0 +1,574 @@
+# Makefile.in generated by automake 1.10.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
+# 2003, 2004, 2005, 2006, 2007, 2008  Free Software Foundation, Inc.
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# Makefile for sshare
+
+VPATH = @srcdir@
+pkgdatadir = $(datadir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+target_triplet = @target@
+bin_PROGRAMS = sshare$(EXEEXT)
+subdir = src/sshare
+DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
+	$(top_srcdir)/auxdir/slurm.m4 \
+	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
+	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
+	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
+	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
+	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_databases.m4 \
+	$(top_srcdir)/auxdir/x_ac_debug.m4 \
+	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
+	$(top_srcdir)/auxdir/x_ac_federation.m4 \
+	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
+	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_munge.m4 \
+	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_pam.m4 \
+	$(top_srcdir)/auxdir/x_ac_ptrace.m4 \
+	$(top_srcdir)/auxdir/x_ac_readline.m4 \
+	$(top_srcdir)/auxdir/x_ac_setpgrp.m4 \
+	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
+	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
+	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
+	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+	$(ACLOCAL_M4)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h $(top_builddir)/slurm/slurm.h
+CONFIG_CLEAN_FILES =
+am__installdirs = "$(DESTDIR)$(bindir)"
+binPROGRAMS_INSTALL = $(INSTALL_PROGRAM)
+PROGRAMS = $(bin_PROGRAMS)
+am_sshare_OBJECTS = process.$(OBJEXT) sshare.$(OBJEXT)
+sshare_OBJECTS = $(am_sshare_OBJECTS)
+am__DEPENDENCIES_1 =
+sshare_DEPENDENCIES = $(top_builddir)/src/api/libslurm.o \
+	$(am__DEPENDENCIES_1)
+sshare_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
+	--mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(sshare_LDFLAGS) \
+	$(LDFLAGS) -o $@
+DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm
+depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp
+am__depfiles_maybe = depfiles
+COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \
+	$(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
+LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
+	--mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \
+	$(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
+CCLD = $(CC)
+LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
+	--mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \
+	$(LDFLAGS) -o $@
+SOURCES = $(sshare_SOURCES)
+DIST_SOURCES = $(sshare_SOURCES)
+ETAGS = etags
+CTAGS = ctags
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AR = @AR@
+AUTHD_CFLAGS = @AUTHD_CFLAGS@
+AUTHD_LIBS = @AUTHD_LIBS@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
+BLUEGENE_LOADED = @BLUEGENE_LOADED@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CMD_LDFLAGS = @CMD_LDFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CXX = @CXX@
+CXXCPP = @CXXCPP@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CYGPATH_W = @CYGPATH_W@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+DSYMUTIL = @DSYMUTIL@
+ECHO = @ECHO@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+ELAN_LIBS = @ELAN_LIBS@
+EXEEXT = @EXEEXT@
+F77 = @F77@
+FEDERATION_LDFLAGS = @FEDERATION_LDFLAGS@
+FFLAGS = @FFLAGS@
+GREP = @GREP@
+GTK2_CFLAGS = @GTK2_CFLAGS@
+GTK2_LIBS = @GTK2_LIBS@
+HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@
+HAVEPGCONFIG = @HAVEPGCONFIG@
+HAVEPKGCONFIG = @HAVEPKGCONFIG@
+HAVE_AIX = @HAVE_AIX@
+HAVE_ELAN = @HAVE_ELAN@
+HAVE_FEDERATION = @HAVE_FEDERATION@
+HAVE_OPENSSL = @HAVE_OPENSSL@
+HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+LDFLAGS = @LDFLAGS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBTOOL = @LIBTOOL@
+LIB_LDFLAGS = @LIB_LDFLAGS@
+LN_S = @LN_S@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MKDIR_P = @MKDIR_P@
+MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@
+MUNGE_LDFLAGS = @MUNGE_LDFLAGS@
+MUNGE_LIBS = @MUNGE_LIBS@
+MYSQL_CFLAGS = @MYSQL_CFLAGS@
+MYSQL_LIBS = @MYSQL_LIBS@
+NCURSES = @NCURSES@
+NMEDIT = @NMEDIT@
+NUMA_LIBS = @NUMA_LIBS@
+OBJEXT = @OBJEXT@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PAM_LIBS = @PAM_LIBS@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PGSQL_CFLAGS = @PGSQL_CFLAGS@
+PGSQL_LIBS = @PGSQL_LIBS@
+PLPA_LIBS = @PLPA_LIBS@
+PROCTRACKDIR = @PROCTRACKDIR@
+PROJECT = @PROJECT@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+RANLIB = @RANLIB@
+READLINE_LIBS = @READLINE_LIBS@
+RELEASE = @RELEASE@
+SED = @SED@
+SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
+SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SLURMCTLD_PORT = @SLURMCTLD_PORT@
+SLURMDBD_PORT = @SLURMDBD_PORT@
+SLURMD_PORT = @SLURMD_PORT@
+SLURM_API_AGE = @SLURM_API_AGE@
+SLURM_API_CURRENT = @SLURM_API_CURRENT@
+SLURM_API_MAJOR = @SLURM_API_MAJOR@
+SLURM_API_REVISION = @SLURM_API_REVISION@
+SLURM_API_VERSION = @SLURM_API_VERSION@
+SLURM_MAJOR = @SLURM_MAJOR@
+SLURM_MICRO = @SLURM_MICRO@
+SLURM_MINOR = @SLURM_MINOR@
+SLURM_PREFIX = @SLURM_PREFIX@
+SLURM_VERSION = @SLURM_VERSION@
+SO_LDFLAGS = @SO_LDFLAGS@
+SSL_CPPFLAGS = @SSL_CPPFLAGS@
+SSL_LDFLAGS = @SSL_LDFLAGS@
+SSL_LIBS = @SSL_LIBS@
+STRIP = @STRIP@
+UTIL_LIBS = @UTIL_LIBS@
+VERSION = @VERSION@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+ac_ct_F77 = @ac_ct_F77@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_vendor = @build_vendor@
+builddir = @builddir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target = @target@
+target_alias = @target_alias@
+target_cpu = @target_cpu@
+target_os = @target_os@
+target_vendor = @target_vendor@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+AUTOMAKE_OPTIONS = foreign
+CLEANFILES = core.*
+INCLUDES = -I$(top_srcdir)
+sshare_LDADD = \
+	$(top_builddir)/src/api/libslurm.o -ldl\
+	$(READLINE_LIBS)
+
+sshare_SOURCES = \
+	process.c               \
+	sshare.c sshare.h
+
+sshare_LDFLAGS = -export-dynamic $(CMD_LDFLAGS)
+all: all-am
+
+.SUFFIXES:
+.SUFFIXES: .c .lo .o .obj
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am  $(am__configure_deps)
+	@for dep in $?; do \
+	  case '$(am__configure_deps)' in \
+	    *$$dep*) \
+	      cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh \
+		&& exit 0; \
+	      exit 1;; \
+	  esac; \
+	done; \
+	echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign  src/sshare/Makefile'; \
+	cd $(top_srcdir) && \
+	  $(AUTOMAKE) --foreign  src/sshare/Makefile
+.PRECIOUS: Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+	@case '$?' in \
+	  *config.status*) \
+	    cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+	  *) \
+	    echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+	    cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+	esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+install-binPROGRAMS: $(bin_PROGRAMS)
+	@$(NORMAL_INSTALL)
+	test -z "$(bindir)" || $(MKDIR_P) "$(DESTDIR)$(bindir)"
+	@list='$(bin_PROGRAMS)'; for p in $$list; do \
+	  p1=`echo $$p|sed 's/$(EXEEXT)$$//'`; \
+	  if test -f $$p \
+	     || test -f $$p1 \
+	  ; then \
+	    f=`echo "$$p1" | sed 's,^.*/,,;$(transform);s/$$/$(EXEEXT)/'`; \
+	   echo " $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(binPROGRAMS_INSTALL) '$$p' '$(DESTDIR)$(bindir)/$$f'"; \
+	   $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(binPROGRAMS_INSTALL) "$$p" "$(DESTDIR)$(bindir)/$$f" || exit 1; \
+	  else :; fi; \
+	done
+
+uninstall-binPROGRAMS:
+	@$(NORMAL_UNINSTALL)
+	@list='$(bin_PROGRAMS)'; for p in $$list; do \
+	  f=`echo "$$p" | sed 's,^.*/,,;s/$(EXEEXT)$$//;$(transform);s/$$/$(EXEEXT)/'`; \
+	  echo " rm -f '$(DESTDIR)$(bindir)/$$f'"; \
+	  rm -f "$(DESTDIR)$(bindir)/$$f"; \
+	done
+
+clean-binPROGRAMS:
+	@list='$(bin_PROGRAMS)'; for p in $$list; do \
+	  f=`echo $$p|sed 's/$(EXEEXT)$$//'`; \
+	  echo " rm -f $$p $$f"; \
+	  rm -f $$p $$f ; \
+	done
+sshare$(EXEEXT): $(sshare_OBJECTS) $(sshare_DEPENDENCIES) 
+	@rm -f sshare$(EXEEXT)
+	$(sshare_LINK) $(sshare_OBJECTS) $(sshare_LDADD) $(LIBS)
+
+mostlyclean-compile:
+	-rm -f *.$(OBJEXT)
+
+distclean-compile:
+	-rm -f *.tab.c
+
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/process.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sshare.Po@am__quote@
+
+.c.o:
+@am__fastdepCC_TRUE@	$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCC_TRUE@	mv -f $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(COMPILE) -c $<
+
+.c.obj:
+@am__fastdepCC_TRUE@	$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'`
+@am__fastdepCC_TRUE@	mv -f $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(COMPILE) -c `$(CYGPATH_W) '$<'`
+
+.c.lo:
+@am__fastdepCC_TRUE@	$(LTCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCC_TRUE@	mv -f $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(LTCOMPILE) -c -o $@ $<
+
+mostlyclean-libtool:
+	-rm -f *.lo
+
+clean-libtool:
+	-rm -rf .libs _libs
+
+ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES)
+	list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
+	unique=`for i in $$list; do \
+	    if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+	  done | \
+	  $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
+	      END { if (nonempty) { for (i in files) print i; }; }'`; \
+	mkid -fID $$unique
+tags: TAGS
+
+TAGS:  $(HEADERS) $(SOURCES)  $(TAGS_DEPENDENCIES) \
+		$(TAGS_FILES) $(LISP)
+	tags=; \
+	here=`pwd`; \
+	list='$(SOURCES) $(HEADERS)  $(LISP) $(TAGS_FILES)'; \
+	unique=`for i in $$list; do \
+	    if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+	  done | \
+	  $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
+	      END { if (nonempty) { for (i in files) print i; }; }'`; \
+	if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \
+	  test -n "$$unique" || unique=$$empty_fix; \
+	  $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+	    $$tags $$unique; \
+	fi
+ctags: CTAGS
+CTAGS:  $(HEADERS) $(SOURCES)  $(TAGS_DEPENDENCIES) \
+		$(TAGS_FILES) $(LISP)
+	tags=; \
+	list='$(SOURCES) $(HEADERS)  $(LISP) $(TAGS_FILES)'; \
+	unique=`for i in $$list; do \
+	    if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+	  done | \
+	  $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
+	      END { if (nonempty) { for (i in files) print i; }; }'`; \
+	test -z "$(CTAGS_ARGS)$$tags$$unique" \
+	  || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
+	     $$tags $$unique
+
+GTAGS:
+	here=`$(am__cd) $(top_builddir) && pwd` \
+	  && cd $(top_srcdir) \
+	  && gtags -i $(GTAGS_ARGS) $$here
+
+distclean-tags:
+	-rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
+
+distdir: $(DISTFILES)
+	@srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+	topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+	list='$(DISTFILES)'; \
+	  dist_files=`for file in $$list; do echo $$file; done | \
+	  sed -e "s|^$$srcdirstrip/||;t" \
+	      -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+	case $$dist_files in \
+	  */*) $(MKDIR_P) `echo "$$dist_files" | \
+			   sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+			   sort -u` ;; \
+	esac; \
+	for file in $$dist_files; do \
+	  if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+	  if test -d $$d/$$file; then \
+	    dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+	    if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+	      cp -pR $(srcdir)/$$file $(distdir)$$dir || exit 1; \
+	    fi; \
+	    cp -pR $$d/$$file $(distdir)$$dir || exit 1; \
+	  else \
+	    test -f $(distdir)/$$file \
+	    || cp -p $$d/$$file $(distdir)/$$file \
+	    || exit 1; \
+	  fi; \
+	done
+check-am: all-am
+check: check-am
+all-am: Makefile $(PROGRAMS)
+installdirs:
+	for dir in "$(DESTDIR)$(bindir)"; do \
+	  test -z "$$dir" || $(MKDIR_P) "$$dir"; \
+	done
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+	@$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+	$(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+	  install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+	  `test -z '$(STRIP)' || \
+	    echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install
+mostlyclean-generic:
+
+clean-generic:
+	-test -z "$(CLEANFILES)" || rm -f $(CLEANFILES)
+
+distclean-generic:
+	-test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+
+maintainer-clean-generic:
+	@echo "This command is intended for maintainers to use"
+	@echo "it deletes files that may require special tools to rebuild."
+clean: clean-am
+
+clean-am: clean-binPROGRAMS clean-generic clean-libtool mostlyclean-am
+
+distclean: distclean-am
+	-rm -rf ./$(DEPDIR)
+	-rm -f Makefile
+distclean-am: clean-am distclean-compile distclean-generic \
+	distclean-tags
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-exec-am: install-binPROGRAMS
+
+install-html: install-html-am
+
+install-info: install-info-am
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-ps: install-ps-am
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+	-rm -rf ./$(DEPDIR)
+	-rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-compile mostlyclean-generic \
+	mostlyclean-libtool
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am: uninstall-binPROGRAMS
+
+.MAKE: install-am install-strip
+
+.PHONY: CTAGS GTAGS all all-am check check-am clean clean-binPROGRAMS \
+	clean-generic clean-libtool ctags distclean distclean-compile \
+	distclean-generic distclean-libtool distclean-tags distdir dvi \
+	dvi-am html html-am info info-am install install-am \
+	install-binPROGRAMS install-data install-data-am install-dvi \
+	install-dvi-am install-exec install-exec-am install-html \
+	install-html-am install-info install-info-am install-man \
+	install-pdf install-pdf-am install-ps install-ps-am \
+	install-strip installcheck installcheck-am installdirs \
+	maintainer-clean maintainer-clean-generic mostlyclean \
+	mostlyclean-compile mostlyclean-generic mostlyclean-libtool \
+	pdf pdf-am ps ps-am tags uninstall uninstall-am \
+	uninstall-binPROGRAMS
+
+
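+# The rule below remakes each sshare_LDADD prerequisite (notably
+# $(top_builddir)/src/api/libslurm.o) by recursing into its directory;
+# since "force" has no recipe or prerequisites, targets that depend on it
+# are always considered out of date and are rebuilt on every run.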
+force:
+$(sshare_LDADD) : force
+	@cd `dirname $@` && $(MAKE) `basename $@`
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/src/sshare/process.c b/src/sshare/process.c
new file mode 100644
index 0000000000000000000000000000000000000000..c32c4c5699571e0d8fd6536a1cc076c2af131332
--- /dev/null
+++ b/src/sshare/process.c
@@ -0,0 +1,267 @@
+/*****************************************************************************\
+ *  process.c - process the return from get_share_info.
+ *****************************************************************************
+ *  Copyright (C) 2008 Lawrence Livermore National Security.
+ *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+ *  Written by Danny Auble <da@llnl.gov>
+ *  CODE-OCEC-09-009. All rights reserved.
+ *  
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
+ *  
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission 
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and 
+ *  distribute linked combinations including the two. You must obey the GNU 
+ *  General Public License in all respects for all of the code used other than 
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this 
+ *  exception to your version of the file(s), but you are not obligated to do 
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in 
+ *  the program, then also delete it here.
+ *  
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *  
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#include "src/sshare/sshare.h"
+
+extern int long_flag;
+
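+/* Print one formatted row per association in the controller's share
+ * response; the column set depends on long_flag (set in sshare.c).
+ * Returns SLURM_SUCCESS, or SLURM_ERROR on a NULL response or a bad
+ * format field. */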
+extern int process(shares_response_msg_t *resp)
+{
+	int rc = SLURM_SUCCESS;
+	association_shares_object_t *assoc = NULL;
+	ListIterator itr = NULL;
+	ListIterator itr2 = NULL;
+	char *object = NULL;
+	char *print_acct = NULL;
+	List tree_list = NULL;
+
+	int field_count = 0;
+
+	print_field_t *field = NULL;
+
+	List format_list = NULL;
+	List print_fields_list; /* list of print_field_t */
+
+	enum {
+		PRINT_ACCOUNT,
+		PRINT_CLUSTER,
+		PRINT_EUSED,
+		PRINT_FSFACTOR,
+		PRINT_ID,
+		PRINT_NORMS,
+		PRINT_NORMU,
+		PRINT_RAWS,
+		PRINT_RAWU,
+		PRINT_USER,
+	};
+
+	if(!resp)
+		return SLURM_ERROR;
+
+	format_list = list_create(slurm_destroy_char);
+	if (long_flag) {
+		slurm_addto_char_list(format_list,
+				      "A,User,RawShares,NormShares,"
+				      "RawUsage,NormUsage,EffUsage,"
+				      "FSFctr");
+	} else {
+		slurm_addto_char_list(format_list,
+				      "A,User,RawShares,NormShares,"
+				      "RawUsage,EffUsage,FSFctr");
+	}
+
+	print_fields_list = list_create(destroy_print_field);
+
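+	/* Each format name below is matched by a short case-insensitive
+	 * prefix (e.g. "A" selects Account); an optional "%<len>" suffix,
+	 * as in "Account%30", overrides the default column width. */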
+	itr = list_iterator_create(format_list);
+	while((object = list_next(itr))) {
+		char *tmp_char = NULL;
+		field = xmalloc(sizeof(print_field_t));
+		if(!strncasecmp("Account", object, 1)) {
+			field->type = PRINT_ACCOUNT;
+			field->name = xstrdup("Account");
+			field->len = -20;
+			field->print_routine = print_fields_str;
+		} else if(!strncasecmp("Cluster", object, 1)) {
+			field->type = PRINT_CLUSTER;
+			field->name = xstrdup("Cluster");
+			field->len = 10;
+			field->print_routine = print_fields_str;
+		} else if(!strncasecmp("EffUsage", object, 1)) {
+			field->type = PRINT_EUSED;
+			field->name = xstrdup("Effectv Usage");
+			field->len = 13;
+			field->print_routine = print_fields_double;
+		} else if(!strncasecmp("FSFctr", object, 1)) {
+			field->type = PRINT_FSFACTOR;
+			field->name = xstrdup("Fair-share");
+			field->len = 10;
+			field->print_routine = print_fields_double;
+		} else if(!strncasecmp("ID", object, 1)) {
+			field->type = PRINT_ID;
+			field->name = xstrdup("ID");
+			field->len = 6;
+			field->print_routine = print_fields_uint;
+		} else if(!strncasecmp("NormShares", object, 5)) {
+			field->type = PRINT_NORMS;
+			field->name = xstrdup("Norm Shares");
+			field->len = 11;
+			field->print_routine = print_fields_double;
+		} else if(!strncasecmp("NormUsage", object, 5)) {
+			field->type = PRINT_NORMU;
+			field->name = xstrdup("Norm Usage");
+			field->len = 11;
+			field->print_routine = print_fields_double;
+		} else if(!strncasecmp("RawShares", object, 4)) {
+			field->type = PRINT_RAWS;
+			field->name = xstrdup("Raw Shares");
+			field->len = 10;
+			field->print_routine = print_fields_uint32;
+		} else if(!strncasecmp("RawUsage", object, 4)) {
+			field->type = PRINT_RAWU;
+			field->name = xstrdup("Raw Usage");
+			field->len = 11;
+			field->print_routine = print_fields_uint64;
+		} else if(!strncasecmp("User", object, 1)) {
+			field->type = PRINT_USER;
+			field->name = xstrdup("User");
+			field->len = 10;
+			field->print_routine = print_fields_str;
+		} else {
+			exit_code = 1;
+			fprintf(stderr, "Unknown field '%s'\n", object);
+			xfree(field);
+			continue;
+		}
+		if((tmp_char = strstr(object, "%"))) {
+			int newlen = atoi(tmp_char+1);
+			if(newlen) 
+				field->len = newlen;
+		}
+		list_append(print_fields_list, field);
+	}
+	list_iterator_destroy(itr);
+	list_destroy(format_list);
+
+	if(exit_code) {
+		list_destroy(print_fields_list);
+		return SLURM_ERROR;
+	}
+
+	itr2 = list_iterator_create(print_fields_list);
+	print_fields_header(print_fields_list);
+
+	field_count = list_count(print_fields_list);
+
+	if(!resp->assoc_shares_list || !list_count(resp->assoc_shares_list)) {
+		list_iterator_destroy(itr2);
+		list_destroy(print_fields_list);
+		return SLURM_SUCCESS;
+	}
+	tree_list = list_create(destroy_acct_print_tree);
+	itr = list_iterator_create(resp->assoc_shares_list);
+	while((assoc = list_next(itr))) {
+		int curr_inx = 1;
+		char *tmp_char = NULL;
+		char *local_acct = NULL;
+
+		while((field = list_next(itr2))) {
+			switch(field->type) {
+			case PRINT_ACCOUNT:
+				if(assoc->user) 
+					local_acct = xstrdup_printf(
+						"|%s", assoc->name);
+				else 
+					local_acct = xstrdup(assoc->name);
+				
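+				/* user entries get a leading '|' so
+				 * get_tree_acct_name() can tell them
+				 * apart from accounts when building
+				 * the indented hierarchy */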
+				print_acct = get_tree_acct_name(
+					local_acct,
+					assoc->parent, tree_list);
+				xfree(local_acct);
+				field->print_routine(
+					field, 
+					print_acct,
+					(curr_inx == field_count));
+				break;
+			case PRINT_CLUSTER:
+				field->print_routine(
+					field,
+					assoc->cluster,
+					(curr_inx == field_count));
+				break;
+			case PRINT_EUSED:
+				field->print_routine(field, 
+						     assoc->usage_efctv,
+						     (curr_inx == field_count));
+				break;
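+			/* The fair-share factor shown is
+			 * (shares_norm - usage_efctv + 1) / 2, so usage
+			 * exactly equal to the share yields 0.5; e.g.
+			 * shares_norm = 0.25 with usage_efctv = 0.10
+			 * gives (0.25 - 0.10 + 1) / 2 = 0.575. */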
+			case PRINT_FSFACTOR:
+				field->print_routine(field,
+						     (assoc->shares_norm -
+						     (double)assoc->usage_efctv
+						      + 1.0) / 2.0,
+						     (curr_inx == field_count));
+				break;
+			case PRINT_ID:
+				field->print_routine(field, 
+						     assoc->assoc_id,
+						     (curr_inx == field_count));
+				break;
+			case PRINT_NORMS:
+				field->print_routine(field, 
+						     assoc->shares_norm,
+						     (curr_inx == field_count));
+				break;
+			case PRINT_NORMU:
+				field->print_routine(field,
+						     assoc->usage_norm,
+						     (curr_inx == field_count));
+				break;
+			case PRINT_RAWS:
+				field->print_routine(field,
+						     assoc->shares_raw,
+						     (curr_inx == field_count));
+				break;
+			case PRINT_RAWU:
+				field->print_routine(field, 
+						     assoc->usage_raw,
+						     (curr_inx == field_count));
+				break;
+			case PRINT_USER:
+				if(assoc->user)
+					tmp_char = assoc->name;
+				field->print_routine(field, 
+						     tmp_char,
+						     (curr_inx == field_count));
+				break;
+			default:
+				field->print_routine(
+					field, NULL,
+					(curr_inx == field_count));
+				break;
+			}
+			curr_inx++;
+		}
+		list_iterator_reset(itr2);
+		printf("\n");
+	}
+
+	if(tree_list) 
+		list_destroy(tree_list);
+			
+	list_iterator_destroy(itr2);
+	list_iterator_destroy(itr);
+	list_destroy(print_fields_list);
+	return rc;
+}
diff --git a/src/sshare/sshare.c b/src/sshare/sshare.c
new file mode 100644
index 0000000000000000000000000000000000000000..094deef174289e99d2681f5cc5442604462c6ee1
--- /dev/null
+++ b/src/sshare/sshare.c
@@ -0,0 +1,420 @@
+/*****************************************************************************\
+ *  sshare.c - tool for listing the shares of associations relative
+ *             to the cluster they are running on.
+ *****************************************************************************
+ *  Copyright (C) 2008 Lawrence Livermore National Security.
+ *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+ *  Written by Danny Auble <da@llnl.gov>
+ *  CODE-OCEC-09-009. All rights reserved.
+ *  
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
+ *  
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission 
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and 
+ *  distribute linked combinations including the two. You must obey the GNU 
+ *  General Public License in all respects for all of the code used other than 
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this 
+ *  exception to your version of the file(s), but you are not obligated to do 
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in 
+ *  the program, then also delete it here.
+ *  
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *  
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#include "src/sshare/sshare.h"
+#include <grp.h>
+
+
+#define BUFFER_SIZE 4096
+#define OPT_LONG_HELP  0x100
+#define OPT_LONG_USAGE 0x101
+
+int exit_code;		/* sshare's exit code, =1 on any error at any time */
+int quiet_flag;		/* quiet=1, verbose=-1, normal=0 */
+int long_flag;		/* display extra columns; output may exceed 80 chars */
+int verbosity;		/* count of -v options */
+uint32_t my_uid = 0;
+
+static int      _get_info(shares_request_msg_t *shares_req, 
+			  shares_response_msg_t **shares_resp);
+static int      _addto_name_char_list(List char_list, char *names, bool gid);
+static char *   _convert_to_name(int id, bool gid);
+static void     _print_version( void );
+static void	_usage (void);
+
+int 
+main (int argc, char *argv[]) 
+{
+	int error_code = SLURM_SUCCESS, opt_char;
+	log_options_t opts = LOG_OPTS_STDERR_ONLY;
+	shares_request_msg_t req_msg;
+	shares_response_msg_t *resp_msg = NULL;
+	char *temp = NULL;
+	int option_index;
+	bool all_users = 0;
+
+	static struct option long_options[] = {
+		{"accounts", 1, 0, 'A'},
+		{"all",      0, 0, 'a'},
+		{"long",     0, 0, 'l'},
+		{"noheader", 0, 0, 'h'},
+		{"parsable", 0, 0, 'p'},
+		{"parsable2",0, 0, 'P'},
+		{"users",    1, 0, 'u'},
+		{"verbose",  0, 0, 'v'},
+		{"version",  0, 0, 'V'},
+		{"help",     0, 0, OPT_LONG_HELP},
+		{"usage",    0, 0, OPT_LONG_USAGE},
+		{NULL,       0, 0, 0}
+	};
+
+	/* Check to see if we are running a supported accounting plugin */
+	temp = slurm_get_priority_type();
+	if(strcasecmp(temp, "priority/multifactor")) {
+		fprintf (stderr, "You are not running a supported "
+			 "priority plugin\n(%s).\n"
+			 "Only 'priority/multifactor' is supported.\n",
+			temp);
+		xfree(temp);
+		exit(1);
+	}
+	xfree(temp);
+
+	exit_code         = 0;
+	long_flag	  = 0;
+	quiet_flag        = 0;
+	verbosity         = 0;
+	memset(&req_msg, 0, sizeof(shares_request_msg_t));
+	log_init("sshare", opts, SYSLOG_FACILITY_DAEMON, NULL);
+
+	while((opt_char = getopt_long(argc, argv, "aA:hlnpPqu:t:vV",
+			long_options, &option_index)) != -1) {
+		switch (opt_char) {
+		case (int)'?':
+			fprintf(stderr, "Try \"sshare --help\" "
+				"for more information\n");
+			exit(1);
+			break;
+		case 'a':
+			all_users = 1;
+			break;
+		case 'A':
+			if(!req_msg.acct_list) 
+				req_msg.acct_list =
+					list_create(slurm_destroy_char);
+			slurm_addto_char_list(req_msg.acct_list, optarg);
+			break;
+		case 'h':
+			print_fields_have_header = 0;
+			break;
+		case 'l':
+			long_flag = 1;
+			break;
+		case 'n':
+			print_fields_have_header = 0;
+			break;
+		case 'p':
+			print_fields_parsable_print = 
+			PRINT_FIELDS_PARSABLE_ENDING;
+			break;
+		case 'P':
+			print_fields_parsable_print =
+			PRINT_FIELDS_PARSABLE_NO_ENDING;
+			break;
+		case 'u':
+			if(!strcmp(optarg, "-1")) {
+				all_users = 1;
+				break;
+			}
+			all_users = 0;
+			if(!req_msg.user_list)
+				req_msg.user_list = 
+					list_create(slurm_destroy_char);
+			_addto_name_char_list(req_msg.user_list, optarg, 0);
+			break;
+		case 'v':
+			quiet_flag = -1;
+			verbosity++;
+			break;
+		case 'V':
+			_print_version();
+			exit(exit_code);
+			break;
+		case OPT_LONG_HELP:
+		case OPT_LONG_USAGE:
+			_usage();
+			exit(0);
+		default:
+			exit_code = 1;
+			fprintf(stderr, "getopt error, returned %c\n", 
+				opt_char);
+			exit(exit_code);
+		}
+	}
+
+	if (verbosity) {
+		opts.stderr_level += verbosity;
+		opts.prefix_level = 1;
+		log_alter(opts, 0, NULL);
+	}
+
+	if(all_users) {
+		if(req_msg.user_list 
+		   && list_count(req_msg.user_list)) {
+			list_destroy(req_msg.user_list);
+			req_msg.user_list = NULL;
+		}
+		if(verbosity)
+			fprintf(stderr, "Users requested:\n\t: all\n");
+	} else if (verbosity && req_msg.user_list 
+	    && list_count(req_msg.user_list)) {
+		fprintf(stderr, "Users requested:\n");
+		ListIterator itr = list_iterator_create(req_msg.user_list);
+		while((temp = list_next(itr))) 
+			fprintf(stderr, "\t: %s\n", temp);
+		list_iterator_destroy(itr);
+	} else if(!req_msg.user_list || !list_count(req_msg.user_list)) {
+		struct passwd *pwd = getpwuid(getuid());
+		if(!pwd) {
+			fprintf(stderr, "getpwuid(%u) failed\n",
+				(unsigned int) getuid());
+			exit(1);
+		}
+		if(!req_msg.user_list)
+			req_msg.user_list = list_create(slurm_destroy_char);
+		temp = xstrdup(pwd->pw_name);
+		list_append(req_msg.user_list, temp);
+		if(verbosity) {
+			fprintf(stderr, "Users requested:\n");
+			fprintf(stderr, "\t: %s\n", temp);
+		}
+	}
+
+	if(req_msg.acct_list && list_count(req_msg.acct_list)) {
+		if(verbosity) {
+			fprintf(stderr, "Accounts requested:\n");
+			ListIterator itr =
+				list_iterator_create(req_msg.acct_list);
+			while((temp = list_next(itr)))
+				fprintf(stderr, "\t: %s\n", temp);
+			list_iterator_destroy(itr);
+		}
+	} else {
+		if(req_msg.acct_list) {
+			list_destroy(req_msg.acct_list);
+			req_msg.acct_list = NULL;
+		}
+		if(verbosity)
+			fprintf(stderr, "Accounts requested:\n\t: all\n");
+	}
+
+	error_code = _get_info(&req_msg, &resp_msg);
+
+	if(req_msg.acct_list)
+		list_destroy(req_msg.acct_list);
+	if(req_msg.user_list)
+		list_destroy(req_msg.user_list);
+
+	if (error_code) {
+		slurm_perror("Couldn't get shares from controller");
+		exit(error_code);
+	}
+
+	/* print the share information we got back */
+	process(resp_msg);
+
+	slurm_free_shares_response_msg(resp_msg);
+
+	exit(exit_code);
+}
+
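+/* Round trip with slurmctld: send REQUEST_SHARE_INFO and expect either
+ * RESPONSE_SHARE_INFO (payload handed back to the caller) or
+ * RESPONSE_SLURM_RC carrying an error code; any other message type is
+ * treated as unexpected. */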
+static int _get_info(shares_request_msg_t *shares_req, 
+		     shares_response_msg_t **shares_resp)
+{
+	int rc;
+        slurm_msg_t req_msg;
+        slurm_msg_t resp_msg;
+
+	slurm_msg_t_init(&req_msg);
+	slurm_msg_t_init(&resp_msg);
+
+        req_msg.msg_type = REQUEST_SHARE_INFO;
+        req_msg.data     = shares_req;
+	
+	if (slurm_send_recv_controller_msg(&req_msg, &resp_msg) < 0)
+		return SLURM_ERROR;
+	
+	switch (resp_msg.msg_type) {
+	case RESPONSE_SHARE_INFO:
+		*shares_resp = (shares_response_msg_t *) resp_msg.data;
+		break;
+	case RESPONSE_SLURM_RC:
+		rc = ((return_code_msg_t *) resp_msg.data)->return_code;
+		slurm_free_return_code_msg(resp_msg.data);	
+		if (rc) 
+			slurm_seterrno_ret(rc);
+		*shares_resp = NULL;
+		break;
+	default:
+		slurm_seterrno_ret(SLURM_UNEXPECTED_MSG_ERROR);
+		break;
+	}
+
+	return SLURM_PROTOCOL_SUCCESS;  	
+}
+
+/* returns number of objects added to list */
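+/* Entries may be quoted and comma separated; purely numeric entries are
+ * resolved to names via getpwuid()/getgrgid(), and duplicates (compared
+ * case-insensitively) are dropped.  For example, assuming uid 1001 maps
+ * to a placeholder user "alice", the input "1001,bob" yields
+ * {"alice", "bob"} and a return value of 2. */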
+static int _addto_name_char_list(List char_list, char *names, bool gid)
+{
+	int i=0, start=0;
+	char *name = NULL, *tmp_char = NULL;
+	ListIterator itr = NULL;
+	char quote_c = '\0';
+	int quote = 0;
+	int count = 0;
+
+	if(!char_list) {
+		error("No list was given to fill in");
+		return 0;
+	}
+
+	itr = list_iterator_create(char_list);
+	if(names) {
+		if (names[i] == '\"' || names[i] == '\'') {
+			quote_c = names[i];
+			quote = 1;
+			i++;
+		}
+		start = i;
+		while(names[i]) {
+			//info("got %d - %d = %d", i, start, i-start);
+			if(quote && names[i] == quote_c)
+				break;
+			else if (names[i] == '\"' || names[i] == '\'')
+				names[i] = '`';
+			else if(names[i] == ',') {
+				if((i-start) > 0) {
+					name = xmalloc((i-start+1));
+					memcpy(name, names+start, (i-start));
+					//info("got %s %d", name, i-start);
+					if (isdigit((int) *name)) {
+						int id = atoi(name);
+						xfree(name);
+						name = _convert_to_name(
+							id, gid);
+					}
+					
+					while((tmp_char = list_next(itr))) {
+						if(!strcasecmp(tmp_char, name))
+							break;
+					}
+
+					if(!tmp_char) {
+						list_append(char_list, name);
+						count++;
+					} else 
+						xfree(name);
+					list_iterator_reset(itr);
+				}
+				i++;
+				start = i;
+				if(!names[i]) {
+					info("There is a problem with "
+					     "your request: the list "
+					     "appears to end with a comma.");
+					break;
+				}
+			}
+			i++;
+		}
+		if((i-start) > 0) {
+			name = xmalloc((i-start)+1);
+			memcpy(name, names+start, (i-start));
+			
+			if (isdigit((int) *name)) {
+				int id = atoi(name);
+				xfree(name);
+				name = _convert_to_name(id, gid);
+			}
+			
+			while((tmp_char = list_next(itr))) {
+				if(!strcasecmp(tmp_char, name))
+					break;
+			}
+			
+			if(!tmp_char) {
+				list_append(char_list, name);
+				count++;
+			} else 
+				xfree(name);
+		}
+	}	
+	list_iterator_destroy(itr);
+	return count;
+} 
+
+static char *_convert_to_name(int id, bool gid)
+{
+	char *name = NULL;
+
+	if(gid) {
+		struct group *grp;
+		if (!(grp=getgrgid(id))) {
+			fprintf(stderr, "Invalid group id: %d\n", id);
+			exit(1);
+		}
+		name = xstrdup(grp->gr_name);
+	} else {
+		struct passwd *pwd;
+		if (!(pwd=getpwuid(id))) {
+			fprintf(stderr, "Invalid user id: %d\n", id);
+			exit(1);
+		}
+		name = xstrdup(pwd->pw_name);
+	}
+	return name;
+}
+
+static void _print_version(void)
+{
+	printf("%s %s\n", PACKAGE, SLURM_VERSION);
+	if (quiet_flag == -1) {
+		long version = slurm_api_version();
+		printf("slurm_api_version: %ld, %ld.%ld.%ld\n", version,
+			SLURM_VERSION_MAJOR(version), 
+			SLURM_VERSION_MINOR(version),
+			SLURM_VERSION_MICRO(version));
+	}
+}
+
+/* _usage - show the valid sshare options */
+static void _usage(void) {
+	printf ("\
+Usage:  sshare [OPTION]                                                    \n\
+  Valid OPTIONs are:                                                       \n\
+    -a or --all            list all users                                  \n\
+    -A or --accounts=      display specific accounts (comma separated list)\n\
+    -h or --noheader       omit header from output                         \n\
+    -l or --long           include normalized usage in output              \n\
+    -p or --parsable       '|' delimited output with a trailing '|'        \n\
+    -P or --parsable2      '|' delimited output without a trailing '|'     \n\
+    -u or --users=         display specific users (comma separated list)   \n\
+    -v or --verbose        display more information                        \n\
+    -V or --version        display tool version number                     \n\
+          --help           display this usage description                  \n\
+          --usage          display this usage description                  \n\
+                                                                           \n\n");
+}
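+/* Illustrative invocations: plain "sshare" reports the invoking user's
+ * associations, "sshare -a -l" lists every user with normalized usage
+ * included, and "sshare -A acct1,acct2 -P" prints two placeholder
+ * accounts in parsable form without a trailing '|'. */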
+
diff --git a/src/sshare/sshare.h b/src/sshare/sshare.h
new file mode 100644
index 0000000000000000000000000000000000000000..2be0a071dc5dd94f6a31a4d3321535ad4af6d13f
--- /dev/null
+++ b/src/sshare/sshare.h
@@ -0,0 +1,103 @@
+/*****************************************************************************\
+ *  sshare.h - definitions for all sshare modules.
+ *****************************************************************************
+ *  Copyright (C) 2008 Lawrence Livermore National Security.
+ *  Copyright (C) 2002-2007 The Regents of the University of California.
+ *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+ *  Written by Danny Auble <da@llnl.gov>
+ *  CODE-OCEC-09-009. All rights reserved.
+ *  
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
+ *  
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission 
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and 
+ *  distribute linked combinations including the two. You must obey the GNU 
+ *  General Public License in all respects for all of the code used other than 
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this 
+ *  exception to your version of the file(s), but you are not obligated to do 
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in 
+ *  the program, then also delete it here.
+ *  
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *  
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#ifndef __SSHARE_H__
+#define __SSHARE_H__
+
+#if HAVE_CONFIG_H
+#  include "config.h"
+#endif
+
+#if HAVE_GETOPT_H
+#  include <getopt.h>
+#else
+#  include "src/common/getopt.h"
+#endif
+
+#include <ctype.h>
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#ifdef HAVE_STRING_H
+#  include <string.h>
+#endif
+#ifdef HAVE_STRINGS_H
+#  include <strings.h>
+#endif
+#include <time.h>
+#include <unistd.h>
+
+#if HAVE_READLINE
+#  include <readline/readline.h>
+#  include <readline/history.h>
+#endif
+
+#if HAVE_INTTYPES_H
+#  include <inttypes.h>
+#else  /* !HAVE_INTTYPES_H */
+#  if HAVE_STDINT_H
+#    include <stdint.h>
+#  endif
+#endif  /* HAVE_INTTYPES_H */
+
+#include <slurm/slurm.h>
+
+#include "src/common/parse_time.h"
+#include "src/common/slurm_accounting_storage.h"
+#include "src/common/xstring.h"
+#include "src/common/print_fields.h"
+
+#define CKPT_WAIT	10
+#define	MAX_INPUT_FIELDS 128
+
+typedef enum {
+	SSHARE_TIME_SECS,
+	SSHARE_TIME_MINS,
+	SSHARE_TIME_HOURS,
+} sshare_time_format_t;
+
+extern int exit_code;	/* sshare's exit code, =1 on any error at any time */
+extern int quiet_flag;	/* quiet=1, verbose=-1, normal=0 */
+extern uint32_t my_uid;
+extern sshare_time_format_t time_format;
+extern char *time_format_string;
+
+extern int process(shares_response_msg_t *msg);
+
+#endif
diff --git a/src/sstat/Makefile.in b/src/sstat/Makefile.in
index 41bd7d93c485d76eff43eac77b09cde3ba89719b..65356e2502c51fb95801226cd9d21e696bbd874e 100644
--- a/src/sstat/Makefile.in
+++ b/src/sstat/Makefile.in
@@ -46,14 +46,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -108,6 +112,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
diff --git a/src/sstat/options.c b/src/sstat/options.c
index 5316794091b4d2849f14ef1bdeaf4bbc6c73fa3e..9b116545de6265703c5358273298e8853c629142 100644
--- a/src/sstat/options.c
+++ b/src/sstat/options.c
@@ -6,10 +6,11 @@
  *  Copyright (C) 2006 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Danny Auble <da@llnl.gov>.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -53,9 +54,9 @@ void _help_fields_msg(void)
 	for (i = 0; fields[i].name; i++) {
 		if (i & 3)
 			printf("  ");
-		else
+		else if(i)
 			printf("\n");
-		printf("%-10s", fields[i].name);
+		printf("%-12s", fields[i].name);
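+		/* four names per row, each left-justified in 12 columns */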
 	}
 	printf("\n");
 	return;
@@ -63,40 +64,45 @@ void _help_fields_msg(void)
 
 void _help_msg(void)
 {
-	printf("\n"
-	       "By default, sstat displays status data for job/step stated\n"
-	       "Options:\n"
-	       "-a, --allsteps\n"
-	       "-C, --cluster\n"
-	       "    Job is running on this cluster.\n"
-	       "-F <field-list>, --fields=<field-list>\n"
-	       "    Display the specified data (use \"--help-fields\" for a\n"
-	       "    list of available fields). If no field option is specified,\n"
-	       "    we use \"--fields=jobid,vsize,rss,pages,cputime,ntasks,state\".\n"
-	       "-h, --help\n"
-	       "    Print a general help message.\n"
-	       "--help-fields\n"
-	       "    Print a list of fields that can be specified with the\n"
-	       "    \"--fields\" option\n"
-	       "-j <job(.step)>, --jobs=<job(.step)>\n"
-	       "    Display information about this job or comma-separated\n"
-	       "    list of jobs. The default is all jobs. Adding .step will\n"
-	       "    display the specfic job step of that job.\n"
-	       "--noheader\n"
-	       "    Print (or don't print) a header. The default is to print a\n"
-	       "    header; the option has no effect if --dump is specified\n"
-	       "--usage\n"
-	       "    Pointer to this message.\n"
-	       "-v, --verbose\n"
-	       "    Primarily for debugging purposes, report the state of various\n"
-	       "    variables during processing.\n");
+	printf("\
+sstat [<OPTION>] -j <job(.stepid)>                                          \n\
+    Valid <OPTION> values are:                                              \n\
+      -a, --allsteps:                                                       \n\
+                   Print all steps for the given job(s) when no step is     \n\
+                   specified.                                               \n\
+      -e, --helpformat:                                                     \n\
+	           Print a list of fields that can be specified with the    \n\
+	           '--format' option                                        \n\
+     -h, --help:   Print this description of use.                           \n\
+     -j, --jobs:                                                            \n\
+	           Format is <job(.step)>. Stat this job step               \n\
+                   or comma-separated list of job steps. This option is     \n\
+                   required.  The step portion will default to step 0 if not\n\
+                   specified; if the --allsteps flag is set, omitting the   \n\
+                   step will result in all running steps being displayed.   \n\
+     -n, --noheader:                                                        \n\
+	           No header will be added to the beginning of output.      \n\
+                   The default is to print a header.                        \n\
+     -o, --format:                                                          \n\
+	           Comma separated list of fields. (use \"--helpformat\"    \n\
+                   for a list of available fields).                         \n\
+     -p, --parsable: output will be '|' delimited with a '|' at the end     \n\
+     -P, --parsable2: output will be '|' delimited without a '|' at the end \n\
+     --usage:      Display brief usage message.                             \n\
+     -v, --verbose:                                                         \n\
+	           Primarily for debugging purposes, report the state of    \n\
+                   various variables during processing.                     \n\
+     -V, --version: Print version.                                          \n\
+\n");
 
 	return;
 }
 
 void _usage(void)
 {
-	printf("\nUsage: sstat [options]\n\tUse --help for help\n");
+	printf("Usage: sstat [options] -j <job(.stepid)>\n"
+	       "\tUse --help for help\n");
 }
 
 
@@ -267,12 +273,14 @@ void parse_command_line(int argc, char **argv)
 
 	static struct option long_options[] = {
 		{"allsteps", 0, 0, 'a'},
-		{"cluster", 1, 0, 'C'},
-		{"fields", 1, 0, 'F'},
-		{"help", 0, &params.opt_help, 1},
-		{"help-fields", 0, &params.opt_help, 2},
+		{"helpformat", 0, 0, 'e'},
+		{"help", 0, 0, 'h'},
 		{"jobs", 1, 0, 'j'},
-		{"noheader", 0, &params.opt_noheader, 1},
+		{"noheader", 0, 0, 'n'},
+		{"fields", 1, 0, 'o'},
+		{"format", 1, 0, 'o'},
+		{"parsable", 0, 0, 'p'},
+		{"parsable2", 0, 0, 'P'},
 		{"usage", 0, &params.opt_help, 3},
 		{"verbose", 0, 0, 'v'},
 		{"version", 0, 0, 'V'},
@@ -285,7 +293,7 @@ void parse_command_line(int argc, char **argv)
 	opterr = 1;		/* Let getopt report problems to the user */
 
 	while (1) {		/* now cycle through the command line */
-		c = getopt_long(argc, argv, "aF:hj:Vv",
+		c = getopt_long(argc, argv, "aehj:no:pPvV",
 				long_options, &optionIndex);
 		if (c == -1)
 			break;
@@ -293,11 +301,8 @@ void parse_command_line(int argc, char **argv)
 		case 'a':
 			params.opt_all_steps = 1;
 			break;
-		case 'F':
-			if(params.opt_field_list)
-				xfree(params.opt_field_list);
-			
-			xstrfmtcat(params.opt_field_list, "%s,", optarg);
+		case 'e':
+			params.opt_help = 2;
 			break;
 		case 'h':
 			params.opt_help = 1;
@@ -315,6 +320,20 @@ void parse_command_line(int argc, char **argv)
 					destroy_jobacct_selected_step);
 			_addto_job_list(params.opt_job_list, optarg);
 			break;
+		case 'n':
+			print_fields_have_header = 0;
+			break;
+		case 'o':
+			xstrfmtcat(params.opt_field_list, "%s,", optarg);
+			break;
+		case 'p':
+			print_fields_parsable_print = 
+				PRINT_FIELDS_PARSABLE_ENDING;
+			break;
+		case 'P':
+			print_fields_parsable_print = 
+				PRINT_FIELDS_PARSABLE_NO_ENDING;
+			break;
 		case 'v':
 			/* Handle -vvv thusly...
 			 * 0 - report only normal messages and errors
@@ -326,19 +345,9 @@ void parse_command_line(int argc, char **argv)
 			break;
 
 		case 'V':
-		{
-			char	obuf[20]; /* should be long enough */
-			char	*rev="$Revision: 7267 $";
-			char	*s;
-
-			s=strstr(rev, " ")+1;
-			for (i=0; s[i]!=' '; i++)
-				obuf[i]=s[i];
-			obuf[i] = 0;
-			printf("%s: %s\n", argv[0], obuf);
+			printf("%s %s\n", PACKAGE, SLURM_VERSION);
 			exit(0);
-		}
-
+			break;
 		case ':':
 		case '?':	/* getopt() has explained it */
 			exit(1); 
@@ -369,32 +378,23 @@ void parse_command_line(int argc, char **argv)
 		xstrfmtcat(params.opt_field_list, "%s,", STAT_FIELDS);
 
 	if (params.opt_verbose) {
-		fprintf(stderr, "Options selected:\n"
-			"\topt_field_list=%s\n"
-			"\topt_noheader=%d\n"
-			"\topt_help=%d\n"
-			"\topt_verbose=%d\n",
-			params.opt_field_list,
-			params.opt_noheader,
-			params.opt_help,
-			params.opt_verbose);
 		logopt.stderr_level += params.opt_verbose;
+		logopt.prefix_level = 1;
 		log_alter(logopt, 0, NULL);
-
 	}
 
 	/* specific jobs requested? */
 	if (params.opt_verbose && params.opt_job_list
 	    && list_count(params.opt_job_list)) { 
-		fprintf(stderr, "Jobs requested:\n");
+		debug("Jobs requested:");
 		itr = list_iterator_create(params.opt_job_list);
 		while((selected_step = list_next(itr))) {
 			if(selected_step->stepid != NO_VAL) 
-				fprintf(stderr, "\t: %d.%d\n",
+				debug("\t: %d.%d",
 					selected_step->jobid,
 					selected_step->stepid);
 			else	
-				fprintf(stderr, "\t: %d\n", 
+				debug("\t: %d",
 					selected_step->jobid);
 		}
 		list_iterator_destroy(itr);
@@ -411,24 +411,21 @@ void parse_command_line(int argc, char **argv)
 			if (!strcasecmp(fields[i].name, start))
 				goto foundfield;
 		}
-		fprintf(stderr,
-			"Invalid field requested: \"%s\"\n",
-			start);
+		error("Invalid field requested: \"%s\"", start);
 		exit(1);
 	foundfield:
-		printfields[nprintfields++] = i;
+		list_append(print_fields_list, &fields[i]);
 		start = end + 1;
 	}
+	field_count = list_count(print_fields_list);
 
-	if (params.opt_verbose) {
-		fprintf(stderr, "%d field%s selected:\n",
-			nprintfields,
-			(nprintfields==1? "" : "s"));
-		for (i = 0; i < nprintfields; i++)
-			fprintf(stderr,
-				"\t%s\n",
-				fields[printfields[i]].name);
-	} 
+	if (optind < argc) {
+		error("Unknown arguments:");
+		for (i=optind; i<argc; i++)
+			error(" %s", argv[i]);
+		exit(1);
+	}
 
 	return;
 }
diff --git a/src/sstat/print.c b/src/sstat/print.c
index 8057f4a12a2ce2a82c47463325b7cfc08be74561..69f33d1c17d8dafa88647ae168d7c3f8f6e3fef8 100644
--- a/src/sstat/print.c
+++ b/src/sstat/print.c
@@ -6,10 +6,11 @@
  *  Copyright (C) 2006 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Danny Auble <da@llnl.gov>.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -42,17 +43,17 @@
 #include "slurm.h"
 #define FORMAT_STRING_SIZE 34
 
-void _elapsed_time(long secs, long usecs, char *str);
+char *_elapsed_time(long secs, long usecs);
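+/* Build an xmalloc()'d elapsed-time string: "D-HH:MM:SS" when days are
+ * present, "HH:MM:SS" when hours are, otherwise "MM:SS.mmm"; returns NULL
+ * for negative or NO_VAL input, and the caller must xfree() the result. */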
 
-void _elapsed_time(long secs, long usecs, char *str)
+char *_elapsed_time(long secs, long usecs)
 {
 	long	days, hours, minutes, seconds;
 	long    subsec = 0;
+	char *str = NULL;
+
+	if(secs < 0 || secs == NO_VAL)
+		return NULL;
 	
-	if(secs < 0) {
-		snprintf(str, FORMAT_STRING_SIZE, "'N/A'");
-		return;
-	}
 	
 	while (usecs >= 1E6) {
 		secs++;
@@ -68,371 +69,191 @@ void _elapsed_time(long secs, long usecs, char *str)
 	days    =  secs / 86400;
 
 	if (days) 
-		snprintf(str, FORMAT_STRING_SIZE,
-			 "%ld-%2.2ld:%2.2ld:%2.2ld",
-		         days, hours, minutes, seconds);
+		str = xstrdup_printf("%ld-%2.2ld:%2.2ld:%2.2ld",
+				     days, hours, minutes, seconds);
 	else if (hours)
-		snprintf(str, FORMAT_STRING_SIZE,
-			 "%ld:%2.2ld:%2.2ld",
-		         hours, minutes, seconds);
+		str = xstrdup_printf("%2.2ld:%2.2ld:%2.2ld",
+				     hours, minutes, seconds);
 	else
-		snprintf(str, FORMAT_STRING_SIZE,
-			 "%ld:%2.2ld.%3.3ld",
-		         minutes, seconds, subsec);
+		str = xstrdup_printf("%2.2ld:%2.2ld.%3.3ld",
+				     minutes, seconds, subsec);
+	return str;
 }
 
-extern void print_fields(type_t type, void *object)
+void print_fields(jobacct_step_rec_t *step)
 {
-	int f, pf;
-	for (f=0; f<nprintfields; f++) {
-		pf = printfields[f];
-		if (f)
-			printf(" ");
-		(fields[pf].print_routine)(type, object);
-	}
-	printf("\n");
-}
-
-/* Field-specific print routines */
-
-extern void print_cputime(type_t type, void *object)
-{ 
-	jobacct_job_rec_t *job = (jobacct_job_rec_t *)object;
-	jobacct_step_rec_t *step = (jobacct_step_rec_t *)object;
-	char outbuf[FORMAT_STRING_SIZE];
-	char buf1[FORMAT_STRING_SIZE];
-	char buf2[FORMAT_STRING_SIZE];
-	char buf3[FORMAT_STRING_SIZE];
-	sacct_t sacct;
-	char *nodes = NULL;
-	uint32_t pos;
-
-	switch(type) {
-	case HEADLINE:
-		printf("%-37s", "MinCPUtime/Node:Task - Ave");
-		break;
-	case UNDERSCORE:
-		printf("%-37s", "-------------------------------------");
-		break;
-	case JOB:
-		sacct = job->sacct;
-		nodes = job->nodes;
-		pos = sacct.min_cpu_id.nodeid;				 
-		_elapsed_time((int)sacct.min_cpu, 0, buf1);
-		if(job->track_steps)
-			snprintf(outbuf, FORMAT_STRING_SIZE, 
-				 "%s/- - -", buf1);
-		else {
-			_elapsed_time((int)sacct.ave_cpu, 0, buf2);
-			find_hostname(pos, nodes, buf3);
-			snprintf(outbuf, FORMAT_STRING_SIZE, 
-				 "%s/%s:%u - %s", 
-				 buf1,
-				 buf3, 
-				 sacct.min_cpu_id.taskid, 
-				 buf2);
-		}
-		printf("%-37s", outbuf);
-		break;
-	case JOBSTEP:
-		sacct = step->sacct;
-		nodes = step->nodes;
-		pos = sacct.min_cpu_id.nodeid;				 
-		_elapsed_time((int)sacct.min_cpu, 0, buf1);
-		_elapsed_time((int)sacct.ave_cpu, 0, buf2);
-		find_hostname(pos, nodes, buf3);
-		snprintf(outbuf, FORMAT_STRING_SIZE, 
-			 "%s/%s:%u - %s", 
-			 buf1,
-			 buf3, 
-			 sacct.min_cpu_id.taskid, 
-			 buf2);
-		printf("%-37s", outbuf);
-		break;
-	default:
-		printf("%-37s", "n/a");
-		break;
-	} 
-}
-
-extern void print_jobid(type_t type, void *object)
-{
-	jobacct_job_rec_t *job = (jobacct_job_rec_t *)object;
-	jobcomp_job_rec_t *jobcomp = (jobcomp_job_rec_t *)object;
-	jobacct_step_rec_t *step = (jobacct_step_rec_t *)object;
-	char outbuf[10];
-
-	switch(type) {
-	case HEADLINE:
-		printf("%-10s", "JobID");
-		break;
-	case UNDERSCORE:
-		printf("%-10s", "----------");
-		break;
-	case JOB:
-		printf("%-10u", job->jobid);
-		break;
-	case JOBCOMP:
-		printf("%-10u", jobcomp->jobid);
-		break;
-	case JOBSTEP:
-		snprintf(outbuf, sizeof(outbuf), "%u.%u",
-			 step->jobid,
-			 step->stepid);
-		printf("%-10s", outbuf);
-		break;
-	default:
-		printf("%-10s", "n/a");
-		break;
-	} 
-
-}
-
-extern void print_ntasks(type_t type, void *object)
-{ 
-	jobacct_job_rec_t *job = (jobacct_job_rec_t *)object;
-	jobacct_step_rec_t *step = (jobacct_step_rec_t *)object;
-
-	switch(type) {
-	case HEADLINE:
-		printf("%-7s", "Ntasks");
-		break;
-	case UNDERSCORE:
-		printf("%-7s", "-------");
-		break;
-	case JOB:
-		printf("%-7u", job->alloc_cpus);
-		break;
-	case JOBSTEP:
-		printf("%-7u", step->ncpus);
-		break;
-	default:
-		printf("%-7s", "n/a");
-		break;
-	} 
-}
-
-extern void print_pages(type_t type, void *object)
-{ 
-	jobacct_job_rec_t *job = (jobacct_job_rec_t *)object;
-	jobacct_step_rec_t *step = (jobacct_step_rec_t *)object;
-	char outbuf[FORMAT_STRING_SIZE];
-	char buf1[FORMAT_STRING_SIZE];
-	char buf2[FORMAT_STRING_SIZE];
-	char buf3[FORMAT_STRING_SIZE];
-	sacct_t sacct;
-	char *nodes = NULL;
-	uint32_t pos;
-
-	switch(type) {
-	case HEADLINE:
-		printf("%-34s", "MaxPages/Node:Task - Ave");
-		break;
-	case UNDERSCORE:
-		printf("%-34s", "----------------------------------");
-		break;
-	case JOB:
-		sacct = job->sacct;
-		nodes = job->nodes;
-		pos = sacct.min_cpu_id.nodeid;				 
-		convert_num_unit((float)sacct.max_pages, 
-				 buf1, sizeof(buf1), UNIT_NONE);
-
-		if(job->track_steps)
-			snprintf(outbuf, FORMAT_STRING_SIZE, "%s/- - -", buf1);
-		else {
-			convert_num_unit((float)sacct.ave_pages,
-					 buf2, sizeof(buf2), UNIT_NONE);
-			find_hostname(pos, nodes, buf3);
-			snprintf(outbuf, FORMAT_STRING_SIZE, "%s/%s:%u - %s", 
-				 buf1,
-				 buf3,
-				 sacct.max_pages_id.taskid, 
-				 buf2);
-		}
-		printf("%-34s", outbuf);
-		break;
-	case JOBSTEP:
-		sacct = step->sacct;
-		nodes = step->nodes;
-		pos = sacct.min_cpu_id.nodeid;				 
-		convert_num_unit((float)sacct.max_pages, buf1, sizeof(buf1),
-				 UNIT_NONE);
-		convert_num_unit((float)sacct.ave_pages, buf2, sizeof(buf2),
-				 UNIT_NONE);
-		find_hostname(pos, nodes, buf3);
-		snprintf(outbuf, FORMAT_STRING_SIZE, "%s/%s:%u - %s", 
-			 buf1,
-			 buf3,
-			 sacct.max_pages_id.taskid, 
-			 buf2);
-		printf("%-34s", outbuf);
-		break;
-	default:
-		printf("%-34s", "n/a");
-		break;
-	} 
-}
-
-extern void print_rss(type_t type, void *object)
-{ 
-	jobacct_job_rec_t *job = (jobacct_job_rec_t *)object;
-	jobacct_step_rec_t *step = (jobacct_step_rec_t *)object;
+	print_field_t *field = NULL;
+	int curr_inx = 1;
 	char outbuf[FORMAT_STRING_SIZE];
-	char buf1[FORMAT_STRING_SIZE];
-	char buf2[FORMAT_STRING_SIZE];
-	char buf3[FORMAT_STRING_SIZE];
-	sacct_t sacct;
-	char *nodes = NULL;
-	uint32_t pos;
-
-	switch(type) {
-	case HEADLINE:
-		printf("%-34s", "MaxRSS/Node:Task - Ave");
-		break;
-	case UNDERSCORE:
-		printf("%-34s", "----------------------------------");
-		break;
-	case JOB:
-		sacct = job->sacct;
-		nodes = job->nodes;
-		pos = sacct.min_cpu_id.nodeid;				 
-		convert_num_unit((float)sacct.max_rss, buf1, sizeof(buf1),
-				 UNIT_KILO);
 
-		if(job->track_steps)
-			snprintf(outbuf, FORMAT_STRING_SIZE, "%s/- - -", buf1);
-		else {
-			convert_num_unit((float)sacct.ave_rss, 
-					 buf2, sizeof(buf2), UNIT_KILO);
-			find_hostname(pos, nodes, buf3);
-			snprintf(outbuf, FORMAT_STRING_SIZE, "%s/%s:%u - %s", 
-				 buf1,
-				 buf3, 
-				 sacct.max_rss_id.taskid, 
-				 buf2);
-		}
-		printf("%-34s", outbuf);
-		break;
-	case JOBSTEP:
-		sacct = step->sacct;
-		nodes = step->nodes;
-		pos = sacct.min_cpu_id.nodeid;				 
-		convert_num_unit((float)sacct.max_rss, buf1, sizeof(buf1),
-				 UNIT_KILO);
-		convert_num_unit((float)sacct.ave_rss, buf2, sizeof(buf2),
-				 UNIT_KILO);
-		find_hostname(pos, nodes, buf3);
-		snprintf(outbuf, FORMAT_STRING_SIZE, "%s/%s:%u - %s", 
-			 buf1,
-			 buf3, 
-			 sacct.max_rss_id.taskid, 
-			 buf2);
-		printf("%-34s", outbuf);
-		break;
-	default:
-		printf("%-34s", "n/a");
-		break;
-	} 
-}
+	list_iterator_reset(print_fields_itr);
+	while((field = list_next(print_fields_itr))) {
+		char *tmp_char = NULL;
+		
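+		/* the (curr_inx == field_count) argument flags the last
+		 * column so each print routine knows when the row ends
+		 * (e.g. whether to emit a trailing '|' in parsable mode) */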
+		switch(field->type) {
+		case PRINT_AVECPU:
+			
+			tmp_char = _elapsed_time((int)step->sacct.ave_cpu, 0);
+			
+			field->print_routine(field,
+					     tmp_char,
+					     (curr_inx == field_count));
+			xfree(tmp_char);
+			break;
+		case PRINT_AVEPAGES:
+			convert_num_unit((float)step->sacct.ave_pages,
+					 outbuf, sizeof(outbuf),
+					 UNIT_KILO);
+			
+			field->print_routine(field,
+					     outbuf,
+					     (curr_inx == field_count));
+			break;
+		case PRINT_AVERSS:
+			convert_num_unit((float)step->sacct.ave_rss,
+					 outbuf, sizeof(outbuf),
+					 UNIT_KILO);
+			
+			field->print_routine(field,
+					     outbuf,
+					     (curr_inx == field_count));
+			break;
+		case PRINT_AVEVSIZE:
+			convert_num_unit((float)step->sacct.ave_vsize,
+					 outbuf, sizeof(outbuf),
+					 UNIT_KILO);
+			
+			field->print_routine(field,
+					     outbuf,
+					     (curr_inx == field_count));
+			break;
+		case PRINT_JOBID:
+			snprintf(outbuf, sizeof(outbuf), "%u.%u",
+				 step->job_ptr->jobid,
+				 step->stepid);
+			
+			field->print_routine(field,
+					     outbuf,
+					     (curr_inx == field_count));
+			break;
+		case PRINT_MAXPAGES:
+			convert_num_unit((float)step->sacct.max_pages,
+					 outbuf, sizeof(outbuf),
+					 UNIT_KILO);
+			
+			field->print_routine(field,
+					     outbuf,
+					     (curr_inx == field_count));
+			break;
+		case PRINT_MAXPAGESNODE:
+			tmp_char = find_hostname(
+					step->sacct.max_pages_id.nodeid,
+					step->nodes);
+			field->print_routine(field,
+					     tmp_char,
+					     (curr_inx == field_count));
+			xfree(tmp_char);
+			break;
+		case PRINT_MAXPAGESTASK:
+			field->print_routine(field,
+					     step->sacct.max_pages_id.taskid,
+					     (curr_inx == field_count));
+			break;
+		case PRINT_MAXRSS:
+			convert_num_unit((float)step->sacct.max_rss,
+					 outbuf, sizeof(outbuf),
+					 UNIT_KILO);
+			
+			field->print_routine(field,
+					     outbuf,
+					     (curr_inx == field_count));
+			break;
+		case PRINT_MAXRSSNODE:
+			tmp_char = find_hostname(
+					step->sacct.max_rss_id.nodeid,
+					step->nodes);
+			field->print_routine(field,
+					     tmp_char,
+					     (curr_inx == field_count));
+			xfree(tmp_char);
+			break;
+		case PRINT_MAXRSSTASK:
+			field->print_routine(field,
+					     step->sacct.max_rss_id.taskid,
+					     (curr_inx == field_count));
+			break;
+		case PRINT_MAXVSIZE:
+			convert_num_unit((float)step->sacct.max_vsize,
+					 outbuf, sizeof(outbuf),
+					 UNIT_KILO);
+			
+			field->print_routine(field,
+					     outbuf,
+					     (curr_inx == field_count));
+			break;
+		case PRINT_MAXVSIZENODE:
+			tmp_char = find_hostname(
+					step->sacct.max_vsize_id.nodeid,
+					step->nodes);
+			field->print_routine(field,
+					     tmp_char,
+					     (curr_inx == field_count));
+			xfree(tmp_char);
+			break;
+		case PRINT_MAXVSIZETASK:
+			field->print_routine(field,
+					     step->sacct.max_vsize_id.taskid,
+					     (curr_inx == field_count));
+			break;
+		case PRINT_MINCPU:
+			tmp_char = _elapsed_time((int)step->sacct.min_cpu, 0);
+			field->print_routine(field,
+					     tmp_char,
+					     (curr_inx == field_count));
+			xfree(tmp_char);
+			break;
+		case PRINT_MINCPUNODE:
+			tmp_char = find_hostname(
+					step->sacct.min_cpu_id.nodeid,
+					step->nodes);
+			field->print_routine(field,
+					     tmp_char,
+					     (curr_inx == field_count));
+			xfree(tmp_char);
+			break;
+		case PRINT_MINCPUTASK:
+			field->print_routine(field,
+					     step->sacct.min_cpu_id.taskid,
+					     (curr_inx == field_count));
+			break;
+		case PRINT_NTASKS:
+			field->print_routine(field,
+					     step->ntasks,
+					     (curr_inx == field_count));
+			break;
+		case PRINT_SYSTEMCPU:
+			tmp_char = _elapsed_time(step->sys_cpu_sec,
+						 step->sys_cpu_usec);
 
-extern void print_state(type_t type, void *object)
-{ 
-	jobacct_job_rec_t *job = (jobacct_job_rec_t *)object;
-	jobcomp_job_rec_t *jobcomp = (jobcomp_job_rec_t *)object;
-	jobacct_step_rec_t *step = (jobacct_step_rec_t *)object;
+			field->print_routine(field,
+					     tmp_char,
+					     (curr_inx == field_count));
+			xfree(tmp_char);
+			break;
+		case PRINT_TOTALCPU:
+			tmp_char = _elapsed_time(step->tot_cpu_sec, 
+						 step->tot_cpu_usec);
 
-	switch(type) {
-	case HEADLINE:
-		printf("%-20s", "State");
-		break;
-	case UNDERSCORE:
-		printf("%-20s", "--------------------");
-		break;
-	case JOB:
-		if ( job->state == JOB_CANCELLED) {
-			printf ("%-10s by %6d",
-				job_state_string(job->state), job->requid);
+			field->print_routine(field,
+					     tmp_char,
+					     (curr_inx == field_count));
+			xfree(tmp_char);
+			break;
+		default:
+			break;
 		}
-		else {
-			printf("%-20s", job_state_string(job->state));
-		}
-		break;
-	case JOBCOMP:
-		printf("%-20s", jobcomp->state);
-		break;
-	case JOBSTEP:
-		if ( step->state == JOB_CANCELLED) {
-			printf ("%-10s by %6d",
-				job_state_string(step->state), step->requid);
-		}
-		else {
-			printf("%-20s", job_state_string(step->state));
-		}
-		break;
-	default:
-		printf("%-20s", "n/a");
-		break;
-	} 
+		curr_inx++;
+	}
+	printf("\n");
 }
 
-extern void print_vsize(type_t type, void *object)
-{ 
-	jobacct_job_rec_t *job = (jobacct_job_rec_t *)object;
-	jobacct_step_rec_t *step = (jobacct_step_rec_t *)object;
-	char outbuf[FORMAT_STRING_SIZE];
-	char buf1[FORMAT_STRING_SIZE];
-	char buf2[FORMAT_STRING_SIZE];
-	char buf3[FORMAT_STRING_SIZE];
-	sacct_t sacct;
-	char *nodes = NULL;
-	uint32_t pos;
-
-	switch(type) {
-	case HEADLINE:
-		printf("%-34s", "MaxVSIZE/Node:Task - Ave");
-		break;
-	case UNDERSCORE:
-		printf("%-34s", "----------------------------------");
-		break;
-	case JOB:
-		sacct = job->sacct;
-		nodes = job->nodes;
-		pos = sacct.min_cpu_id.nodeid;				 
-		convert_num_unit((float)sacct.max_vsize, 
-				 buf1, sizeof(buf1),UNIT_KILO);
-		if(job->track_steps)
-			snprintf(outbuf, FORMAT_STRING_SIZE, "%s/- - -", buf1);
-		else {
-			convert_num_unit((float)sacct.ave_vsize,
-					 buf2, sizeof(buf2), UNIT_KILO);
-			find_hostname(pos, nodes, buf3);
-			snprintf(outbuf, FORMAT_STRING_SIZE, "%s/%s:%u - %s", 
-				 buf1,
-				 buf3, 
-				 sacct.max_vsize_id.taskid, 
-				 buf2);
-		}
-		printf("%-34s", outbuf);
-		break;
-	case JOBSTEP:
-		sacct = step->sacct;
-		nodes = step->nodes;
-		pos = sacct.min_cpu_id.nodeid;				 
-		convert_num_unit((float)sacct.max_vsize, buf1, sizeof(buf1), 
-				 UNIT_KILO);
-		convert_num_unit((float)sacct.ave_vsize, buf2, sizeof(buf2),
-				 UNIT_KILO);
-		find_hostname(pos, nodes, buf3);
-		snprintf(outbuf, FORMAT_STRING_SIZE, "%s/%s:%u - %s", 
-			 buf1,
-			 buf3, 
-			 sacct.max_vsize_id.taskid, 
-			 buf2);
-		printf("%-34s", outbuf);
-		break;
-	default:
-		printf("%-34s", "n/a");
-		break;
-	} 
-}
diff --git a/src/sstat/process.c b/src/sstat/process.c
index 433433519a9d748c6fe1ca781a449a4c9ab4afe7..df11df8242c1e4c2ea2c189d7444cb03e360c922 100644
--- a/src/sstat/process.c
+++ b/src/sstat/process.c
@@ -6,10 +6,11 @@
  *  Copyright (C) 2006 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Danny Auble <da@llnl.gov>.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -40,25 +41,22 @@
 #include "sstat.h"
 
 
-void find_hostname(uint32_t pos, char *hosts, char *host)
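+/* Return an xstrdup()'d copy of the hostname at position pos in the hosts
+ * list, or NULL if it cannot be found; the caller must xfree() the result. */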
+char *find_hostname(uint32_t pos, char *hosts)
 {
 	hostlist_t hostlist = NULL;
-	char *temp = NULL;
+	char *temp = NULL, *host = NULL;
 
-	if(pos == (uint32_t)NO_VAL) {
-		snprintf(host, 50, "'N/A'");
-		return;
-	}
+	if(!hosts || (pos == (uint32_t)NO_VAL))
+		return NULL;
+	
 	hostlist = hostlist_create(hosts);
 	temp = hostlist_nth(hostlist, pos);
 	if(temp) {
-		snprintf(host, 50, "%s", temp);
+		host = xstrdup(temp);
 		free(temp);
-	} else {
-		snprintf(host, 50, "'N/A'");
-	}
+	} 
 	hostlist_destroy(hostlist);
-	return;
+	return host;
 }
 
 void aggregate_sacct(sacct_t *dest, sacct_t *from)
diff --git a/src/sstat/sstat.c b/src/sstat/sstat.c
index 38b76ce09f1d7f58f4aa66ef3516c8d05d802f25..6bb5eb17250c080a58ce50bb4f069a0617c21f41 100644
--- a/src/sstat/sstat.c
+++ b/src/sstat/sstat.c
@@ -4,10 +4,11 @@
  *  Copyright (C) 2008 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -49,39 +50,35 @@ int _do_stat(uint32_t jobid, uint32_t stepid);
  * Globals
  */
 sstat_parameters_t params;
-fields_t fields[] = {{"cputime", print_cputime}, 
-		     {"jobid", print_jobid}, 
-		     {"ntasks", print_ntasks}, 
-		     {"pages", print_pages}, 
-		     {"rss", print_rss},
-		     {"state", print_state}, 
-		     {"vsize", print_vsize}, 
-		     {NULL, NULL}};
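+/* Every field sstat can display: column width, header text, formatting
+ * routine, and the PRINT_* tag dispatched in print_fields(); the zeroed
+ * entry terminates the table. */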
+print_field_t fields[] = {
+	{10, "AveCPU", print_fields_str, PRINT_AVECPU}, 
+	{10, "AvePages", print_fields_str, PRINT_AVEPAGES}, 
+	{10, "AveRSS", print_fields_str, PRINT_AVERSS}, 
+	{10, "AveVMSize", print_fields_str, PRINT_AVEVSIZE}, 
+	{10, "JobID", print_fields_str, PRINT_JOBID}, 
+	{8, "MaxPages", print_fields_str, PRINT_MAXPAGES}, 
+	{12, "MaxPagesNode", print_fields_str, PRINT_MAXPAGESNODE}, 
+	{14, "MaxPagesTask", print_fields_int, PRINT_MAXPAGESTASK}, 
+	{10, "MaxRSS", print_fields_str, PRINT_MAXRSS},
+	{10, "MaxRSSNode", print_fields_str, PRINT_MAXRSSNODE},
+	{10, "MaxRSSTask", print_fields_int, PRINT_MAXRSSTASK},
+	{10, "MaxVMSize", print_fields_str, PRINT_MAXVSIZE}, 
+	{14, "MaxVMSizeNode", print_fields_str, PRINT_MAXVSIZENODE}, 
+	{14, "MaxVMSizeTask", print_fields_int, PRINT_MAXVSIZETASK}, 
+	{10, "MinCPU", print_fields_str, PRINT_MINCPU}, 
+	{10, "MinCPUNode", print_fields_str, PRINT_MINCPUNODE}, 
+	{10, "MinCPUTask", print_fields_int, PRINT_MINCPUTASK}, 
+	{8, "NTasks", print_fields_int, PRINT_NTASKS},
+	{10, "SystemCPU", print_fields_str, PRINT_SYSTEMCPU}, 
+	{10, "TotalCPU", print_fields_str, PRINT_TOTALCPU}, 
+	{0, NULL, NULL, 0}};
 
 List jobs = NULL;
+jobacct_job_rec_t job;
 jobacct_step_rec_t step;
-
-int printfields[MAX_PRINTFIELDS],	/* Indexed into fields[] */
-	nprintfields = 0;
-
-void _print_header(void)
-{
-	int	i,j;
-	for (i=0; i<nprintfields; i++) {
-		if (i)
-			printf(" ");
-		j=printfields[i];
-		(fields[j].print_routine)(HEADLINE, 0);
-	}
-	printf("\n");
-	for (i=0; i<nprintfields; i++) {
-		if (i)
-			printf(" ");
-		j=printfields[i];
-		(fields[j].print_routine)(UNDERSCORE, 0);
-	}
-	printf("\n");
-}
+List print_fields_list = NULL;
+ListIterator print_fields_itr = NULL;
+int field_count = 0;
 
 int _sstat_query(slurm_step_layout_t *step_layout, uint32_t job_id,
 		 uint32_t step_id)
@@ -99,12 +96,17 @@ int _sstat_query(slurm_step_layout_t *step_layout, uint32_t job_id,
 	debug("getting the stat of job %d on %d nodes", 
 	      job_id, step_layout->node_cnt);
 
+	memset(&job, 0, sizeof(jobacct_job_rec_t));
+	job.jobid = job_id;
+
+	memset(&step, 0, sizeof(jobacct_step_rec_t));
+	
 	memset(&temp_sacct, 0, sizeof(sacct_t));
 	temp_sacct.min_cpu = (float)NO_VAL;
 	memset(&step.sacct, 0, sizeof(sacct_t));
 	step.sacct.min_cpu = (float)NO_VAL;
 
-	step.jobid = job_id;
+	step.job_ptr = &job;
 	step.stepid = step_id;
 	step.nodes = step_layout->node_list;
 	step.stepname = NULL;
@@ -166,17 +168,12 @@ cleanup:
 		step.sacct.ave_rss /= tot_tasks;
 		step.sacct.ave_vsize /= tot_tasks;
 		step.sacct.ave_pages /= tot_tasks;
+		step.ntasks = tot_tasks;
 	}
 	jobacct_gather_g_destroy(r.jobacct);	
 	return SLURM_SUCCESS;
 }
 
-int _process_results()
-{
-	print_fields(JOBSTEP, &step);
-	return SLURM_SUCCESS;
-}
-
 int _do_stat(uint32_t jobid, uint32_t stepid)
 {
 	slurm_msg_t req_msg;
@@ -219,7 +216,7 @@ int _do_stat(uint32_t jobid, uint32_t stepid)
 
 	_sstat_query(step_layout, jobid, stepid);
 	
-	_process_results();
+	print_fields(&step);
 	
 	slurm_step_layout_destroy(step_layout);	
 	
@@ -232,14 +229,16 @@ int main(int argc, char **argv)
 	uint32_t stepid = 0;
 	jobacct_selected_step_t *selected_step = NULL;
 	
+	print_fields_list = list_create(NULL);
+	print_fields_itr = list_iterator_create(print_fields_list);
+
 	parse_command_line(argc, argv);
 	if(!params.opt_job_list || !list_count(params.opt_job_list)) {
 		error("You didn't give me any jobs to stat.");
 		return 1;
 	}
 
-	if (!params.opt_noheader) 	/* give them something to look */
-		_print_header();/* at while we think...        */
+	print_fields_header(print_fields_list);
 	itr = list_iterator_create(params.opt_job_list);
 	while((selected_step = list_next(itr))) {
 		if(selected_step->stepid != NO_VAL)
@@ -265,8 +264,15 @@ int main(int argc, char **argv)
 		_do_stat(selected_step->jobid, stepid);
 	}
 	list_iterator_destroy(itr);
-		
-	list_destroy(params.opt_job_list);
+
+	xfree(params.opt_field_list);
+	if(params.opt_job_list)	
+		list_destroy(params.opt_job_list);
+
+	if(print_fields_itr)
+		list_iterator_destroy(print_fields_itr);
+	if(print_fields_list)
+		list_destroy(print_fields_list);
 
 	return 0;
 }
diff --git a/src/sstat/sstat.h b/src/sstat/sstat.h
index 21559444bdb73cea1806979c8be5468177c90272..242e803689d9611947a61c5abc425295e3f6e695 100644
--- a/src/sstat/sstat.h
+++ b/src/sstat/sstat.h
@@ -7,10 +7,11 @@
  *  Copyright (C) 2008 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -61,10 +62,11 @@
 #include "src/common/slurm_jobacct_gather.h"
 #include "src/common/slurm_accounting_storage.h"
 #include "src/common/slurm_jobcomp.h"
+#include "src/common/print_fields.h"
 
 #define ERROR 2
 
-#define STAT_FIELDS "jobid,vsize,rss,pages,cputime,ntasks,state"
+#define STAT_FIELDS "jobid,maxvmsize,maxvmsizenode,maxvmsizetask,avevmsize,maxrss,maxrssnode,maxrsstask,averss,maxpages,maxpagesnode,maxpagestask,avepages,mincpu,mincpunode,mincputask,avecpu,ntasks"
 
 #define BUFFER_SIZE 4096
 #define STATE_COUNT 10
@@ -77,12 +79,29 @@
 
 /* On output, use fields 12-37 from JOB_STEP */
 
-typedef enum {	HEADLINE,
-		UNDERSCORE,
-		JOB,
-		JOBSTEP,
-		JOBCOMP
-} type_t;
+typedef enum {
+		PRINT_AVECPU,
+		PRINT_AVEPAGES,
+		PRINT_AVERSS,
+		PRINT_AVEVSIZE,
+		PRINT_JOBID,
+		PRINT_MAXPAGES,
+		PRINT_MAXPAGESNODE,
+		PRINT_MAXPAGESTASK,
+		PRINT_MAXRSS,
+		PRINT_MAXRSSNODE,
+		PRINT_MAXRSSTASK,
+		PRINT_MAXVSIZE,
+		PRINT_MAXVSIZENODE,
+		PRINT_MAXVSIZETASK,
+		PRINT_MINCPU,
+		PRINT_MINCPUNODE,
+		PRINT_MINCPUTASK,
+		PRINT_NTASKS,
+		PRINT_SYSTEMCPU,
+		PRINT_TOTALCPU,
+} sstat_print_types_t;
+
 
 typedef struct {
 	int opt_all_steps;	/* --allsteps */
@@ -93,13 +112,11 @@ typedef struct {
 	int opt_verbose;	/* --verbose */
 } sstat_parameters_t;
 
-typedef struct fields {
-	char *name;		/* Specified in --fields= */
-	void (*print_routine) ();	/* Who gets to print it? */
-} fields_t;
-
-extern fields_t fields[];
+extern List print_fields_list;
+extern ListIterator print_fields_itr;
+extern print_field_t fields[];
 extern sstat_parameters_t params;
+extern int field_count;
 
 extern List jobs;
 
@@ -107,19 +124,11 @@ extern int printfields[MAX_PRINTFIELDS],	/* Indexed into fields[] */
 	nprintfields;
 
 /* process.c */
-void find_hostname(uint32_t pos, char *hosts, char *host);
+char *find_hostname(uint32_t pos, char *hosts);
 void aggregate_sacct(sacct_t *dest, sacct_t *from);
 
 /* print.c */
-void print_cputime(type_t type, void *object);
-void print_fields(type_t type, void *object);
-void print_jobid(type_t type, void *object);
-void print_ntasks(type_t type, void *object);
-void print_pages(type_t type, void *object);
-void print_rss(type_t type, void *object);
-void print_state(type_t type, void *object);
-void print_vsize(type_t type, void *object);
-
+void print_fields(jobacct_step_rec_t *step);
 
 /* options.c */
 void parse_command_line(int argc, char **argv);
diff --git a/src/strigger/Makefile.in b/src/strigger/Makefile.in
index b95d6ecee5ab472cac0998736175dafb32995607..d62a72bf4801181d8023eb74f51af8440bbf2e73 100644
--- a/src/strigger/Makefile.in
+++ b/src/strigger/Makefile.in
@@ -47,14 +47,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -108,6 +112,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
diff --git a/src/strigger/opts.c b/src/strigger/opts.c
index 499e3e60feb5e389723bad5230b52f105eb1425a..4b4ec563350e8006d88badbc210ce9b606994db7 100644
--- a/src/strigger/opts.c
+++ b/src/strigger/opts.c
@@ -5,10 +5,11 @@
  *  Copyright (C) 2008 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -367,7 +368,8 @@ Usage: strigger [--set | --get | --clear] [OPTIONS]\n\
   -p, --program=path  pathname of program to execute when triggered\n\
   -r, --reconfig      trigger event on configuration changes\n\
   -t, --time          trigger event on job's time limit\n\
-  -u, --up            trigger event when node returned to service from DOWN state\n\
+  -u, --up            trigger event when node returned to service from DOWN \n\
+                      state\n\
       --user          a user name or ID to filter triggers by\n\
   -v, --verbose       print detailed event logging\n\
   -V, --version       print version information and exit\n\
diff --git a/src/strigger/strigger.c b/src/strigger/strigger.c
index 6371787d564f810c8defa84072652bf47c605492..e2ae18228811b87e2c8196b00b3725ae1efb0d53 100644
--- a/src/strigger/strigger.c
+++ b/src/strigger/strigger.c
@@ -5,10 +5,11 @@
  *  Copyright (C) 2008 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -98,7 +99,7 @@ static int _clear_trigger(void)
 	trigger_info_t ti;
 	char tmp_c[128];
 
-	bzero(&ti, sizeof(trigger_info_t));
+	memset(&ti, 0, sizeof(trigger_info_t));
 	ti.trig_id	= params.trigger_id;
 	ti.user_id	= params.user_id;
 	if (params.job_id) {
@@ -128,7 +129,7 @@ static int _set_trigger(void)
 	trigger_info_t ti;
 	char tmp_c[128];
 
-	bzero(&ti, sizeof(trigger_info_t));
+	memset(&ti, 0, sizeof(trigger_info_t));
 	if (params.job_id) {
 		ti.res_type = TRIGGER_RES_TYPE_JOB;
 		snprintf(tmp_c, sizeof(tmp_c), "%u", params.job_id);
diff --git a/src/strigger/strigger.h b/src/strigger/strigger.h
index a91c08b0febfff804c5551cbb734ab31aef8fd26..f973da29be7f66733d4d275c7da5b571adf9bc53 100644
--- a/src/strigger/strigger.h
+++ b/src/strigger/strigger.h
@@ -5,10 +5,11 @@
  *  Copyright (C) 2008 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/sview/Makefile.am b/src/sview/Makefile.am
index ed216cbb742b5eb0511006e426bbfddced75db29..1a5047c84b297a6bf1a21a7a56e19ebd5439c9e4 100644
--- a/src/sview/Makefile.am
+++ b/src/sview/Makefile.am
@@ -15,7 +15,7 @@ sview_LDADD =					  \
 
 noinst_HEADERS = sview.h
 sview_SOURCES = sview.c popups.c grid.c part_info.c job_info.c \
-	block_info.c node_info.c \
+	block_info.c node_info.c resv_info.c \
 	submit_info.c admin_info.c common.c
 
 force:
@@ -28,7 +28,7 @@ sview_CFLAGS = $(GTK2_CFLAGS)
 else
 
 EXTRA_sview_SOURCES = sview.h sview.c popups.c grid.c part_info.c job_info.c \
-	block_info.c node_info.c \
+	block_info.c node_info.c resv_info.c \
 	submit_info.c admin_info.c common.c
 
 endif
diff --git a/src/sview/Makefile.in b/src/sview/Makefile.in
index 8b76d0d06c15aa0d6ad842bc041b0125f60a4531..f8ae8de486c077cf273f90770f8aa36e77629f7a 100644
--- a/src/sview/Makefile.in
+++ b/src/sview/Makefile.in
@@ -48,14 +48,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -76,20 +80,21 @@ am__installdirs = "$(DESTDIR)$(bindir)"
 binPROGRAMS_INSTALL = $(INSTALL_PROGRAM)
 PROGRAMS = $(bin_PROGRAMS)
 am__sview_SOURCES_DIST = sview.c popups.c grid.c part_info.c \
-	job_info.c block_info.c node_info.c submit_info.c admin_info.c \
-	common.c
+	job_info.c block_info.c node_info.c resv_info.c submit_info.c \
+	admin_info.c common.c
 @HAVE_GTK_TRUE@am_sview_OBJECTS = sview-sview.$(OBJEXT) \
 @HAVE_GTK_TRUE@	sview-popups.$(OBJEXT) sview-grid.$(OBJEXT) \
 @HAVE_GTK_TRUE@	sview-part_info.$(OBJEXT) \
 @HAVE_GTK_TRUE@	sview-job_info.$(OBJEXT) \
 @HAVE_GTK_TRUE@	sview-block_info.$(OBJEXT) \
 @HAVE_GTK_TRUE@	sview-node_info.$(OBJEXT) \
+@HAVE_GTK_TRUE@	sview-resv_info.$(OBJEXT) \
 @HAVE_GTK_TRUE@	sview-submit_info.$(OBJEXT) \
 @HAVE_GTK_TRUE@	sview-admin_info.$(OBJEXT) \
 @HAVE_GTK_TRUE@	sview-common.$(OBJEXT)
 am__EXTRA_sview_SOURCES_DIST = sview.h sview.c popups.c grid.c \
-	part_info.c job_info.c block_info.c node_info.c submit_info.c \
-	admin_info.c common.c
+	part_info.c job_info.c block_info.c node_info.c resv_info.c \
+	submit_info.c admin_info.c common.c
 sview_OBJECTS = $(am_sview_OBJECTS)
 @HAVE_GTK_TRUE@sview_DEPENDENCIES = $(top_builddir)/src/plugins/select/bluegene/block_allocator/libbluegene_block_allocator.la \
 @HAVE_GTK_TRUE@	$(top_builddir)/src/api/libslurm.o
@@ -126,6 +131,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
@@ -293,13 +302,13 @@ INCLUDES = -I$(top_srcdir) $(BG_INCLUDES)
 
 @HAVE_GTK_TRUE@noinst_HEADERS = sview.h
 @HAVE_GTK_TRUE@sview_SOURCES = sview.c popups.c grid.c part_info.c job_info.c \
-@HAVE_GTK_TRUE@	block_info.c node_info.c \
+@HAVE_GTK_TRUE@	block_info.c node_info.c resv_info.c \
 @HAVE_GTK_TRUE@	submit_info.c admin_info.c common.c
 
 @HAVE_GTK_TRUE@sview_LDFLAGS = -export-dynamic $(CMD_LDFLAGS) $(BG_LDFLAGS) $(GTK2_LIBS) 
 @HAVE_GTK_TRUE@sview_CFLAGS = $(GTK2_CFLAGS)
 @HAVE_GTK_FALSE@EXTRA_sview_SOURCES = sview.h sview.c popups.c grid.c part_info.c job_info.c \
-@HAVE_GTK_FALSE@	block_info.c node_info.c \
+@HAVE_GTK_FALSE@	block_info.c node_info.c resv_info.c \
 @HAVE_GTK_FALSE@	submit_info.c admin_info.c common.c
 
 all: all-am
@@ -381,6 +390,7 @@ distclean-compile:
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sview-node_info.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sview-part_info.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sview-popups.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sview-resv_info.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sview-submit_info.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sview-sview.Po@am__quote@
 
@@ -503,6 +513,20 @@ sview-node_info.obj: node_info.c
 @AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
 @am__fastdepCC_FALSE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(sview_CFLAGS) $(CFLAGS) -c -o sview-node_info.obj `if test -f 'node_info.c'; then $(CYGPATH_W) 'node_info.c'; else $(CYGPATH_W) '$(srcdir)/node_info.c'; fi`
 
+sview-resv_info.o: resv_info.c
+@am__fastdepCC_TRUE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(sview_CFLAGS) $(CFLAGS) -MT sview-resv_info.o -MD -MP -MF $(DEPDIR)/sview-resv_info.Tpo -c -o sview-resv_info.o `test -f 'resv_info.c' || echo '$(srcdir)/'`resv_info.c
+@am__fastdepCC_TRUE@	mv -f $(DEPDIR)/sview-resv_info.Tpo $(DEPDIR)/sview-resv_info.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='resv_info.c' object='sview-resv_info.o' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(sview_CFLAGS) $(CFLAGS) -c -o sview-resv_info.o `test -f 'resv_info.c' || echo '$(srcdir)/'`resv_info.c
+
+sview-resv_info.obj: resv_info.c
+@am__fastdepCC_TRUE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(sview_CFLAGS) $(CFLAGS) -MT sview-resv_info.obj -MD -MP -MF $(DEPDIR)/sview-resv_info.Tpo -c -o sview-resv_info.obj `if test -f 'resv_info.c'; then $(CYGPATH_W) 'resv_info.c'; else $(CYGPATH_W) '$(srcdir)/resv_info.c'; fi`
+@am__fastdepCC_TRUE@	mv -f $(DEPDIR)/sview-resv_info.Tpo $(DEPDIR)/sview-resv_info.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	source='resv_info.c' object='sview-resv_info.obj' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(sview_CFLAGS) $(CFLAGS) -c -o sview-resv_info.obj `if test -f 'resv_info.c'; then $(CYGPATH_W) 'resv_info.c'; else $(CYGPATH_W) '$(srcdir)/resv_info.c'; fi`
+
 sview-submit_info.o: submit_info.c
 @am__fastdepCC_TRUE@	$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(sview_CFLAGS) $(CFLAGS) -MT sview-submit_info.o -MD -MP -MF $(DEPDIR)/sview-submit_info.Tpo -c -o sview-submit_info.o `test -f 'submit_info.c' || echo '$(srcdir)/'`submit_info.c
 @am__fastdepCC_TRUE@	mv -f $(DEPDIR)/sview-submit_info.Tpo $(DEPDIR)/sview-submit_info.Po
diff --git a/src/sview/admin_info.c b/src/sview/admin_info.c
index cb1cb51ffcffe6c586e4f894118871dd48a71a9f..8262aa7175d948c3f453c313a34aeeae32aa9529 100644
--- a/src/sview/admin_info.c
+++ b/src/sview/admin_info.c
@@ -7,10 +7,11 @@
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Danny Auble <da@llnl.gov>
  *
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/sview/block_info.c b/src/sview/block_info.c
index 1ae5873e830ee625a5ac7a0a5f92f2194b9f674e..0ad3790eef0666dd7c5b1cfc2d286132b108738f 100644
--- a/src/sview/block_info.c
+++ b/src/sview/block_info.c
@@ -7,10 +7,11 @@
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Danny Auble <da@llnl.gov>
  * 
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -76,6 +77,7 @@ enum {
 #ifdef HAVE_BGL
 	SORTID_USE,
 #endif
+	SORTID_NODE_INX,
 	SORTID_USER,
 	SORTID_CNT
 };
@@ -117,6 +119,8 @@ static display_data_t display_data_block[] = {
 #endif
 	{G_TYPE_STRING, SORTID_MLOADERIMAGE, "Mloader Image",
 	 FALSE, EDIT_NONE, refresh_block, create_model_block, admin_edit_block},
+	{G_TYPE_POINTER, SORTID_NODE_INX,  NULL, FALSE, EDIT_NONE, 
+	 refresh_resv, create_model_resv, admin_edit_resv},
 	{G_TYPE_INT, SORTID_UPDATED, NULL, FALSE, EDIT_NONE, refresh_block,
 	 create_model_block, admin_edit_block},
 	{G_TYPE_NONE, -1, NULL, FALSE, EDIT_NONE}
@@ -133,6 +137,7 @@ static display_data_t options_data_block[] = {
 	{G_TYPE_STRING, PART_PAGE, "Partition", TRUE, BLOCK_PAGE},
 	{G_TYPE_STRING, NODE_PAGE, "Base Partitions", TRUE, BLOCK_PAGE},
 	{G_TYPE_STRING, SUBMIT_PAGE, "Job Submit", FALSE, BLOCK_PAGE},
+	{G_TYPE_STRING, RESV_PAGE, "Reservation", TRUE, BLOCK_PAGE},
 	{G_TYPE_NONE, -1, NULL, FALSE, EDIT_NONE}
 };
 
@@ -336,6 +341,9 @@ static void _update_block_record(sview_block_info_t *block_ptr,
 	gtk_tree_store_set(treestore, iter, SORTID_NODELIST,
 			   block_ptr->nodes, -1);
 
+	gtk_tree_store_set(treestore, iter, 
+			   SORTID_NODE_INX, block_ptr->bp_inx, -1);
+
 #ifdef HAVE_BGL
 	gtk_tree_store_set(treestore, iter, SORTID_BLRTSIMAGE,
 			   block_ptr->blrtsimage, -1);
@@ -444,6 +452,27 @@ static void _update_info_block(List block_list,
 	remove_old(model, SORTID_UPDATED);
 }
 
+static int _sview_block_sort_aval_dec(sview_block_info_t* rec_a,
+				      sview_block_info_t* rec_b)
+{
+	int size_a = rec_a->node_cnt;
+	int size_b = rec_b->node_cnt;
+
+	if (size_a > size_b)
+		return -1;
+	else if (size_a < size_b)
+		return 1;
+
+	if(rec_a->nodes && rec_b->nodes) {
+		size_a = strcmp(rec_a->nodes, rec_b->nodes);
+		if (size_a > 0)
+			return -1;
+		else if (size_a < 0)
+			return 1;
+	}
+	return 0;
+}
+
 static List _create_block_list(partition_info_msg_t *part_info_ptr,
 			       node_select_info_msg_t *node_select_ptr,
 			       int changed)
@@ -457,7 +486,7 @@ static List _create_block_list(partition_info_msg_t *part_info_ptr,
 	if(!changed && block_list) {
 		return block_list;
 	}
-	
+
 	if(block_list) {
 		list_destroy(block_list);
 	}
@@ -518,12 +547,16 @@ static List _create_block_list(partition_info_msg_t *part_info_ptr,
 				break;
 			}
 		}
-		if(block_ptr->bg_conn_type == SELECT_SMALL)
+		if(block_ptr->bg_conn_type >= SELECT_SMALL)
 			block_ptr->size = 0;
 
 		list_append(block_list, block_ptr);
 	}
 	
+	list_sort(block_list,
+		  (ListCmpF)_sview_block_sort_aval_dec);
+
+
 	return block_list;
 }
 
@@ -566,7 +599,7 @@ need_refresh:
 				change_grid_color(
 					popup_win->grid_button_list,
 					block_ptr->bp_inx[j],
-					block_ptr->bp_inx[j+1], i);
+					block_ptr->bp_inx[j+1], i, true);
 				j += 2;
 			}
 			_layout_block_record(treeview, block_ptr, update);
@@ -627,6 +660,8 @@ extern int get_new_info_node_select(node_select_info_msg_t **node_select_ptr,
 	static bool changed = 0;
 		
 	if(!force && ((now - last) < global_sleep_time)) {
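+		/* the caller holds a stale pointer, so report success to
+		 * make it pick up the cached data below */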
+		if(*node_select_ptr != bg_info_ptr)
+			error_code = SLURM_SUCCESS;
 		*node_select_ptr = bg_info_ptr;
 		if(changed) 
 			return SLURM_SUCCESS;
@@ -651,6 +686,10 @@ extern int get_new_info_node_select(node_select_info_msg_t **node_select_ptr,
 	}
 
 	bg_info_ptr = new_bg_ptr;
+
+	if(*node_select_ptr != bg_info_ptr) 
+		error_code = SLURM_SUCCESS;
+	
 	*node_select_ptr = new_bg_ptr;
 #endif
 	return error_code;
@@ -839,10 +878,11 @@ extern void get_info_block(GtkTable *table, display_data_t *display_data)
 
 	if((block_error_code = get_new_info_node_select(&node_select_ptr, 
 							force_refresh))
-	   == SLURM_NO_CHANGE_IN_DATA) { 
+	   == SLURM_NO_CHANGE_IN_DATA) {
 		if((!display_widget || view == ERROR_VIEW) 
-		   || (part_error_code != SLURM_NO_CHANGE_IN_DATA))
+		   || (part_error_code != SLURM_NO_CHANGE_IN_DATA)) {
 			goto display_it;
+		}
 		changed = 0;
 	} else if (block_error_code != SLURM_SUCCESS) {
 		if(view == ERROR_VIEW)
@@ -879,7 +919,7 @@ display_it:
 						  bp_inx[j],
 						  sview_block_info_ptr->
 						  bp_inx[j+1],
-						  i);
+						  i, true);
 			j += 2;
 		}
 		i++;
@@ -926,9 +966,7 @@ extern void specific_info_block(popup_info_t *popup_win)
 	int changed = 1;
 	sview_block_info_t *block_ptr = NULL;
 	int j=0, i=-1;
-	char *host = NULL, *host2 = NULL;
-	hostlist_t hostlist = NULL;
-	int found = 0;
+	hostset_t hostset = NULL;
 	ListIterator itr = NULL;
 	
 	if(!spec_info->display_widget) {
@@ -1015,11 +1053,8 @@ display_it:
 				 popup_win->display_data, SORTID_CNT);
 	}
 
-	if(!popup_win->grid_button_list) {
-		popup_win->grid_button_list = copy_main_button_list();
-		put_buttons_in_table(popup_win->grid_table,
-				     popup_win->grid_button_list);
-	}
+	setup_popup_grid_list(popup_win);
+
 	spec_info->view = INFO_VIEW;
 	if(spec_info->type == INFO_PAGE) {
 		_display_info_block(block_list, popup_win);
@@ -1039,29 +1074,18 @@ display_it:
 				  search_info->gchar_data)) 
 				continue;
 			break;
+		case RESV_PAGE:
 		case NODE_PAGE:
 			if(!block_ptr->nodes)
 				continue;
 			
-			hostlist = hostlist_create(search_info->gchar_data);
-			host = hostlist_shift(hostlist);
-			hostlist_destroy(hostlist);
-			if(!host) 
+			if(!(hostset = hostset_create(search_info->gchar_data)))
 				continue;
-
-			hostlist = hostlist_create(block_ptr->nodes);
-			found = 0;
-			while((host2 = hostlist_shift(hostlist))) { 
-				if(!strcmp(host, host2)) {
-					free(host2);
-					found = 1;
-					break; 
-				}
-				free(host2);
-			}
-			hostlist_destroy(hostlist);
-			if(!found)
+			if(!hostset_intersects(hostset, block_ptr->nodes)) {
+				hostset_destroy(hostset);
 				continue;
+			}
+			hostset_destroy(hostset);				
 			break;
 		case BLOCK_PAGE:
 			switch(search_info->search_type) {
@@ -1107,7 +1131,7 @@ display_it:
 			change_grid_color(
 				popup_win->grid_button_list,
 				block_ptr->bp_inx[j],
-				block_ptr->bp_inx[j+1], i);
+				block_ptr->bp_inx[j+1], i, false);
 			j += 2;
 		}
 	}
@@ -1153,6 +1177,7 @@ extern void popup_all_block(GtkTreeModel *model, GtkTreeIter *iter, int id)
 	int i=0;
 
 	gtk_tree_model_get(model, iter, SORTID_BLOCK, &name, -1);
+
 	switch(id) {
 	case JOB_PAGE:
 		snprintf(title, 100, "Jobs(s) in block %s", name);
@@ -1160,6 +1185,10 @@ extern void popup_all_block(GtkTreeModel *model, GtkTreeIter *iter, int id)
 	case PART_PAGE:
 		snprintf(title, 100, "Partition(s) containing block %s", name);
 		break;
+	case RESV_PAGE:
+		snprintf(title, 100, "Reservation(s) containing block %s",
+			 name);
+		break;
 	case NODE_PAGE:
 		snprintf(title, 100, "Base Partition(s) in block %s", name);
 		break;
@@ -1192,6 +1221,14 @@ extern void popup_all_block(GtkTreeModel *model, GtkTreeIter *iter, int id)
 		gtk_window_present(GTK_WINDOW(popup_win->popup));
 		return;
 	}
+
+	/* Pass the model and the structs from the iter so we can always get
+	   the current node_inx.
+	*/
+	popup_win->model = model;
+	popup_win->iter = *iter;
+	popup_win->node_inx_id = SORTID_NODE_INX;
+
 	switch(id) {
 	case JOB_PAGE:
 		popup_win->spec_info->search_info->gchar_data = name;
@@ -1201,6 +1238,7 @@ extern void popup_all_block(GtkTreeModel *model, GtkTreeIter *iter, int id)
 		gtk_tree_model_get(model, iter, SORTID_PARTITION, &name, -1);
 		popup_win->spec_info->search_info->gchar_data = name;
 		break;
+	case RESV_PAGE: 
 	case NODE_PAGE: 
 		g_free(name);
 		gtk_tree_model_get(model, iter, SORTID_NODELIST, &name, -1);
diff --git a/src/sview/common.c b/src/sview/common.c
index 65bba164889c56bc5357bf372ece78b852407bb3..b8b5e983428b2569ec2ee18b2fced3a45d8bf94c 100644
--- a/src/sview/common.c
+++ b/src/sview/common.c
@@ -1,14 +1,16 @@
 /*****************************************************************************\
  *  common.c - common functions used by tabs in sview
  *****************************************************************************
- *  Copyright (C) 2004-2006 The Regents of the University of California.
+ *  Copyright (C) 2004-2007 The Regents of the University of California.
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Danny Auble <da@llnl.gov>
  *
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -165,6 +167,67 @@ cleanup:
 	return ret;
 }
 
+/* Make a BlueGene node name into a numeric representation of 
+ * its location. 
+ * Value is low_coordinate * 1,000,000 + 
+ *          high_coordinate * 1,000 + I/O node (999 if none)
+ * (e.g. bg123[4] -> 123,123,004, bg[234x235] -> 234,235,999)
+ */
+static int _bp_coordinate(const char *name)
+{
+	int i, io_val = 999, low_val = -1, high_val;
+
+	for (i=0; name[i]; i++) {
+		if (name[i] == '[') {
+			i++;
+			if (low_val < 0) {
+				char *end_ptr;
+				low_val = strtol(name+i, &end_ptr, 10);
+				if ((end_ptr[0] != '\0') &&
+				    (isdigit(end_ptr[1])))
+					high_val = atoi(end_ptr + 1);
+				else
+					high_val = low_val;
+			} else
+				io_val = atoi(name+i);
+			break;
+		} else if ((low_val < 0) && (isdigit(name[i])))
+			low_val = high_val = atoi(name+i);
+	}
+
+	if (low_val < 0)
+		return low_val;
+	return ((low_val * 1000000) + (high_val * 1000) + io_val);
+}
+
+static int _sort_iter_compare_func_bp_list(GtkTreeModel *model,
+					   GtkTreeIter  *a,
+					   GtkTreeIter  *b,
+					   gpointer      userdata)
+{
+	int sortcol = GPOINTER_TO_INT(userdata);
+	int ret = 0;
+	gchar *name1 = NULL, *name2 = NULL;
+	
+	gtk_tree_model_get(model, a, sortcol, &name1, -1);
+	gtk_tree_model_get(model, b, sortcol, &name2, -1);
+	
+	if ((name1 == NULL) || (name2 == NULL)) {
+		if ((name1 == NULL) && (name2 == NULL))
+			goto cleanup; /* both equal => ret = 0 */
+		
+		ret = (name1 == NULL) ? -1 : 1;
+	} else {
+		/* Sort in numeric order based upon coordinates */
+		ret = _bp_coordinate(name1) - _bp_coordinate(name2);
+	}
+cleanup:
+	g_free(name1);
+	g_free(name2);
+	
+	return ret;
+}
+
 static void _editing_started(GtkCellRenderer *cell,
 			     GtkCellEditable *editable,
 			     const gchar     *path,
@@ -287,6 +350,10 @@ static void _selected_page(GtkMenuItem *menuitem,
 		popup_all_block(treedata->model, &treedata->iter, 
 				display_data->id);
 		break;
+	case RESV_PAGE: 
+		popup_all_resv(treedata->model, &treedata->iter, 
+			       display_data->id);
+		break;
 	case ADMIN_PAGE:
 		switch(display_data->id) {
 		case JOB_PAGE:
@@ -301,6 +368,10 @@ static void _selected_page(GtkMenuItem *menuitem,
 			admin_block(treedata->model, &treedata->iter, 
 				    display_data->name);
 			break;
+		case RESV_PAGE:
+			admin_resv(treedata->model, &treedata->iter, 
+				   display_data->name);
+			break;
 		case NODE_PAGE:
 			admin_node(treedata->model, &treedata->iter, 
 				   display_data->name);
@@ -591,7 +662,7 @@ extern GtkTreeStore *create_treestore(GtkTreeView *tree_view,
 	
 	treestore = gtk_tree_store_newv(count, types);
 	if(!treestore) {
-		g_error("Can't create treestore.\n");
+		g_print("Can't create treestore.\n");
 		return NULL;
 	}
 	
@@ -612,7 +683,9 @@ extern GtkTreeStore *create_treestore(GtkTreeView *tree_view,
 			
 			break;
 		case G_TYPE_STRING:
-			if(!strcasecmp(display_data[i].name, "Nodes")) {
+			if(!strcasecmp(display_data[i].name, "Nodes")
+			   || !strcasecmp(display_data[i].name, "Real Memory")
+			   || !strcasecmp(display_data[i].name, "Tmp Disk")) {
 				gtk_tree_sortable_set_sort_func(
 					GTK_TREE_SORTABLE(treestore), 
 					display_data[i].id, 
@@ -620,6 +693,15 @@ extern GtkTreeStore *create_treestore(GtkTreeView *tree_view,
 					GINT_TO_POINTER(display_data[i].id), 
 					NULL); 
 				break;
+			} else if(!strcasecmp(display_data[i].name,
+					      "BP List")) {
+				gtk_tree_sortable_set_sort_func(
+					GTK_TREE_SORTABLE(treestore), 
+					display_data[i].id, 
+					_sort_iter_compare_func_bp_list,
+					GINT_TO_POINTER(display_data[i].id), 
+					NULL); 
+				break;
 			} else {
 				gtk_tree_sortable_set_sort_func(
 					GTK_TREE_SORTABLE(treestore), 
@@ -702,7 +784,7 @@ extern popup_info_t *create_popup_info(int type, int dest_type, char *title)
 	GtkWidget *label = NULL;
 	GtkWidget *table = NULL;
 	popup_info_t *popup_win = xmalloc(sizeof(popup_info_t));
-	
+
 	list_push(popup_list, popup_win);
 	
 	popup_win->spec_info = xmalloc(sizeof(specific_info_t));
@@ -712,7 +794,7 @@ extern popup_info_t *create_popup_info(int type, int dest_type, char *title)
 	popup_win->spec_info->search_info->gchar_data = NULL;
 	popup_win->spec_info->search_info->int_data = NO_VAL;
 	popup_win->spec_info->search_info->int_data2 = NO_VAL;
-	
+
 	popup_win->spec_info->type = type;
 	popup_win->spec_info->title = xstrdup(title);
 	popup_win->popup = gtk_dialog_new_with_buttons(
@@ -760,17 +842,9 @@ extern popup_info_t *create_popup_info(int type, int dest_type, char *title)
 	bin = GTK_BIN(&view->bin);
 	popup_win->grid_table = GTK_TABLE(bin->child);
 	popup_win->grid_button_list = NULL;
-#ifdef HAVE_BG
-	if(dest_type != NODE_PAGE || type != INFO_PAGE) {
-//	gtk_widget_set_size_request(GTK_WIDGET(window), 164, -1);
-		popup_win->grid_button_list = copy_main_button_list();
-		put_buttons_in_table(popup_win->grid_table,
-				     popup_win->grid_button_list);
-	}
-#endif
 
 	table = gtk_table_new(1, 2, FALSE);
-
+	
 	gtk_table_attach(GTK_TABLE(table), GTK_WIDGET(window), 0, 1, 0, 1,
 			 GTK_SHRINK, GTK_EXPAND | GTK_FILL,
 			 0, 0);
@@ -935,6 +1009,9 @@ extern void *popup_thr(popup_info_t *popup_win)
 	case BLOCK_PAGE: 
 		specifc_info = specific_info_block;
 		break;
+	case RESV_PAGE: 
+		specifc_info = specific_info_resv;
+		break;
 	case SUBMIT_PAGE: 
 	default:
 		g_print("thread got unknown type %d\n", popup_win->type);
diff --git a/src/sview/grid.c b/src/sview/grid.c
index d8df368987b8dd5a4e25371d283ea39c6f23bf5f..663de187a794934a24d3d921c475afa103420d99 100644
--- a/src/sview/grid.c
+++ b/src/sview/grid.c
@@ -5,10 +5,11 @@
  *  Copyright (C) 2008 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Danny Auble <da@llnl.gov>, et. al.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -37,6 +38,10 @@
 \*****************************************************************************/
 #include "sview.h"
 
+#ifdef HAVE_BG
+#include "src/plugins/select/bluegene/plugin/bluegene.h"
+#endif
+
 List grid_button_list = NULL;
 List blinking_button_list = NULL;
 
@@ -45,6 +50,8 @@ char *sview_colors[] = {"#0000FF", "#00FF00", "#00FFFF", "#FFFF00",
 			"#715627", "#6A8CA2", "#4C7127", "#25B9B9",
 			"#A020F0", "#8293ED", "#FFA500", "#FFC0CB",
 			"#8B6914", "#18A24E", "#F827FC", "#B8A40C"};
+char *blank_color = "#919191";
+
 int sview_colors_cnt = 20;
 
 GStaticMutex blinking_mutex = G_STATIC_MUTEX_INIT;
@@ -167,6 +174,7 @@ void _put_button_as_down(grid_button_t *grid_button, int state)
 	GdkColor color;
 
 	if(GTK_IS_EVENT_BOX(grid_button->button)) {
+		//gtk_widget_set_sensitive (grid_button->button, TRUE);
 		return;
 	}
 	gtk_widget_destroy(grid_button->button);		
@@ -212,6 +220,7 @@ void _put_button_as_down(grid_button_t *grid_button, int state)
 void _put_button_as_up(grid_button_t *grid_button)
 {
 	if(GTK_IS_BUTTON(grid_button->button)) {
+		//gtk_widget_set_sensitive (grid_button->button, TRUE);
 		return;
 	}
 	gtk_widget_destroy(grid_button->button);		
@@ -237,6 +246,37 @@ void _put_button_as_up(grid_button_t *grid_button)
 	return;
 }
 
+void _put_button_as_inactive(grid_button_t *grid_button)
+{
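+	/* rebuild the button as a plain, uncolored widget so it reads as
+	 * inactive; the node remains clickable for stats via _open_node() */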
+	if(GTK_IS_BUTTON(grid_button->button)) {
+		//gtk_widget_set_sensitive (grid_button->button, FALSE);
+		return;
+	}
+	gtk_widget_destroy(grid_button->button);		
+	grid_button->button = gtk_button_new();
+	gtk_widget_set_size_request(grid_button->button, 10, 10);
+	//gtk_widget_set_sensitive (grid_button->button, FALSE);
+
+	gtk_tooltips_set_tip(grid_button->tip,
+			     grid_button->button,
+			     grid_button->node_name,
+			     "click for node stats");
+	g_signal_connect(G_OBJECT(grid_button->button), 
+			 "button-press-event",
+			 G_CALLBACK(_open_node),
+			 grid_button);
+	if(grid_button->table) 
+		gtk_table_attach(grid_button->table, grid_button->button,
+				 grid_button->table_x,
+				 (grid_button->table_x+1), 
+				 grid_button->table_y,
+				 (grid_button->table_y+1),
+				 GTK_SHRINK, GTK_SHRINK,
+				 1, 1);
+	gtk_widget_show_all(grid_button->button);
+	return;
+}
+
 #ifdef HAVE_BG
 static int _block_in_node(int *bp_inx, int inx)
 {
@@ -281,14 +321,20 @@ extern grid_button_t *create_grid_button_from_another(
 		return NULL;
 	if(color_inx >= 0)
 		color_inx %= sview_colors_cnt;
-			
+       			
 	send_grid_button = xmalloc(sizeof(grid_button_t));
 	memcpy(send_grid_button, grid_button, sizeof(grid_button_t));
 	node_base_state = send_grid_button->state & NODE_STATE_BASE;
 	/* need to set the table to empty because we will want to fill
 	   this into the new table later */
 	send_grid_button->table = NULL;
-	if((color_inx >= 0) && node_base_state == NODE_STATE_DOWN) {
+	if(color_inx == MAKE_BLACK) {
+		send_grid_button->button = gtk_button_new();
+		//gtk_widget_set_sensitive (send_grid_button->button, FALSE);
+		gdk_color_parse(blank_color, &color);
+		gtk_widget_modify_bg(send_grid_button->button, 
+				     GTK_STATE_NORMAL, &color);
+	} else if((color_inx >= 0) && node_base_state == NODE_STATE_DOWN) {
 		GtkWidget *image = gtk_image_new_from_stock(
 			GTK_STOCK_CANCEL,
 			GTK_ICON_SIZE_SMALL_TOOLBAR);
@@ -349,8 +395,9 @@ extern grid_button_t *create_grid_button_from_another(
 	return send_grid_button;
 }
 
+/* Recolor the buttons with indexes in [start, end]; start == -1 selects all.
+ * color_inx may be a palette index, MAKE_BLACK (blank the button out), or
+ * MAKE_WHITE; buttons not marked used are skipped unless change_unused. */
 extern char *change_grid_color(List button_list, int start, int end,
-			       int color_inx)
+			       int color_inx, bool change_unused)
 {
 	ListIterator itr = NULL;
 	grid_button_t *grid_button = NULL;
@@ -360,21 +407,45 @@ extern char *change_grid_color(List button_list, int start, int end,
 	if(!button_list)
 		return NULL;
 
+	if(color_inx >= 0) {
+		color_inx %= sview_colors_cnt;
+		gdk_color_parse(sview_colors[color_inx], &color);
+	} else if(color_inx == MAKE_BLACK) 
+		gdk_color_parse(blank_color, &color);
+	else 
+		gdk_color_parse("#FFFFFF", &color);
+
 	itr = list_iterator_create(button_list);
-	color_inx %= sview_colors_cnt;
-	gdk_color_parse(sview_colors[color_inx], &color);
 	while((grid_button = list_next(itr))) {
-		if ((grid_button->inx < start)
-		    ||  (grid_button->inx > end)) 
+		if(start != -1)
+			if ((grid_button->inx < start)
+			    ||  (grid_button->inx > end)) 
+				continue;
+		
+		if(!change_unused && !grid_button->used)
 			continue;
+
+		if(color_inx == MAKE_BLACK) {
+			_put_button_as_inactive(grid_button);
+			grid_button->color = blank_color;
+			gtk_widget_modify_bg(grid_button->button, 
+					     GTK_STATE_NORMAL, &color);
+
+			continue;
+		}
+
 		node_base_state = grid_button->state & NODE_STATE_BASE;
+	
 		if (node_base_state == NODE_STATE_DOWN) {
 			_put_button_as_down(grid_button, NODE_STATE_DOWN);
 		} else if (grid_button->state & NODE_STATE_DRAIN) {
 			_put_button_as_down(grid_button, NODE_STATE_DRAIN);
 		} else {
 			_put_button_as_up(grid_button);
-			grid_button->color = sview_colors[color_inx];
+			if(color_inx == MAKE_WHITE) 
+				grid_button->color = "#FFFFFF";
+			else
+				grid_button->color = sview_colors[color_inx];
 			gtk_widget_modify_bg(grid_button->button, 
 					     GTK_STATE_NORMAL, &color);
 		}
@@ -383,6 +454,28 @@ extern char *change_grid_color(List button_list, int start, int end,
 	return sview_colors[color_inx];
 }
 
+extern void set_grid_used(List button_list, int start, int end,
+			  bool used)
+{
+	ListIterator itr = NULL;
+	grid_button_t *grid_button = NULL;
+
+	if(!button_list)
+		return;
+
+	itr = list_iterator_create(button_list);
+	while((grid_button = list_next(itr))) {
+		if(start != -1)
+			if ((grid_button->inx < start)
+			    ||  (grid_button->inx > end)) 
+				continue;
+		grid_button->used = used;
+	}
+	list_iterator_destroy(itr);
+
+	return;
+}
+
 extern void get_button_list_from_main(List *button_list, int start, int end,
 				      int color_inx)
 {
@@ -423,7 +516,7 @@ extern void get_button_list_from_main(List *button_list, int start, int end,
 	return;
 }
 
-extern List copy_main_button_list()
+extern List copy_main_button_list(int initial_color)
 {
 	ListIterator itr = NULL;
 	grid_button_t *grid_button = NULL;
@@ -433,12 +526,13 @@ extern List copy_main_button_list()
 	itr = list_iterator_create(grid_button_list);
 	while((grid_button = list_next(itr))) {
 		send_grid_button = create_grid_button_from_another(
-			grid_button, grid_button->node_name, -1);
+			grid_button, grid_button->node_name, initial_color);
 		if(send_grid_button) {
 			g_signal_connect(G_OBJECT(send_grid_button->button),
 					 "button-press-event",
 					 G_CALLBACK(_open_node),
 					 send_grid_button);
+			send_grid_button->used = false;
 			list_append(button_list, send_grid_button);
 		}
 	}
@@ -785,7 +879,7 @@ end_it:
 
 extern void sview_init_grid()
 {
-	node_info_msg_t *node_info_ptr = NULL;
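+	/* static so the previous snapshot stays usable when
+	 * get_new_info_node() reports SLURM_NO_CHANGE_IN_DATA */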
+	static node_info_msg_t *node_info_ptr = NULL;
 	int error_code = SLURM_SUCCESS;
 	node_info_t *node_ptr = NULL;
 	int i = 0;
@@ -794,20 +888,163 @@ extern void sview_init_grid()
 	grid_button_t *grid_button = NULL;
 	GdkColor color;
 
+#ifdef HAVE_BG
+	int bg_error_code = SLURM_SUCCESS;
+	int part_error_code = SLURM_SUCCESS;
+	bg_info_record_t *bg_info_record = NULL;
+	static partition_info_msg_t *part_info_ptr = NULL;
+	static node_select_info_msg_t *node_select_ptr = NULL;
+	int j = 0;
+	static int node_scaling = 0;
+	int alter = 0;
+#endif
+
 	if((error_code = get_new_info_node(&node_info_ptr, force_refresh))
 	   == SLURM_NO_CHANGE_IN_DATA) { 
+#ifdef HAVE_BG
+		goto get_bg;
+#else
+		/* need to clear out old data */
+		sview_reset_grid();
 		return;
+#endif
 	} else if (error_code != SLURM_SUCCESS) {
 		return;
 	}
 
+#ifdef HAVE_BG
+get_bg:
+	if((part_error_code = get_new_info_part(&part_info_ptr, force_refresh))
+	   == SLURM_NO_CHANGE_IN_DATA) { 
+		// no change; fall through to the node select info below
+	} else if(part_error_code != SLURM_SUCCESS) {
+		return;
+	}
+
+	if((bg_error_code = get_new_info_node_select(&node_select_ptr, 
+						     force_refresh))
+	   == SLURM_NO_CHANGE_IN_DATA) {
+		if(error_code == SLURM_NO_CHANGE_IN_DATA
+		   && part_error_code == SLURM_NO_CHANGE_IN_DATA) {
+			/* need to clear out old data */
+			sview_reset_grid();
+			return;		
+		}
+	} else if(bg_error_code != SLURM_SUCCESS) {
+		return;
+	}
+
+	node_scaling = part_info_ptr->partition_array[0].node_scaling;
+
+	/* Reset the node state to reflect what the blocks report */
+	for (i=0; i<node_info_ptr->record_count; i++) {
+		node_ptr = &(node_info_ptr->node_array[i]);
+		/* In each node_ptr we overload the threads var with the
+		 * total number of cnodes (node_scaling); the used_cpus var
+		 * will be used to tell how many cnodes are allocated, and
+		 * cores will represent the cnodes in an error state.  So we
+		 * can get an idle count by subtracting those two numbers
+		 * from the total possible cnodes (what remains are the
+		 * idle cnodes).
+		 */
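+		/* For example (hypothetical numbers): with node_scaling = 32,
+		 * a node with used_cpus = 16 and cores = 8 has
+		 * 32 - 16 - 8 = 8 idle cnodes. */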
+		node_ptr->threads = node_scaling;
+		node_ptr->cores = 0;
+		node_ptr->used_cpus = 0;
+		if((node_ptr->node_state & NODE_STATE_BASE) == NODE_STATE_DOWN) 
+			continue;
+
+		if(node_ptr->node_state & NODE_STATE_DRAIN) {
+			if(node_ptr->node_state & NODE_STATE_FAIL) {
+				node_ptr->node_state &= ~NODE_STATE_DRAIN;
+				node_ptr->node_state &= ~NODE_STATE_FAIL;
+			} else {
+				node_ptr->cores += node_scaling;
+			}
+		}
+		node_ptr->node_state |= NODE_STATE_IDLE;
+	}
+
+	for (i=0; i<node_select_ptr->record_count; i++) {
+		bg_info_record = &(node_select_ptr->bg_info_array[i]);
+
+		/* this block is idle we won't mark it */
+		if (bg_info_record->job_running == NO_JOB_RUNNING)
+			continue;
+
+		if(bg_info_record->conn_type == SELECT_SMALL) 
+			alter = bg_info_record->node_cnt;
+		else
+			alter = node_scaling;
+
+/* 		g_print("Got here for %s with %d and %d\n", */
+/* 			bg_info_record->bg_block_id, */
+/* 			bg_info_record->state, */
+/* 			bg_info_record->job_running); */
+		/* Adjust for the drained or error blocks, and for the jobs
+		   running on the blocks, as explained below. */
+		j = 0;
+		while(bg_info_record->bp_inx[j] >= 0) {
+			int i2 = 0;
+			for(i2 = bg_info_record->bp_inx[j];
+			    i2 <= bg_info_record->bp_inx[j+1];
+			    i2++) {
+				node_ptr = &(node_info_ptr->node_array[i2]);
+				/* cores is overloaded to be the
+				 * cnodes in an error state and
+				 * used_cpus is overloaded to be the
+				 * cnodes in use.  No block should be
+				 * sent in here unless it is in use,
+				 * meaning either its owner isn't
+				 * slurm or the block is in an error
+				 * state.
+				 */
+				if((node_ptr->node_state & NODE_STATE_BASE) 
+				   == NODE_STATE_DOWN) 
+					continue;
+				
+				if(bg_info_record->state
+				   == RM_PARTITION_ERROR) {
+					node_ptr->cores += alter;
+					node_ptr->node_state 
+						|= NODE_STATE_DRAIN;
+					node_ptr->node_state
+						|= NODE_STATE_FAIL;
+				} else if(bg_info_record->job_running
+					  > NO_JOB_RUNNING)  
+					node_ptr->used_cpus += alter;
+				else 
+					g_print("Hey we didn't get anything "
+						"here\n");
+			}
+			j += 2;
+		}
+	}
+	
+	/* now set up the extra nodes with the correct
+	   information */
+	/* for (i=0; i<node_info_ptr->record_count; i++) { */
+/* 		node_ptr = &(node_info_ptr->node_array[i]); */
+		
+/* 		if((node_ptr->node_state & NODE_STATE_BASE) == NODE_STATE_DOWN)  */
+/* 			continue; */
+		
+/* 		/\* get the error node count *\/ */
+/* 		if(!node_ptr->cores) { */
+/* 			node_ptr->node_state &= ~NODE_STATE_DRAIN; */
+/* 			continue; */
+/* 		} */
+/* 		/\* just to get this on all the charts we will drain it */
+/* 		   here.  This must be removed in the part info *\/ */
+/* 		node_ptr->node_state |= NODE_STATE_DRAIN; */
+/* 	} */
+#endif
+
 	if(!grid_button_list) {
 		g_print("you need to run get_system_stats() first\n");
 		exit(0);
 	}
 	
 	gdk_color_parse("white", &color);
-	
 	itr = list_iterator_create(grid_button_list);
 	for(i=0; i<node_info_ptr->record_count; i++) {
 		node_ptr = &node_info_ptr->node_array[i];
@@ -861,3 +1098,46 @@ extern void sview_reset_grid()
 	}
 	list_iterator_destroy(itr);
 }
+
+/* set up a popup window's grid: clear and reuse its existing button
+ * list if present, otherwise copy the main grid's buttons */
+extern void setup_popup_grid_list(popup_info_t *popup_win)
+{
+	int def_color = MAKE_BLACK;
+
+	if(!popup_win->model) 
+		def_color = MAKE_WHITE;
+
+	if(popup_win->grid_button_list) {
+		change_grid_color(popup_win->grid_button_list, -1, -1,
+				  def_color, true);
+		set_grid_used(popup_win->grid_button_list, -1, -1, false);
+	} else {	     
+		popup_win->grid_button_list =
+			copy_main_button_list(def_color);
+		put_buttons_in_table(popup_win->grid_table,
+				     popup_win->grid_button_list);
+		popup_win->full_grid = 1;
+	}
+
+	/* refresh the pointer */
+	if(popup_win->model) 
+		gtk_tree_model_get(popup_win->model, &popup_win->iter,
+				   popup_win->node_inx_id,
+				   &popup_win->node_inx, -1);
+
+	if(popup_win->node_inx) {
+		int j=0;
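+		/* node_inx holds [first,last] index pairs terminated
+		 * by -1, so step through it two entries at a time */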
+		while(popup_win->node_inx[j] >= 0) {
+			set_grid_used(popup_win->grid_button_list,
+				      popup_win->node_inx[j],
+				      popup_win->node_inx[j+1], true);
+			change_grid_color(
+				popup_win->grid_button_list,
+				popup_win->node_inx[j],
+				popup_win->node_inx[j+1], MAKE_WHITE, true);
+			j += 2;
+		}
+	} else
+		set_grid_used(popup_win->grid_button_list, -1, -1, true);	
+
+}
diff --git a/src/sview/job_info.c b/src/sview/job_info.c
index d6b863331953334a4c641eaeb763f334339f03c8..8e01556150c846ad1fd559f1af84d06126c0913c 100644
--- a/src/sview/job_info.c
+++ b/src/sview/job_info.c
@@ -3,14 +3,15 @@
  *  mode of sview.
  *****************************************************************************
  *  Copyright (C) 2004-2007 The Regents of the University of California.
- *  Copyright (C) 2008 Lawrence Livermore National Security.
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Danny Auble <da@llnl.gov>
  *
- *  LLNL-CODE-402394. 
+ *  CODE-OCEC-09-009. All rights reserved. 
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -62,6 +63,9 @@ enum {
 	SORTID_ACTION,
 	SORTID_ALLOC, 
 	SORTID_ALLOC_NODE,
+#ifdef HAVE_CRAY_XT
+	SORTID_ALPS_RESV_ID,
+#endif
 	SORTID_BATCH,
 #ifdef HAVE_BG
 	SORTID_BLRTSIMAGE,
@@ -122,6 +126,7 @@ enum {
 	SORTID_REASON,
 	SORTID_REQ_NODELIST,
 	SORTID_REQ_PROCS,
+	SORTID_RESV_NAME,
 #ifdef HAVE_BG
 	SORTID_ROTATE,
 #endif
@@ -138,9 +143,10 @@ enum {
 	SORTID_TIME,
 	SORTID_TIMELIMIT,
 	SORTID_TMP_DISK,
-	SORTID_WCKEY,
 	SORTID_UPDATED,
 	SORTID_USER, 
+	SORTID_WCKEY,
+	SORTID_NODE_INX, 
 	SORTID_CNT
 };
 
@@ -179,6 +185,10 @@ static display_data_t display_data_job[] = {
 	 FALSE, EDIT_TEXTBOX, refresh_job, create_model_job, admin_edit_job},
 	{G_TYPE_STRING, SORTID_RAMDISKIMAGE, "Ramdisk Image",
 	 FALSE, EDIT_TEXTBOX, refresh_job, create_model_job, admin_edit_job},
+#endif
+#ifdef HAVE_CRAY_XT
+	{G_TYPE_STRING, SORTID_ALPS_RESV_ID, "ALPS Resv ID", TRUE, EDIT_NONE, 
+	 refresh_job, create_model_job, admin_edit_job},
 #endif
 	{G_TYPE_STRING, SORTID_USER, "User", TRUE, EDIT_NONE, refresh_job,
 	 create_model_job, admin_edit_job},
@@ -242,6 +252,8 @@ static display_data_t display_data_job[] = {
 	 FALSE, EDIT_NONE, refresh_job, create_model_job, admin_edit_job},
 	{G_TYPE_STRING, SORTID_REQ_PROCS, "Requested Procs", 
 	 FALSE, EDIT_TEXTBOX, refresh_job, create_model_job, admin_edit_job},
+	{G_TYPE_STRING, SORTID_RESV_NAME, "Reservation Name",
+	 FALSE, EDIT_TEXTBOX, refresh_job, create_model_job, admin_edit_job},
 	{G_TYPE_STRING, SORTID_MIN_NODES, "Min Nodes", 
 	 FALSE, EDIT_TEXTBOX, refresh_job, create_model_job, admin_edit_job},
 	{G_TYPE_STRING, SORTID_MAX_NODES, "Max Nodes", 
@@ -272,7 +284,7 @@ static display_data_t display_data_job[] = {
 	 FALSE, EDIT_NONE, refresh_job, create_model_job, admin_edit_job},
 	{G_TYPE_STRING, SORTID_ACCOUNT, "Account Charged", 
 	 FALSE, EDIT_NONE, refresh_job, create_model_job, admin_edit_job},
-	{G_TYPE_STRING, SORTID_REASON, "Wait Reason", 
+	{G_TYPE_STRING, SORTID_REASON, "Reason Waiting", 
 	 FALSE, EDIT_NONE, refresh_job, create_model_job, admin_edit_job},
 	{G_TYPE_STRING, SORTID_FEATURES, "Features", 
 	 FALSE, EDIT_TEXTBOX, refresh_job, create_model_job, admin_edit_job},
@@ -290,9 +302,11 @@ static display_data_t display_data_job[] = {
 	 FALSE, EDIT_NONE, refresh_job, create_model_job, admin_edit_job},
 	{G_TYPE_STRING, SORTID_COMMENT, "Comment", 
 	 FALSE, EDIT_NONE, refresh_job, create_model_job, admin_edit_job},
+	{G_TYPE_POINTER, SORTID_NODE_INX,  NULL, FALSE, EDIT_NONE, 
+	 refresh_job, create_model_job, admin_edit_job},
 	{G_TYPE_INT, SORTID_UPDATED, NULL, FALSE, EDIT_NONE, refresh_job,
 	 create_model_job, admin_edit_job},
-	{G_TYPE_NONE, EDIT_NONE, NULL, FALSE, EDIT_NONE}
+	{G_TYPE_NONE, -1, NULL, FALSE, EDIT_NONE}
 };
 
 static display_data_t options_data_job[] = {
@@ -309,6 +323,7 @@ static display_data_t options_data_job[] = {
 #else
 	{G_TYPE_STRING, NODE_PAGE, "Nodes", TRUE, JOB_PAGE},
 #endif
+	{G_TYPE_STRING, RESV_PAGE, "Reservation", TRUE, JOB_PAGE},
 	{G_TYPE_NONE, -1, NULL, FALSE, EDIT_NONE}
 };
 
@@ -605,7 +620,6 @@ static const char *_set_job_msg(job_desc_msg_t *job_msg, const char *new_text,
 			goto return_error;
 		}
 		job_msg->nice = NICE_OFFSET + temp_int;
-		
 		break;
 	case SORTID_REQ_PROCS:
 		temp_int = strtol(new_text, (char **)NULL, 10);
@@ -615,6 +629,10 @@ static const char *_set_job_msg(job_desc_msg_t *job_msg, const char *new_text,
 			goto return_error;
 		job_msg->num_procs = (uint32_t)temp_int;
 		break;
+	case SORTID_RESV_NAME:
+		job_msg->reservation = xstrdup(new_text);
+		type = "reservation name";
+		break;
 	case SORTID_MIN_NODES:
 		temp_int = strtol(new_text, (char **)NULL, 10);
 		
@@ -664,27 +682,23 @@ static const char *_set_job_msg(job_desc_msg_t *job_msg, const char *new_text,
 		type = "name";
 		break;
 	case SORTID_WCKEY:		
-		xstrfmtcat(job_msg->name, "\"%s", new_text);
+		job_msg->wckey = xstrdup(new_text);
 		type = "wckey";
 		break;
 	case SORTID_SHARED:
-		if (!strcasecmp(new_text, "yes")) {
+		if (!strcasecmp(new_text, "yes")) 
 			job_msg->shared = 1;
-			
-		} else {
+		else 
 			job_msg->shared = 0;
 			
-		}
 		type = "shared";
 		break;
 	case SORTID_CONTIGUOUS:
-		if (!strcasecmp(new_text, "yes")) {
+		if (!strcasecmp(new_text, "yes")) 
 			job_msg->contiguous = 1;
-			
-		} else {
+		else 
 			job_msg->contiguous = 0;
 			
-		}
 		type = "contiguous";	
 		break;
 	case SORTID_REQ_NODELIST:		
@@ -1039,7 +1053,7 @@ static void _layout_job_record(GtkTreeView *treeview,
 			       sview_job_info_t *sview_job_info_ptr, 
 			       int update)
 {
-	char *nodes = NULL, *uname = NULL, *jname = NULL, *wckey = NULL;
+	char *nodes = NULL, *reason = NULL, *uname = NULL;
 	char tmp_char[50];
 	time_t now_time = time(NULL);
 	job_info_t *job_ptr = sview_job_info_ptr->job_ptr;
@@ -1050,23 +1064,6 @@ static void _layout_job_record(GtkTreeView *treeview,
 	GtkTreeStore *treestore = 
 		GTK_TREE_STORE(gtk_tree_view_get_model(treeview));
 
-	if (job_ptr->name && job_ptr->name[0]) {
-		char *temp = NULL;
-		/* first set the jname to the job_ptr->name */
-		jname = xstrdup(job_ptr->name);
-		/* then grep for " since that is the delimiter for
-		   the wckey */
-		if((temp = strchr(jname, '\"'))) {
-			/* if we have a wckey set the " to NULL to
-			 * end the jname */
-			temp[0] = '\0';
-			/* increment and copy the remainder */
-			temp++;
-			wckey = xstrdup(temp);
-		}
-	}
-	
-
 	if(!treestore)
 		return;
 	if(!job_ptr->nodes || !strcasecmp(job_ptr->nodes,"waiting...")) {
@@ -1155,6 +1152,16 @@ static void _layout_job_record(GtkTreeView *treeview,
 					   tmp_char, 
 					   sizeof(tmp_char), 
 					   SELECT_PRINT_BG_ID));
+#endif
+#ifdef HAVE_CRAY_XT
+	add_display_treestore_line(update, treestore, &iter, 
+				   find_col_name(display_data_job,
+						 SORTID_ALPS_RESV_ID), 
+				   select_g_sprint_jobinfo(
+					   job_ptr->select_jobinfo, 
+					   tmp_char, 
+					   sizeof(tmp_char), 
+					   SELECT_PRINT_RESV_ID));
 #endif
 	uname = uid_to_string((uid_t)job_ptr->user_id);
 	add_display_treestore_line(update, treestore, &iter, 
@@ -1177,12 +1184,12 @@ static void _layout_job_record(GtkTreeView *treeview,
 	add_display_treestore_line(update, treestore, &iter, 
 				   find_col_name(display_data_job,
 						 SORTID_NAME), 
-				   jname);
+				   job_ptr->name);
 	
 	add_display_treestore_line(update, treestore, &iter, 
 				   find_col_name(display_data_job,
 						 SORTID_WCKEY), 
-				   wckey);
+				   job_ptr->wckey);
 	
 	sprintf(tmp_char, "%u", job_ptr->priority);
 	add_display_treestore_line(update, treestore, &iter, 
@@ -1336,6 +1343,16 @@ static void _layout_job_record(GtkTreeView *treeview,
 					   sizeof(tmp_char), 
 					   SELECT_PRINT_RAMDISK_IMAGE));
 	
+#endif
+#ifdef HAVE_CRAY_XT
+	add_display_treestore_line(update, treestore, &iter, 
+				   find_col_name(display_data_job,
+						 SORTID_ALPS_RESV_ID), 
+				   select_g_sprint_jobinfo(
+					   job_ptr->select_jobinfo, 
+					   tmp_char, 
+					   sizeof(tmp_char), 
+					   SELECT_PRINT_RESV_ID));
 #endif
 
 	if(job_ptr->contiguous)
@@ -1433,10 +1450,13 @@ static void _layout_job_record(GtkTreeView *treeview,
 						 SORTID_ACCOUNT),
 				   job_ptr->account);
 
+	if (job_ptr->state_desc)
+		reason = job_ptr->state_desc;
+	else
+		reason = job_reason_string(job_ptr->state_reason);
 	add_display_treestore_line(update, treestore, &iter, 
 				   find_col_name(display_data_job,
-						 SORTID_REASON),
-				   job_reason_string(job_ptr->state_reason));
+						 SORTID_REASON), reason);
 
 	add_display_treestore_line(update, treestore, &iter, 
 				   find_col_name(display_data_job,
@@ -1447,15 +1467,13 @@ static void _layout_job_record(GtkTreeView *treeview,
 				   find_col_name(display_data_job,
 						 SORTID_COMMENT),
 				   job_ptr->comment);
-	xfree(jname);
-	xfree(wckey);
 }
 
 static void _update_job_record(sview_job_info_t *sview_job_info_ptr, 
 			       GtkTreeStore *treestore,
 			       GtkTreeIter *iter)
 {
-	char *nodes = NULL, *uname = NULL, *jname = NULL, *wckey = NULL;
+	char *nodes = NULL, *reason = NULL, *uname = NULL;
 	char tmp_char[50];
 	time_t now_time = time(NULL);
 	GtkTreeIter step_iter;
@@ -1464,22 +1482,6 @@ static void _update_job_record(sview_job_info_t *sview_job_info_ptr,
 	struct group *group_info = NULL;
 	uint16_t term_sig = 0;
      
-	if (job_ptr->name && job_ptr->name[0]) {
-		char *temp = NULL;
-		/* first set the jname to the job_ptr->name */
-		jname = xstrdup(job_ptr->name);
-		/* then grep for " since that is the delimiter for
-		   the wckey */
-		if((temp = strchr(jname, '\"'))) {
-			/* if we have a wckey set the " to NULL to
-			 * end the jname */
-			temp[0] = '\0';
-			/* increment and copy the remainder */
-			temp++;
-			wckey = xstrdup(temp);
-		}
-	}
-	
 	gtk_tree_store_set(treestore, iter, SORTID_UPDATED, 1, -1);
 	if(!job_ptr->nodes || !strcasecmp(job_ptr->nodes,"waiting...")) {
 		sprintf(tmp_char,"00:00:00");
@@ -1609,6 +1611,15 @@ static void _update_job_record(sview_job_info_t *sview_job_info_ptr,
 				   sizeof(tmp_char), 
 				   SELECT_PRINT_RAMDISK_IMAGE), -1);
 	
+#endif
+#ifdef HAVE_CRAY_XT
+	gtk_tree_store_set(treestore, iter, 
+			   SORTID_ALPS_RESV_ID, 
+			   select_g_sprint_jobinfo(
+				   job_ptr->select_jobinfo, 
+				   tmp_char, 
+				   sizeof(tmp_char), 
+				   SELECT_PRINT_RESV_ID), -1);
 #endif
 	uname = uid_to_string((uid_t)job_ptr->user_id);
 	gtk_tree_store_set(treestore, iter, 
@@ -1626,8 +1637,8 @@ static void _update_job_record(sview_job_info_t *sview_job_info_ptr,
 			   SORTID_GROUP, 
 			   tmp_char, -1);
 		
-	gtk_tree_store_set(treestore, iter, SORTID_NAME, jname, -1);
-	gtk_tree_store_set(treestore, iter, SORTID_WCKEY, wckey, -1);
+	gtk_tree_store_set(treestore, iter, SORTID_NAME, job_ptr->name, -1);
+	gtk_tree_store_set(treestore, iter, SORTID_WCKEY, job_ptr->wckey, -1);
 	gtk_tree_store_set(treestore, iter, 
 			   SORTID_STATE, 
 			   job_state_string(job_ptr->job_state), -1);
@@ -1654,6 +1665,10 @@ static void _update_job_record(sview_job_info_t *sview_job_info_ptr,
 			   SORTID_NUM_PROCS, tmp_char, -1);
 	
 	gtk_tree_store_set(treestore, iter, SORTID_NODELIST, nodes, -1);
+
+	gtk_tree_store_set(treestore, iter, 
+			   SORTID_NODE_INX, job_ptr->node_inx, -1);
+
 	gtk_tree_store_set(treestore, iter, SORTID_REQ_NODELIST,
 			   job_ptr->req_nodes, -1);
 	gtk_tree_store_set(treestore, iter, SORTID_EXC_NODELIST,
@@ -1697,6 +1712,9 @@ static void _update_job_record(sview_job_info_t *sview_job_info_ptr,
 	gtk_tree_store_set(treestore, iter,
 			   SORTID_REQ_PROCS, tmp_char, -1);
 
+	gtk_tree_store_set(treestore, iter,
+			   SORTID_RESV_NAME, job_ptr->resv_name, -1);
+
 	sprintf(tmp_char, "%u", job_ptr->min_sockets);
 	gtk_tree_store_set(treestore, iter,
 			   SORTID_MIN_SOCKETS, tmp_char, -1);
@@ -1746,9 +1764,13 @@ static void _update_job_record(sview_job_info_t *sview_job_info_ptr,
 
 	gtk_tree_store_set(treestore, iter,
 			   SORTID_FEATURES, job_ptr->features, -1);
+	if (job_ptr->state_desc)
+		reason = job_ptr->state_desc;
+	else
+		reason = job_reason_string(job_ptr->state_reason);
 	gtk_tree_store_set(treestore, iter,
-			   SORTID_REASON,
-			   job_reason_string(job_ptr->state_reason), -1);
+			   SORTID_REASON, reason, -1);
+
 	gtk_tree_store_set(treestore, iter,
 			   SORTID_NETWORK, job_ptr->network, -1);
 	gtk_tree_store_set(treestore, iter,
@@ -1765,8 +1787,6 @@ static void _update_job_record(sview_job_info_t *sview_job_info_ptr,
 		_update_info_step(sview_job_info_ptr, 
 				  GTK_TREE_MODEL(treestore), NULL, iter);
 		
-	xfree(jname);
-	xfree(wckey);
 	return;
 }
 
@@ -2125,6 +2145,26 @@ static void _job_info_list_del(void *object)
 	}
 }
 
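+/* Compare function so list_sort() yields jobs in order of decreasing
+ * node count, with ties broken by node list name (also reversed). */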
+static int _sview_job_sort_aval_dec(sview_job_info_t* rec_a,
+				    sview_job_info_t* rec_b)
+{
+	int size_a = rec_a->node_cnt;
+	int size_b = rec_b->node_cnt;
+
+	if (size_a > size_b)
+		return -1;
+	else if (size_a < size_b)
+		return 1;
+
+	if(rec_a->nodes && rec_b->nodes) {
+		size_a = strcmp(rec_a->nodes, rec_b->nodes);
+		if (size_a > 0)
+			return -1;
+		else if (size_a < 0)
+			return 1;
+	}
+	return 0;
+}
 
 static List _create_job_info_list(job_info_msg_t *job_info_ptr,
 				  job_step_info_response_msg_t *step_info_ptr,
@@ -2231,6 +2271,14 @@ static List _create_job_info_list(job_info_msg_t *job_info_ptr,
 		}
 		list_append(info_list, sview_job_info_ptr);
 	}
+
+
+	list_sort(info_list,
+		  (ListCmpF)_sview_job_sort_aval_dec);
+
+	list_sort(odd_info_list,
+		  (ListCmpF)_sview_job_sort_aval_dec);
+
 update_color:
 	if(want_odd_states)
 		return odd_info_list;
@@ -2249,14 +2297,11 @@ void _display_info_job(List info_list, popup_info_t *popup_win)
 	GtkTreeView *treeview = NULL;
 	int update = 0;
 	int i = -1, j = 0;
-	int first_time = 0;
 
 	if(spec_info->search_info->int_data == NO_VAL) {
 	/* 	info = xstrdup("No pointer given!"); */
 		goto finished;
 	}
-	if(!list_count(popup_win->grid_button_list)) 
-		first_time = 1;
 
 need_refresh:
 	if(!spec_info->display_widget) {
@@ -2283,18 +2328,11 @@ need_refresh:
 	} else if(spec_info->search_info->int_data2 == NO_VAL) {
 		j=0;
 		while(sview_job_info->job_ptr->node_inx[j] >= 0) {
-			if(!first_time)
 				change_grid_color(
 					popup_win->grid_button_list,
 					sview_job_info->job_ptr->node_inx[j],
 					sview_job_info->job_ptr->node_inx[j+1],
-					i);
-			else
-				get_button_list_from_main(
-					&popup_win->grid_button_list,
-					sview_job_info->job_ptr->node_inx[j],
-					sview_job_info->job_ptr->node_inx[j+1],
-					i);
+					i, true);
 			j += 2;
 		}
 		_layout_job_record(treeview, sview_job_info, update);
@@ -2308,23 +2346,13 @@ need_refresh:
 			   spec_info->search_info->int_data2) {
 				j=0;
 				while(step_ptr->node_inx[j] >= 0) {
-					if(!first_time) 
-						change_grid_color(
-							popup_win->
-							grid_button_list,
-							step_ptr->node_inx[j],
-							step_ptr->
-							node_inx[j+1],
-							i);
-					else
-						get_button_list_from_main(
-							&popup_win->
-							grid_button_list,
-							step_ptr->node_inx[j],
-							step_ptr->
-							node_inx[j+1],
-							i);
-
+					change_grid_color(
+						popup_win->
+						grid_button_list,
+						step_ptr->node_inx[j],
+						step_ptr->
+						node_inx[j+1],
+						i, true);
 					j += 2;
 				}
 				_layout_step_record(treeview, 
@@ -2365,9 +2393,6 @@ need_refresh:
 			
 			goto need_refresh;
 		}
-		
-		put_buttons_in_table(popup_win->grid_table,
-				     popup_win->grid_button_list);
 	}
 	gtk_widget_show_all(spec_info->display_widget);
 
@@ -2396,7 +2421,8 @@ extern int get_new_info_job(job_info_msg_t **info_ptr,
 	static bool changed = 0;
 		
 	if(!force && ((now - last) < global_sleep_time)) {
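+		/* within the polling interval return cached data, but
+		 * report SLURM_SUCCESS when the caller's pointer is
+		 * stale so the display still gets refreshed */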
-		error_code = SLURM_NO_CHANGE_IN_DATA;
+		if(*info_ptr != job_info_ptr) 
+			error_code = SLURM_SUCCESS;
 		*info_ptr = job_info_ptr;
 		if(changed) 
 			return SLURM_SUCCESS;
@@ -2421,6 +2447,10 @@ extern int get_new_info_job(job_info_msg_t **info_ptr,
 		changed = 1;
 	}
 	job_info_ptr = new_job_ptr;
+
+	if(*info_ptr != job_info_ptr) 
+		error_code = SLURM_SUCCESS;
+
 	*info_ptr = new_job_ptr;
 	return error_code;
 }
@@ -2434,10 +2464,14 @@ extern int get_new_info_job_step(job_step_info_response_msg_t **info_ptr,
 	int error_code = SLURM_NO_CHANGE_IN_DATA;
 	time_t now = time(NULL);
 	static time_t last;
+	static bool changed = 0;
 		
 	if(!force && ((now - last) < global_sleep_time)) {
-		error_code = SLURM_NO_CHANGE_IN_DATA;
+		if(*info_ptr != old_step_ptr) 
+			error_code = SLURM_SUCCESS;
 		*info_ptr = old_step_ptr;
+		if(changed) 
+			return SLURM_SUCCESS;
 		return error_code;
 	}
 	last = now;
@@ -2446,16 +2480,24 @@ extern int get_new_info_job_step(job_step_info_response_msg_t **info_ptr,
 		error_code = slurm_get_job_steps(old_step_ptr->last_update, 
 						 0, 0, &new_step_ptr, 
 						 show_flags);
-		if (error_code ==  SLURM_SUCCESS)
+		if (error_code == SLURM_SUCCESS) {
 			slurm_free_job_step_info_response_msg(old_step_ptr);
-		else if (slurm_get_errno () == SLURM_NO_CHANGE_IN_DATA) {
+			changed = 1;
+		} else if (slurm_get_errno () == SLURM_NO_CHANGE_IN_DATA) {
 			error_code = SLURM_NO_CHANGE_IN_DATA;
 			new_step_ptr = old_step_ptr;
+			changed = 0;
 		}
-	} else
+	} else {
 		error_code = slurm_get_job_steps((time_t) NULL, 0, 0, 
 						 &new_step_ptr, show_flags);
+		changed = 1;
+	}
 	old_step_ptr = new_step_ptr;
+
+	if(*info_ptr != old_step_ptr) 
+		error_code = SLURM_SUCCESS;
+
 	*info_ptr = new_step_ptr;
 	return error_code;
 }
@@ -2727,7 +2769,7 @@ display_it:
 				change_grid_color(grid_button_list,
 						  job_ptr->node_inx[j],
 						  job_ptr->node_inx[j+1],
-						  i);
+						  i, true);
 			j += 2;
 		}
 		i++;
@@ -2779,9 +2821,8 @@ extern void specific_info_job(popup_info_t *popup_win)
 	job_info_t *job_ptr = NULL;	
 	ListIterator itr = NULL;
 	char name[30], *uname = NULL;
-	char *host = NULL, *host2 = NULL;
-	hostlist_t hostlist = NULL;
-	int found = 0, name_diff;
+	hostset_t hostset = NULL;
+	int name_diff;
 	
 	if(!spec_info->display_widget)
 		setup_popup_info(popup_win, display_data_job, SORTID_CNT);
@@ -2863,15 +2904,7 @@ display_it:
 				 SORTID_CNT);
 	}
 
-	if(popup_win->grid_button_list) {
-		list_destroy(popup_win->grid_button_list);
-	}	       
-	
-#ifdef HAVE_3D
-	popup_win->grid_button_list = copy_main_button_list();
-#else
-	popup_win->grid_button_list = list_create(destroy_grid_button);
-#endif	
+	setup_popup_grid_list(popup_win);
 
 	spec_info->view = INFO_VIEW;
 	if(spec_info->type == INFO_PAGE) {
@@ -2944,6 +2977,11 @@ display_it:
 				  job_ptr->partition))
 				continue;
 			break;
+		case RESV_PAGE:
+			if(strcmp(search_info->gchar_data,
+				  job_ptr->resv_name))
+				continue;
+			break;
 		case BLOCK_PAGE:
 			select_g_sprint_jobinfo(
 				job_ptr->select_jobinfo, 
@@ -2957,25 +2995,13 @@ display_it:
 			if(!job_ptr->nodes)
 				continue;
 			
-			hostlist = hostlist_create(search_info->gchar_data);
-			host = hostlist_shift(hostlist);
-			hostlist_destroy(hostlist);
-			if(!host)
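+			/* a single hostset intersection test replaces
+			 * the old host-by-host comparison loop */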
+			if(!(hostset = hostset_create(search_info->gchar_data)))
 				continue;
-			
-			hostlist = hostlist_create(job_ptr->nodes);
-			found = 0;
-			while((host2 = hostlist_shift(hostlist))) { 
-				if(!strcmp(host, host2)) {
-					free(host2);
-					found = 1;
-					break; 
-				}
-				free(host2);
-			}
-			hostlist_destroy(hostlist);
-			if(!found)
+			if(!hostset_intersects(hostset, job_ptr->nodes)) {
+				hostset_destroy(hostset);
 				continue;
+			}
+			hostset_destroy(hostset);				
 			break;
 		default:
 			continue;
@@ -2984,25 +3010,15 @@ display_it:
 		list_push(send_info_list, sview_job_info_ptr);
 		j=0;
 		while(job_ptr->node_inx[j] >= 0) {
-#ifdef HAVE_3D
 			change_grid_color(
 				popup_win->grid_button_list,
 				job_ptr->node_inx[j],
-				job_ptr->node_inx[j+1], i);
-#else
-			get_button_list_from_main(
-				&popup_win->grid_button_list,
-				job_ptr->node_inx[j],
-				job_ptr->node_inx[j+1], i);
-#endif
+				job_ptr->node_inx[j+1], i, false);
 			j += 2;
 		}
 	}
 	list_iterator_destroy(itr);
 
-	put_buttons_in_table(popup_win->grid_table,
-			     popup_win->grid_button_list);
-
 	_update_info_job(send_info_list,
 			 GTK_TREE_VIEW(spec_info->display_widget));
 			
@@ -3045,6 +3061,7 @@ extern void popup_all_job(GtkTreeModel *model, GtkTreeIter *iter, int id)
 
 	gtk_tree_model_get(model, iter, SORTID_JOBID, &jobid, -1);
 	gtk_tree_model_get(model, iter, SORTID_ALLOC, &stepid, -1);
+
 	if(stepid)
 		stepid = NO_VAL;
 	else {
@@ -3060,6 +3077,13 @@ extern void popup_all_job(GtkTreeModel *model, GtkTreeIter *iter, int id)
 			snprintf(title, 100, "Partition with job %d.%d",
 				 jobid, stepid);			
 		break;
+	case RESV_PAGE:
+		if(stepid == NO_VAL)
+			snprintf(title, 100, "Reservation with job %d", jobid);
+		else
+			snprintf(title, 100, "Reservation with job %d.%d",
+				 jobid, stepid);			
+		break;
 	case NODE_PAGE:
 		if(stepid == NO_VAL) {
 #ifdef HAVE_BG
@@ -3124,6 +3148,13 @@ extern void popup_all_job(GtkTreeModel *model, GtkTreeIter *iter, int id)
 		return;
 	}
 	
+	/* Pass the model and a copy of the iter so we can always
+	 * look up the current node_inx.
+	 */
+	popup_win->model = model;
+	popup_win->iter = *iter;
+	popup_win->node_inx_id = SORTID_NODE_INX;
+
 	switch(id) {
 	case NODE_PAGE:
 		gtk_tree_model_get(model, iter, SORTID_NODELIST, &name, -1);
@@ -3133,6 +3164,10 @@ extern void popup_all_job(GtkTreeModel *model, GtkTreeIter *iter, int id)
 		gtk_tree_model_get(model, iter, SORTID_PARTITION, &name, -1);
 		popup_win->spec_info->search_info->gchar_data = name;
 		break;
+	case RESV_PAGE:
+		gtk_tree_model_get(model, iter, SORTID_RESV_NAME, &name, -1);
+		popup_win->spec_info->search_info->gchar_data = name;
+		break;
 #ifdef HAVE_BG
 	case BLOCK_PAGE: 
 		gtk_tree_model_get(model, iter, SORTID_BLOCK, &name, -1);
diff --git a/src/sview/node_info.c b/src/sview/node_info.c
index 9e05eab988b7d77abc8b3a232834be79cd1913d8..c164836d3ff7c9a66930f91686394677cbb5a840 100644
--- a/src/sview/node_info.c
+++ b/src/sview/node_info.c
@@ -6,10 +6,11 @@
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Danny Auble <da@llnl.gov>
  *
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -106,6 +107,7 @@ static display_data_t options_data_node[] = {
 	{G_TYPE_STRING, BLOCK_PAGE, "Blocks", TRUE, NODE_PAGE},
 #endif
 	{G_TYPE_STRING, PART_PAGE, "Partition", TRUE, NODE_PAGE},
+	{G_TYPE_STRING, RESV_PAGE, "Reservation", TRUE, NODE_PAGE},
 	{G_TYPE_STRING, SUBMIT_PAGE, "Job Submit", FALSE, NODE_PAGE},
 	{G_TYPE_NONE, -1, NULL, FALSE, EDIT_NONE}
 };
@@ -467,6 +469,8 @@ extern int get_new_info_node(node_info_msg_t **info_ptr, int force)
 	static bool changed = 0;
 
 	if(!force && ((now - last) < global_sleep_time)) {
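+		/* same stale-pointer handling as get_new_info_job() */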
+		if(*info_ptr != node_info_ptr)
+			error_code = SLURM_SUCCESS;
 		*info_ptr = node_info_ptr;
 		if(changed) 
 			return SLURM_SUCCESS;
@@ -493,6 +497,10 @@ extern int get_new_info_node(node_info_msg_t **info_ptr, int force)
 		changed = 1;
 	}
 	node_info_ptr = new_node_ptr;
+
+	if(*info_ptr != node_info_ptr) 
+		error_code = SLURM_SUCCESS;
+	
 	*info_ptr = new_node_ptr;
 	return error_code;
 }
@@ -828,7 +836,7 @@ display_it:
 	/* set up the grid */
 	itr = list_iterator_create(info_list);
 	while ((sview_node_info_ptr = list_next(itr))) {
-		change_grid_color(grid_button_list, i, i, i);
+		change_grid_color(grid_button_list, i, i, i, true);
 		i++;
 	}
 	list_iterator_destroy(itr);
@@ -907,7 +915,7 @@ extern void specific_info_node(popup_info_t *popup_win)
 					  label,
 					  0, 1, 0, 1); 
 		gtk_widget_show(label);	
-		spec_info->display_widget = gtk_widget_ref(GTK_WIDGET(label));
+		spec_info->display_widget = gtk_widget_ref(label);
 		return;
 	}
 display_it:	
@@ -946,14 +954,7 @@ display_it:
 		goto end_it;
 	}
 
-	if(popup_win->grid_button_list) {
-		list_destroy(popup_win->grid_button_list);
-	}	       
-#ifdef HAVE_3D
-	popup_win->grid_button_list = copy_main_button_list();
-#else
-	popup_win->grid_button_list = list_create(destroy_grid_button);
-#endif	
+	setup_popup_grid_list(popup_win);
 	
 	/* just linking to another list, don't free the inside, just
 	   the list */
@@ -1023,13 +1024,8 @@ display_it:
 			continue;
 		
 		list_push(send_info_list, sview_node_info_ptr);
-#ifdef HAVE_3D
 		change_grid_color(popup_win->grid_button_list,
-				  i, i, 0);
-#else
-		get_button_list_from_main(&popup_win->grid_button_list,
-					  i, i, 0);		
-#endif
+				  i, i, 0, true);
 	}
 	list_iterator_destroy(itr);
 
@@ -1038,10 +1034,6 @@ display_it:
 		hostlist_destroy(hostlist);
 	}
 
-
-	put_buttons_in_table(popup_win->grid_table,
-			     popup_win->grid_button_list);
-
 	_update_info_node(send_info_list, 
 			  GTK_TREE_VIEW(spec_info->display_widget));
 	list_destroy(send_info_list);
@@ -1094,6 +1086,9 @@ extern void popup_all_node(GtkTreeModel *model, GtkTreeIter *iter, int id)
 	case PART_PAGE:
 		snprintf(title, 100, "Partition(s) with %s %s", node, name);
 		break;
+	case RESV_PAGE:
+		snprintf(title, 100, "Reservation(s) with %s %s", node, name);
+		break;
 	case BLOCK_PAGE: 
 		snprintf(title, 100, "Blocks(s) with %s %s", node, name);
 		break;
diff --git a/src/sview/part_info.c b/src/sview/part_info.c
index ca4821005fc2d5cda009bd5a6ae2706ff0212b6c..9c3d4328b7811f6b5fb5bf8de3f17dd36fa8142b 100644
--- a/src/sview/part_info.c
+++ b/src/sview/part_info.c
@@ -6,10 +6,11 @@
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Danny Auble <da@llnl.gov>
  *
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *   
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -86,6 +87,7 @@ enum {
 #ifndef HAVE_BG
 	SORTID_NODELIST, 
 #endif
+	SORTID_NODE_INX,
 	SORTID_NODES, 
 	SORTID_ONLY_LINE, 
 	SORTID_PRIORITY,
@@ -154,6 +156,8 @@ static display_data_t display_data_part[] = {
 	 create_model_part, admin_edit_part},
 	{G_TYPE_INT, SORTID_ONLY_LINE, NULL, FALSE, EDIT_NONE, refresh_part,
 	 create_model_part, admin_edit_part},
+	{G_TYPE_POINTER, SORTID_NODE_INX,  NULL, FALSE, EDIT_NONE, 
+	 refresh_part, create_model_part, admin_edit_part},
 	{G_TYPE_INT, SORTID_UPDATED, NULL, FALSE, EDIT_NONE, refresh_part,
 	 create_model_part, admin_edit_part},
 
@@ -190,23 +194,10 @@ static display_data_t options_data_part[] = {
 	{G_TYPE_STRING, NODE_PAGE, "Nodes", TRUE, PART_PAGE},
 #endif
 	{G_TYPE_STRING, SUBMIT_PAGE, "Job Submit", FALSE, PART_PAGE},
+	{G_TYPE_STRING, RESV_PAGE, "Reservations", TRUE, PART_PAGE},
 	{G_TYPE_NONE, -1, NULL, FALSE, EDIT_NONE}
 };
 
-#ifdef HAVE_BG
-static void _update_nodes_for_bg(int node_scaling,
-				 node_info_msg_t *node_msg,
-				 bg_info_record_t *bg_info_record);
-/* ERROR_STATE must be last since that will affect the state of the rest of the
-   midplane.
-*/
-enum {
-	SVIEW_BG_IDLE_STATE,
-	SVIEW_BG_ALLOC_STATE,
-	SVIEW_BG_ERROR_STATE
-};
-#endif
-
 static display_data_t *local_display_data = NULL;
 
 static char *got_edit_signal = NULL;
@@ -220,45 +211,6 @@ static void _append_part_sub_record(sview_part_sub_t *sview_part_sub,
 				    int line);
 static node_info_t *_find_node(char *node_name, node_info_msg_t *node_msg);
 
-#ifdef HAVE_BG
-
-static void _update_nodes_for_bg(int node_scaling,
-				 node_info_msg_t *node_msg,
-				 bg_info_record_t *bg_info_record)
-{
-	node_info_t *node_ptr = NULL;
-	hostlist_t hl;
-	char *node_name = NULL;
-
-	/* we are using less than one node */
-	if(bg_info_record->conn_type == SELECT_SMALL) 
-		node_scaling = bg_info_record->node_cnt;
-       		   
-	hl = hostlist_create(bg_info_record->nodes);
-	while (1) {
-		if (node_name)
-			free(node_name);
-		node_name = hostlist_shift(hl);
-		if (!node_name)
-			break;
-		node_ptr = _find_node(node_name, node_msg);
-		if (!node_ptr)
-			continue;
-		/* cores is overloaded to be the cnodes in an error
-		 * state and used_cpus is overloaded to be the nodes in
-		 * use.  No block should be sent in here if it isn't
-		 * in use (that doesn't mean in a free state, it means
-		 * the user isn't slurm or the block is in an error state.  
-		 */
-		if(bg_info_record->state == RM_PARTITION_ERROR) 
-			node_ptr->cores += node_scaling;
-		else
-			node_ptr->used_cpus += node_scaling;
-	}
-	hostlist_destroy(hl);
-	
-}
-#endif
 
 static int 
 _build_min_max_16_string(char *buffer, int buf_size, 
@@ -1050,6 +1002,9 @@ static void _update_part_record(sview_part_info_t *sview_part_info,
 
 	gtk_tree_store_set(treestore, iter, SORTID_NODELIST, 
 			   part_ptr->nodes, -1);
+
+	gtk_tree_store_set(treestore, iter, 
+			   SORTID_NODE_INX, part_ptr->node_inx, -1);
 	
 	gtk_tree_store_set(treestore, iter, SORTID_ONLY_LINE, 0, -1);
 	/* clear out info for the main listing */
@@ -1416,6 +1371,27 @@ static sview_part_info_t *_create_sview_part_info(partition_info_t* part_ptr)
 	return sview_part_info;
 }
 
+static int _sview_part_sort_aval_dec(sview_part_info_t* rec_a,
+				     sview_part_info_t* rec_b)
+{
+	int size_a = rec_a->part_ptr->total_nodes;
+	int size_b = rec_b->part_ptr->total_nodes;
+
+	if (size_a > size_b)
+		return -1;
+	else if (size_a < size_b)
+		return 1;
+
+	if(rec_a->part_ptr->nodes && rec_b->part_ptr->nodes) {
+		size_a = strcmp(rec_a->part_ptr->nodes, rec_b->part_ptr->nodes);
+		if (size_a > 0)
+			return -1;
+		else if (size_a < 0)
+			return 1;
+	}
+	return 0;
+}
+
 static List _create_part_info_list(partition_info_msg_t *part_info_ptr,
 				   node_info_msg_t *node_info_ptr,
 				   node_select_info_msg_t *node_select_ptr,
@@ -1432,9 +1408,8 @@ static List _create_part_info_list(partition_info_msg_t *part_info_ptr,
 	hostlist_t hl;
 #ifdef HAVE_BG
 	int j;
-	bg_info_record_t *bg_info_record = NULL;
 	int node_scaling = part_info_ptr->partition_array[0].node_scaling;
-	char *slurm_user = NULL;
+	int block_error = 0;
 #endif
 	if(!changed && info_list) {
 		return info_list;
@@ -1449,39 +1424,6 @@ static List _create_part_info_list(partition_info_msg_t *part_info_ptr,
 		return NULL;
 	}
 
-#ifdef HAVE_BG
-	slurm_user = xstrdup(slurmctld_conf.slurm_user_name);
-
-	for (i=0; i<node_info_ptr->record_count; i++) {
-		node_ptr = &(node_info_ptr->node_array[i]);
-		/* in each node_ptr we overload the threads var
-		 * with the number of cnodes in the used_cpus var
-		 * will be used to tell how many cnodes are
-		 * allocated and the cores will represent the cnodes
-		 * in an error state. So we can get an idle count by
-		 * subtracting those 2 numbers from the total possible
-		 * cnodes (which are the idle cnodes).
-		 */
-		node_ptr->threads = node_scaling;
-		node_ptr->cores = 0;
-		node_ptr->used_cpus = 0;
-	}
-
-	for (i=0; i<node_select_ptr->record_count; i++) {
-		bg_info_record = &(node_select_ptr->bg_info_array[i]);
-		
-		/* this block is idle we won't mark it */
-		if (bg_info_record->state != RM_PARTITION_ERROR
-		    && !strcmp(slurm_user, bg_info_record->owner_name))
-			continue;
-		_update_nodes_for_bg(node_scaling, node_info_ptr,
-				     bg_info_record);
-	}
-	xfree(slurm_user);
-
-#endif
-
-
 	for (i=0; i<part_info_ptr->record_count; i++) {
 		part_ptr = &(part_info_ptr->partition_array[i]);
 		if (!part_ptr->nodes || (part_ptr->nodes[0] == '\0'))
@@ -1493,6 +1435,14 @@ static List _create_part_info_list(partition_info_msg_t *part_info_ptr,
 			node_ptr = _find_node(node_name, node_info_ptr);
 			free(node_name);
 #ifdef HAVE_BG
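+			/* DRAIN and FAIL set together is how a block
+			 * in an error state is flagged; clear the
+			 * flags and carry the condition in
+			 * block_error instead */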
+			if((node_ptr->node_state & NODE_STATE_DRAIN) 
+			   && (node_ptr->node_state & NODE_STATE_FAIL)) {
+				node_ptr->node_state &= ~NODE_STATE_DRAIN;
+				node_ptr->node_state &= ~NODE_STATE_FAIL;
+				block_error = 1;
+			} else
+				block_error = 0;
+			node_ptr->threads = node_scaling;
 			for(j=0; j<3; j++) {
 				int norm = 0;
 				switch(j) {
@@ -1545,8 +1495,11 @@ static List _create_part_info_list(partition_info_msg_t *part_info_ptr,
 						continue;
 					node_ptr->node_state &=
 						NODE_STATE_FLAGS;
-					node_ptr->node_state |= 
+					node_ptr->node_state |=
 						NODE_STATE_DRAIN;
+					if(block_error)
+						node_ptr->node_state
+							|= NODE_STATE_FAIL;
 					node_ptr->threads = node_ptr->cores;
 					break;
 				default:
@@ -1592,6 +1545,10 @@ static List _create_part_info_list(partition_info_msg_t *part_info_ptr,
 		hostlist_destroy(hl);
 		list_append(info_list, sview_part_info);
 	}
+
+	list_sort(info_list,
+		  (ListCmpF)_sview_part_sort_aval_dec);
+
 	return info_list;
 }
 
@@ -1633,17 +1590,11 @@ need_refresh:
 		if(!strcmp(part_ptr->name, name)) {
 			j=0;
 			while(part_ptr->node_inx[j] >= 0) {
-				if(!first_time)
-					change_grid_color(
-						popup_win->grid_button_list,
-						part_ptr->node_inx[j],
-						part_ptr->node_inx[j+1], i);
-				else
-					get_button_list_from_main(
-						&popup_win->grid_button_list,
-						part_ptr->node_inx[j],
-						part_ptr->node_inx[j+1],
-						i);
+				change_grid_color(
+					popup_win->grid_button_list,
+					part_ptr->node_inx[j],
+					part_ptr->node_inx[j+1], i,
+					true);
 				j += 2;
 			}
 			_layout_part_record(treeview, sview_part_info, update);
@@ -1674,9 +1625,6 @@ need_refresh:
 			
 			goto need_refresh;
 		}
-		put_buttons_in_table(popup_win->grid_table,
-				     popup_win->grid_button_list);
-
 	}
 	gtk_widget_show(spec_info->display_widget);
 		
@@ -1705,6 +1653,8 @@ extern int get_new_info_part(partition_info_msg_t **part_ptr, int force)
 	static bool changed = 0;
 		
 	if(!force && ((now - last) < global_sleep_time)) {
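+		/* same stale-pointer handling as get_new_info_job() */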
+		if(*part_ptr != part_info_ptr)
+			error_code = SLURM_SUCCESS;
 		*part_ptr = part_info_ptr;
 		if(changed) 
 			return SLURM_SUCCESS;
@@ -1729,6 +1679,10 @@ extern int get_new_info_part(partition_info_msg_t **part_ptr, int force)
 	}
 	
 	part_info_ptr = new_part_ptr;
+
+	if(*part_ptr != part_info_ptr) 
+		error_code = SLURM_SUCCESS;
+
 	*part_ptr = new_part_ptr;
 	return error_code;
 }
@@ -2045,7 +1999,7 @@ display_it:
 				change_grid_color(grid_button_list,
 						  part_ptr->node_inx[j],
 						  part_ptr->node_inx[j+1],
-						  i);
+						  i, true);
 			j += 2;
 		}
 		i++;
@@ -2096,9 +2050,7 @@ extern void specific_info_part(popup_info_t *popup_win)
 	sview_part_info_t *sview_part_info_ptr = NULL;
 	partition_info_t *part_ptr = NULL;
 	ListIterator itr = NULL;
-	char *host = NULL, *host2 = NULL;
-	hostlist_t hostlist = NULL;
-	int found = 0;
+	hostset_t hostset = NULL;
 	
 	if(!spec_info->display_widget)
 		setup_popup_info(popup_win, display_data_part, SORTID_CNT);
@@ -2203,15 +2155,7 @@ display_it:
 				 SORTID_CNT);
 	}
 	
-	if(popup_win->grid_button_list) {
-		list_destroy(popup_win->grid_button_list);
-	}	       
-	
-#ifdef HAVE_3D
-	popup_win->grid_button_list = copy_main_button_list();
-#else
-	popup_win->grid_button_list = list_create(destroy_grid_button);
-#endif	
+	setup_popup_grid_list(popup_win);
 
 	spec_info->view = INFO_VIEW;
 	if(spec_info->type == INFO_PAGE) {
@@ -2224,34 +2168,24 @@ display_it:
 	send_info_list = list_create(NULL);	
 	
 	itr = list_iterator_create(info_list);
+	i = -1;
 	while ((sview_part_info_ptr = list_next(itr))) {
 		i++;
-		part_ptr = sview_part_info_ptr->part_ptr;	
+		part_ptr = sview_part_info_ptr->part_ptr;
 		switch(spec_info->type) {
+		case RESV_PAGE:
 		case NODE_PAGE:
 			if(!part_ptr->nodes)
 				continue;
 
-			hostlist = hostlist_create(
-				spec_info->search_info->gchar_data);
-			host = hostlist_shift(hostlist);
-			hostlist_destroy(hostlist);
-			if(!host) 
+			if(!(hostset = hostset_create(
+				     spec_info->search_info->gchar_data)))
 				continue;
-			
-			hostlist = hostlist_create(part_ptr->nodes);
-			found = 0;
-			while((host2 = hostlist_shift(hostlist))) { 
-				if(!strcmp(host, host2)) {
-					free(host2);
-					found = 1;
-					break; 
-				}
-				free(host2);
-			}
-			hostlist_destroy(hostlist);
-			if(!found)
+			if(!hostset_intersects(hostset, part_ptr->nodes)) {
+				hostset_destroy(hostset);
 				continue;
+			}
+			hostset_destroy(hostset);				
 			break;
 		case PART_PAGE:
 		case BLOCK_PAGE:
@@ -2268,23 +2202,14 @@ display_it:
 		list_push(send_info_list, sview_part_info_ptr);
 		j=0;
 		while(part_ptr->node_inx[j] >= 0) {
-#ifdef HAVE_3D
 			change_grid_color(
 				popup_win->grid_button_list,
 				part_ptr->node_inx[j],
-				part_ptr->node_inx[j+1], i);
-#else
-			get_button_list_from_main(
-				&popup_win->grid_button_list,
-				part_ptr->node_inx[j],
-				part_ptr->node_inx[j+1], i);
-#endif
+				part_ptr->node_inx[j+1], i, false);
 			j += 2;
 		}
 	}
 	list_iterator_destroy(itr);
-	put_buttons_in_table(popup_win->grid_table,
-			     popup_win->grid_button_list);
 	 
 	_update_info_part(send_info_list, 
 			  GTK_TREE_VIEW(spec_info->display_widget));
@@ -2333,6 +2258,9 @@ extern void popup_all_part(GtkTreeModel *model, GtkTreeIter *iter, int id)
 	case JOB_PAGE:
 		snprintf(title, 100, "Job(s) in partition %s", name);
 		break;
+	case RESV_PAGE:
+		snprintf(title, 100, "Reservation(s) in partition %s", name);
+		break;
 	case NODE_PAGE:
 		gtk_tree_model_get(model, iter, SORTID_ONLY_LINE,
 				   &only_line, -1);
@@ -2394,6 +2322,13 @@ extern void popup_all_part(GtkTreeModel *model, GtkTreeIter *iter, int id)
 		return;
 	}
 
+	/* Pass the model and a copy of the iter so we can always
+	 * look up the current node_inx.
+	 */
+	popup_win->model = model;
+	popup_win->iter = *iter;
+	popup_win->node_inx_id = SORTID_NODE_INX;
+
 	switch(id) {
 	case JOB_PAGE:
 	case BLOCK_PAGE: 
@@ -2401,6 +2336,7 @@ extern void popup_all_part(GtkTreeModel *model, GtkTreeIter *iter, int id)
 		popup_win->spec_info->search_info->gchar_data = name;
 		//specific_info_job(popup_win);
 		break;
+	case RESV_PAGE:
 	case NODE_PAGE:
 		g_free(name);
 		gtk_tree_model_get(model, iter, SORTID_NODELIST, &name, -1);
diff --git a/src/sview/popups.c b/src/sview/popups.c
index 1bd3af40446ac30c94e3cb467dc5b107f5983028..4c83ca03b4fe8aabe14801ec7bb43655007c85c1 100644
--- a/src/sview/popups.c
+++ b/src/sview/popups.c
@@ -2,14 +2,15 @@
  *  popups.c - put different popup displays here
  *****************************************************************************
  *  Copyright (C) 2002-2007 The Regents of the University of California.
- *  Copyright (C) 2008 Lawrence Livermore National Security.
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
  *  Portions Copyright (C) 2008 Vijay Ramasubramanian
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Danny Auble <da@llnl.gov>, et. al.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -151,6 +152,11 @@ void _search_entry(sview_search_info_t *sview_search_info)
 		xfree(lower);
 		
 		break;
+	case SEARCH_RESERVATION_NAME:
+		id = RESV_PAGE;
+		snprintf(title, 100, "Reservation %s info",
+			 sview_search_info->gchar_data);
+		break;
 	default:
 		g_print("unknown search type %d.\n",
 			sview_search_info->search_type);
@@ -258,6 +264,11 @@ static void _layout_ctl_conf(GtkTreeStore *treestore,
 	add_display_treestore_line(update, treestore, &iter, 
 				   "CheckpointType",
 				   slurm_ctl_conf_ptr->checkpoint_type);
+	snprintf(temp_str, sizeof(temp_str), "%u", 
+		 slurm_ctl_conf_ptr->complete_wait);
+	add_display_treestore_line(update, treestore, &iter, 
+				   "CompleteWait", 
+				   temp_str);
 	add_display_treestore_line(update, treestore, &iter, 
 				   "ControlAddr", 
 				   slurm_ctl_conf_ptr->control_addr);
@@ -267,11 +278,17 @@ static void _layout_ctl_conf(GtkTreeStore *treestore,
 	add_display_treestore_line(update, treestore, &iter, 
 				   "CryptoType", 
 				   slurm_ctl_conf_ptr->crypto_type);
-	snprintf(temp_str, sizeof(temp_str), "%u", 
-		 slurm_ctl_conf_ptr->def_mem_per_task);
-	add_display_treestore_line(update, treestore, &iter, 
-				   "DefMemPerTask", 
-				   temp_str);
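+	/* the MEM_PER_CPU bit flags a per-CPU (rather than per-node)
+	 * limit; mask it off before displaying the value */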
+	if (slurm_ctl_conf_ptr->def_mem_per_task & MEM_PER_CPU) {
+		snprintf(temp_str, sizeof(temp_str), "%u", 
+			 slurm_ctl_conf_ptr->def_mem_per_task & (~MEM_PER_CPU));
+		add_display_treestore_line(update, treestore, &iter, 
+					   "DefMemPerCPU", temp_str);
+	} else {
+		snprintf(temp_str, sizeof(temp_str), "%u", 
+			 slurm_ctl_conf_ptr->def_mem_per_task);
+		add_display_treestore_line(update, treestore, &iter, 
+					   "DefMemPerNode", temp_str);
+	}
 	add_display_treestore_line(update, treestore, &iter, 
 				   "Epilog", 
 				   slurm_ctl_conf_ptr->epilog);
@@ -324,7 +341,8 @@ static void _layout_ctl_conf(GtkTreeStore *treestore,
 
 	add_display_treestore_line(update, treestore, &iter, 
 				   "JobCredentialPrivateKey", 
-				   slurm_ctl_conf_ptr->job_credential_private_key);
+				   slurm_ctl_conf_ptr->
+				   job_credential_private_key);
 	add_display_treestore_line(update, treestore, &iter, 
 				   "JobCredentialPublicCertificate", 
 				   slurm_ctl_conf_ptr->
@@ -346,11 +364,17 @@ static void _layout_ctl_conf(GtkTreeStore *treestore,
 	add_display_treestore_line(update, treestore, &iter, 
 				   "MaxJobCount", 
 				   temp_str);
-	snprintf(temp_str, sizeof(temp_str), "%u", 
-		 slurm_ctl_conf_ptr->max_mem_per_task);
-	add_display_treestore_line(update, treestore, &iter, 
-				   "MaxMemPerTask", 
-				   temp_str);
+	if (slurm_ctl_conf_ptr->max_mem_per_task & MEM_PER_CPU) {
+		snprintf(temp_str, sizeof(temp_str), "%u", 
+			 slurm_ctl_conf_ptr->max_mem_per_task & (~MEM_PER_CPU));
+		add_display_treestore_line(update, treestore, &iter, 
+					   "MaxMemPerCPU", temp_str);
+	} else {
+		snprintf(temp_str, sizeof(temp_str), "%u", 
+			 slurm_ctl_conf_ptr->max_mem_per_task);
+		add_display_treestore_line(update, treestore, &iter, 
+					   "MaxMemPerNode", temp_str);
+	}
 	snprintf(temp_str, sizeof(temp_str), "%u", 
 		 slurm_ctl_conf_ptr->msg_timeout);
 	add_display_treestore_line(update, treestore, &iter, 
@@ -364,6 +388,9 @@ static void _layout_ctl_conf(GtkTreeStore *treestore,
 	add_display_treestore_line(update, treestore, &iter, 
 				   "MpiDefault",
 				   slurm_ctl_conf_ptr->mpi_default);
+	add_display_treestore_line(update, treestore, &iter, 
+				   "MpiParams",
+				   slurm_ctl_conf_ptr->mpi_params);
 #ifdef MULTIPLE_SLURMD
 	add_display_treestore_line(update, treestore, &iter, 
 				   "MULTIPLE_SLURMD", "1");
@@ -777,7 +804,11 @@ extern void create_search_popup(GtkAction *action, gpointer user_data)
 		label = gtk_label_new("Which state?");
 	}
 #endif
-	else {
+	else if(!strcmp(name, "reservation_name")) {
+		sview_search_info.search_type = SEARCH_RESERVATION_NAME;
+		entry = create_entry();
+		label = gtk_label_new("Which reservation?");
+	} else {
 		sview_search_info.search_type = 0;
 		goto end_it;
 	}
@@ -819,6 +850,7 @@ extern void create_search_popup(GtkAction *action, gpointer user_data)
 		case SEARCH_BLOCK_SIZE:
 		case SEARCH_PARTITION_NAME:
 		case SEARCH_NODE_NAME:
+		case SEARCH_RESERVATION_NAME:
 			sview_search_info.gchar_data =
 				g_strdup(gtk_entry_get_text(GTK_ENTRY(entry)));
 			break;
diff --git a/src/sview/resv_info.c b/src/sview/resv_info.c
new file mode 100644
index 0000000000000000000000000000000000000000..068cac31abc2066bb6c1b0e74d458877f7b2f932
--- /dev/null
+++ b/src/sview/resv_info.c
@@ -0,0 +1,1419 @@
+/*****************************************************************************\
+ *  resv_info.c - Functions related to advanced reservation display 
+ *  mode of sview.
+ *****************************************************************************
+ *  Copyright (C) 2009 Lawrence Livermore National Security.
+ *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+ *  Written by Morris Jette <jette@llnl.gov>
+ *  CODE-OCEC-09-009. All rights reserved. 
+ *  
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
+ *  
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *  
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *  
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+\*****************************************************************************/
+
+#include "src/common/uid.h"
+#include "src/sview/sview.h"
+#include "src/common/parse_time.h"
+ 
+#define _DEBUG 0
+
+/* Collection of data for printing reports.  Related data is combined here. */
+typedef struct {
+	reserve_info_t *resv_ptr;
+	char *color;
+} sview_resv_info_t;
+
+enum { 
+	EDIT_REMOVE = 1,
+	EDIT_EDIT
+};
+
+/* These need to be in alpha order (except POS and CNT) */
+enum { 
+	SORTID_POS = POS_LOC,
+	SORTID_ACCOUNTS,
+	SORTID_ACTION,
+	SORTID_DURATION,
+	SORTID_END_TIME,
+	SORTID_FEATURES,
+	SORTID_FLAGS,
+	SORTID_NAME,
+	SORTID_NODE_CNT,
+	SORTID_NODE_LIST,
+	SORTID_NODE_INX,
+	SORTID_PARTITION,
+	SORTID_START_TIME,
+	SORTID_UPDATED,
+	SORTID_USERS,
+	SORTID_CNT
+};
+
+/* The extra field here is for choosing the type of edit that will
+ * take place.  If you choose EDIT_MODEL (meaning only a set of known
+ * options is displayed), create the model in function create_model_*.
+ */
+
+static display_data_t display_data_resv[] = {
+	{G_TYPE_INT, SORTID_POS, NULL, FALSE, EDIT_NONE, 
+	 refresh_resv, create_model_resv, admin_edit_resv},
+	{G_TYPE_STRING, SORTID_NAME,       "Name", TRUE, EDIT_NONE, 
+	 refresh_resv, create_model_resv, admin_edit_resv},
+	{G_TYPE_STRING, SORTID_ACTION,     "Action", FALSE, EDIT_MODEL,
+	 refresh_resv, create_model_resv, admin_edit_resv},
+	{G_TYPE_STRING, SORTID_NODE_CNT,   "Nodes", TRUE, EDIT_TEXTBOX, 
+	 refresh_resv, create_model_resv, admin_edit_resv},
+	{G_TYPE_STRING, SORTID_NODE_LIST,  "NodeList", TRUE, EDIT_TEXTBOX, 
+	 refresh_resv, create_model_resv, admin_edit_resv},
+	{G_TYPE_STRING, SORTID_START_TIME, "StartTime", TRUE, EDIT_TEXTBOX, 
+	 refresh_resv, create_model_resv, admin_edit_resv},
+	{G_TYPE_STRING, SORTID_END_TIME,   "EndTime", TRUE, EDIT_TEXTBOX, 
+	 refresh_resv, create_model_resv, admin_edit_resv},
+	{G_TYPE_STRING, SORTID_DURATION, "Duration", FALSE, EDIT_TEXTBOX, 
+	 refresh_resv, create_model_resv, admin_edit_resv},
+	{G_TYPE_STRING, SORTID_ACCOUNTS,   "Accounts", FALSE, EDIT_TEXTBOX, 
+	 refresh_resv, create_model_resv, admin_edit_resv},
+	{G_TYPE_STRING, SORTID_USERS,      "Users", FALSE, EDIT_TEXTBOX, 
+	 refresh_resv, create_model_resv, admin_edit_resv},
+	{G_TYPE_STRING, SORTID_PARTITION,  "Partition", FALSE, EDIT_TEXTBOX, 
+	 refresh_resv, create_model_resv, admin_edit_resv},
+	{G_TYPE_STRING, SORTID_FEATURES,   "Features", FALSE, EDIT_TEXTBOX, 
+	 refresh_resv, create_model_resv, admin_edit_resv},
+	{G_TYPE_STRING, SORTID_FLAGS,      "Flags", FALSE, EDIT_NONE, 
+	 refresh_resv, create_model_resv, admin_edit_resv},
+	{G_TYPE_POINTER, SORTID_NODE_INX,  NULL, FALSE, EDIT_NONE, 
+	 refresh_resv, create_model_resv, admin_edit_resv},
+	{G_TYPE_INT,    SORTID_UPDATED,    NULL, FALSE, EDIT_NONE,
+	 refresh_resv, create_model_resv, admin_edit_resv},
+	{G_TYPE_NONE, -1, NULL, FALSE, EDIT_NONE}
+};
+
+static display_data_t options_data_resv[] = {
+	{G_TYPE_INT, SORTID_POS, NULL, FALSE, EDIT_NONE},
+	{G_TYPE_STRING, INFO_PAGE, "Full Info", TRUE, RESV_PAGE},
+	{G_TYPE_STRING, RESV_PAGE, "Remove", TRUE, ADMIN_PAGE},
+	{G_TYPE_STRING, RESV_PAGE, "Edit Reservation", TRUE, ADMIN_PAGE},
+	{G_TYPE_STRING, JOB_PAGE, "Jobs", TRUE, RESV_PAGE},
+	{G_TYPE_STRING, PART_PAGE, "Partitions", TRUE, RESV_PAGE},
+#ifdef HAVE_BG
+	{G_TYPE_STRING, BLOCK_PAGE, "Blocks", TRUE, RESV_PAGE},
+	{G_TYPE_STRING, NODE_PAGE, "Base Partitions", TRUE, RESV_PAGE},
+#else
+	{G_TYPE_STRING, NODE_PAGE, "Nodes", TRUE, RESV_PAGE},
+#endif
+	{G_TYPE_NONE, -1, NULL, FALSE, EDIT_NONE}
+};
+
+
+static display_data_t *local_display_data = NULL;
+
+static char *got_edit_signal = NULL;
+
+/*
+ *  _parse_flags  is used to parse the Flags= option.  It handles
+ *  daily, weekly, and maint, each optionally preceded by + or -,
+ *  comma-separated with no spaces.
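+ *  For example, "daily,-maint" yields
+ *  RESERVE_FLAG_DAILY | RESERVE_FLAG_NO_MAINT.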
+ */
+static uint32_t _parse_flags(const char *flagstr)
+{
+	int flip;
+	uint32_t outflags = 0;
+	const char *curr = flagstr;
+	int taglen = 0;
+
+	while (*curr != '\0') {
+		flip = 0;
+		if (*curr == '+') {
+			curr++;
+		} else if (*curr == '-') {
+			flip = 1;
+			curr++;
+		}
+		taglen = 0;
+		while (curr[taglen] != ',' && curr[taglen] != '\0')
+			taglen++;
+
+		if (strncasecmp(curr, "Maintenance", MAX(taglen,1)) == 0) {
+			curr += taglen;
+			if (flip)
+				outflags |= RESERVE_FLAG_NO_MAINT;
+			else 
+				outflags |= RESERVE_FLAG_MAINT;
+		} else if (strncasecmp(curr, "Daily", MAX(taglen,1)) == 0) {
+			curr += taglen;
+			if (flip)
+				outflags |= RESERVE_FLAG_NO_DAILY;
+			else 
+				outflags |= RESERVE_FLAG_DAILY;
+		} else if (strncasecmp(curr, "Weekly", MAX(taglen,1)) == 0) {
+			curr += taglen;
+			if (flip)
+				outflags |= RESERVE_FLAG_NO_WEEKLY;
+			else 
+				outflags |= RESERVE_FLAG_WEEKLY;
+		} else {
+			char *temp = g_strdup_printf("Error parsing flags %s.",
+						     flagstr);
+			display_edit_note(temp);
+			g_free(temp);
+			outflags = (uint32_t)NO_VAL;
+			break;
+		}
+
+		if (*curr == ',') 
+			curr++;
+	}
+	return outflags;
+}
+
+static void _set_active_combo_resv(GtkComboBox *combo, 
+				   GtkTreeModel *model, GtkTreeIter *iter,
+				   int type)
+{
+	char *temp_char = NULL;
+	int action = 0;
+
+	gtk_tree_model_get(model, iter, type, &temp_char, -1);
+	if(!temp_char)
+		goto end_it;
+	switch(type) {
+	case SORTID_ACTION:
+		if(!strcmp(temp_char, "none"))
+			action = 0;
+		else if(!strcmp(temp_char, "remove"))
+			action = 1;
+		else 
+			action = 0;
+				
+		break;
+	default:
+		break;
+	}
+	g_free(temp_char);
+end_it:
+	gtk_combo_box_set_active(combo, action);
+	
+}
+
+/* don't free this char */
+static const char *_set_resv_msg(resv_desc_msg_t *resv_msg,
+				 const char *new_text,
+				 int column)
+{
+	char *type = NULL;
+	int temp_int = 0;
+	uint32_t f;
+	
+	/* need to clear errno here (just in case) */
+	errno = 0;
+
+	if(!resv_msg)
+		return NULL;
+	
+	switch(column) {
+	case SORTID_ACCOUNTS:		
+		resv_msg->accounts = xstrdup(new_text);
+		type = "accounts";
+		break;
+	case SORTID_ACTION:
+		xfree(got_edit_signal);
+		if(!strcasecmp(new_text, "None"))
+			got_edit_signal = NULL;
+		else
+			got_edit_signal = xstrdup(new_text);
+		break;
+	case SORTID_DURATION:
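+		/* time_str2mins() accepts plain minutes or
+		 * [days-]hours:min:sec forms, e.g. "90" or
+		 * "1-12:00:00", and returns minutes */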
+		temp_int = time_str2mins((char *)new_text);
+		if(temp_int <= 0)
+			goto return_error;
+		resv_msg->duration = temp_int;
+		type = "duration";
+		break;
+	case SORTID_END_TIME:
+		resv_msg->end_time = parse_time((char *)new_text, 0);
+		type = "end time";
+		break;
+	case SORTID_FEATURES:
+		resv_msg->features = xstrdup(new_text);
+		type = "features";
+		break;
+	case SORTID_FLAGS:
+		f = _parse_flags(new_text);
+		type = "flags";
+		if(f == (uint32_t)NO_VAL) 
+			goto return_error;
+		resv_msg->flags = f;
+		break;
+	case SORTID_NAME:
+		resv_msg->name = xstrdup(new_text);
+		type = "name";
+		break;
+	case SORTID_NODE_CNT:
+		temp_int = strtol(new_text, (char **)NULL, 10);
+		
+		type = "Node Count";
+		if(temp_int <= 0)
+			goto return_error;
+		resv_msg->node_cnt = temp_int;
+		break;
+	case SORTID_NODE_LIST:
+		resv_msg->node_list = xstrdup(new_text);
+		type = "node list";
+		break;
+	case SORTID_PARTITION:
+		resv_msg->partition = xstrdup(new_text);
+		type = "partition";
+		break;
+	case SORTID_START_TIME:
+		resv_msg->start_time = parse_time((char *)new_text, 0);
+		type = "start time";
+		break;
+	case SORTID_USERS:		
+		resv_msg->users = xstrdup(new_text);
+		type = "users";
+		break;
+	default:
+		type = "unknown";
+		break;
+	}
+
+	return type;
+
+return_error:
+	errno = 1;
+	return type;
+}
+
+static void _resv_info_list_del(void *object)
+{
+	sview_resv_info_t *sview_resv_info = (sview_resv_info_t *)object;
+
+	if (sview_resv_info) {
+		xfree(sview_resv_info);
+	}
+}
+
+static void _admin_edit_combo_box_resv(GtkComboBox *combo,
+				       resv_desc_msg_t *resv_msg)
+{
+	GtkTreeModel *model = NULL;
+	GtkTreeIter iter;
+	int column = 0;
+	char *name = NULL;
+	
+	if(!resv_msg)
+		return;
+
+	if(!gtk_combo_box_get_active_iter(combo, &iter)) {
+		g_print("nothing selected\n");
+		return;
+	}
+	model = gtk_combo_box_get_model(combo);
+	if(!model) {
+		g_print("nothing selected\n");
+		return;
+	}
+
+	gtk_tree_model_get(model, &iter, 0, &name, -1);
+	gtk_tree_model_get(model, &iter, 1, &column, -1);
+
+	_set_resv_msg(resv_msg, name, column);
+
+	g_free(name);
+}
+
+
+
+static gboolean _admin_focus_out_resv(GtkEntry *entry,
+				      GdkEventFocus *event, 
+				      resv_desc_msg_t *resv_msg)
+{
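+	/* the column id is smuggled in the entry's max length (set
+	 * to DEFAULT_ENTRY_LENGTH + column when the entry was
+	 * built), so recover it by subtracting the offset */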
+	int type = gtk_entry_get_max_length(entry);
+	const char *name = gtk_entry_get_text(entry);
+	type -= DEFAULT_ENTRY_LENGTH;
+	_set_resv_msg(resv_msg, name, type);
+	
+	return false;
+}
+
+static GtkWidget *_admin_full_edit_resv(resv_desc_msg_t *resv_msg, 
+					GtkTreeModel *model, GtkTreeIter *iter)
+{
+	GtkScrolledWindow *window = create_scrolled_window();
+	GtkBin *bin = NULL;
+	GtkViewport *view = NULL;
+	GtkTable *table = NULL;
+	GtkWidget *label = NULL;
+	GtkWidget *entry = NULL;
+	GtkTreeModel *model2 = NULL; 
+	GtkCellRenderer *renderer = NULL;
+	int i = 0, row = 0;
+	char *temp_char = NULL;
+
+	gtk_scrolled_window_set_policy(window,
+				       GTK_POLICY_NEVER,
+				       GTK_POLICY_AUTOMATIC);
+	bin = GTK_BIN(&window->container);
+	view = GTK_VIEWPORT(bin->child);
+	bin = GTK_BIN(&view->bin);
+	table = GTK_TABLE(bin->child);
+	gtk_table_resize(table, SORTID_CNT, 2);
+	
+	gtk_table_set_homogeneous(table, FALSE);	
+
+	for(i = 0; i < SORTID_CNT; i++) {
+		if(display_data_resv[i].extra == EDIT_MODEL) {
+			/* editable items restricted to a fixed set
+			   of known values */
+			model2 = GTK_TREE_MODEL(
+				create_model_resv(display_data_resv[i].id));
+			if(!model2) {
+				g_print("no model set up for %d(%s)\n",
+					display_data_resv[i].id,
+					display_data_resv[i].name);
+				continue;
+			}
+			entry = gtk_combo_box_new_with_model(model2);
+			g_object_unref(model2);
+			
+			_set_active_combo_resv(GTK_COMBO_BOX(entry), model,
+					      iter, display_data_resv[i].id);
+			
+			g_signal_connect(entry, "changed",
+					 G_CALLBACK(
+						 _admin_edit_combo_box_resv),
+					 resv_msg);
+			
+			renderer = gtk_cell_renderer_text_new();
+			gtk_cell_layout_pack_start(GTK_CELL_LAYOUT(entry),
+						   renderer, TRUE);
+			gtk_cell_layout_add_attribute(GTK_CELL_LAYOUT(entry),
+						      renderer, "text", 0);
+		} else if(display_data_resv[i].extra == EDIT_TEXTBOX) {
+			/* other editable items take free-form text */
+			entry = create_entry();
+			gtk_tree_model_get(model, iter,
+					   display_data_resv[i].id,
+					   &temp_char, -1);
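+			/* stash the column id in the entry's max length
+			   so _admin_focus_out_resv() can recover it */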
+			gtk_entry_set_max_length(GTK_ENTRY(entry), 
+						 (DEFAULT_ENTRY_LENGTH +
+						  display_data_resv[i].id));
+			
+			if(temp_char) {
+				gtk_entry_set_text(GTK_ENTRY(entry),
+						   temp_char);
+				g_free(temp_char);
+			}
+			g_signal_connect(entry, "focus-out-event",
+					 G_CALLBACK(_admin_focus_out_resv),
+					 resv_msg);
+		} else /* others can't be altered by the user */
+			continue;
+		label = gtk_label_new(display_data_resv[i].name);
+		gtk_table_attach(table, label, 0, 1, row, row+1,
+				 GTK_FILL | GTK_EXPAND, GTK_SHRINK, 
+				 0, 0);
+		gtk_table_attach(table, entry, 1, 2, row, row+1,
+				 GTK_FILL, GTK_SHRINK,
+				 0, 0);
+		row++;
+	}
+	gtk_table_resize(table, row, 2);
+	
+	return GTK_WIDGET(window);
+}
+
+static void _layout_resv_record(GtkTreeView *treeview, 
+				sview_resv_info_t *sview_resv_info,
+				int update)
+{
+	GtkTreeIter iter;
+	char time_buf[20];
+	reserve_info_t *resv_ptr = sview_resv_info->resv_ptr;
+	char *temp_char = NULL;
+
+	GtkTreeStore *treestore = 
+		GTK_TREE_STORE(gtk_tree_view_get_model(treeview));
+	
+
+	add_display_treestore_line(update, treestore, &iter, 
+				   find_col_name(display_data_resv,
+						 SORTID_NAME),
+				   resv_ptr->name);
+
+	convert_num_unit((float)resv_ptr->node_cnt, 
+			 time_buf, sizeof(time_buf), UNIT_NONE);
+	add_display_treestore_line(update, treestore, &iter, 
+				   find_col_name(display_data_resv,
+						 SORTID_NODE_CNT), 
+				   time_buf);
+
+	add_display_treestore_line(update, treestore, &iter, 
+				   find_col_name(display_data_resv,
+						 SORTID_NODE_LIST),
+				   resv_ptr->node_list);
+
+	slurm_make_time_str((time_t *)&resv_ptr->start_time, time_buf,
+			    sizeof(time_buf));
+	add_display_treestore_line(update, treestore, &iter, 
+				   find_col_name(display_data_resv,
+						 SORTID_START_TIME), 
+				   time_buf);
+	slurm_make_time_str((time_t *)&resv_ptr->end_time, time_buf,
+			    sizeof(time_buf));
+	add_display_treestore_line(update, treestore, &iter, 
+				   find_col_name(display_data_resv,
+						 SORTID_END_TIME), 
+				   time_buf);
+
+	secs2time_str((uint32_t)difftime(resv_ptr->end_time,
+					 resv_ptr->start_time),
+		      time_buf, sizeof(time_buf));
+	add_display_treestore_line(update, treestore, &iter, 
+				   find_col_name(display_data_resv,
+						 SORTID_DURATION), 
+				   time_buf);
+
+	add_display_treestore_line(update, treestore, &iter, 
+				   find_col_name(display_data_resv,
+						 SORTID_ACCOUNTS),
+				   resv_ptr->accounts);
+
+	add_display_treestore_line(update, treestore, &iter, 
+				   find_col_name(display_data_resv,
+						 SORTID_USERS),
+				   resv_ptr->users);
+
+	add_display_treestore_line(update, treestore, &iter, 
+				   find_col_name(display_data_resv,
+						 SORTID_PARTITION),
+				   resv_ptr->partition);
+
+	add_display_treestore_line(update, treestore, &iter, 
+				   find_col_name(display_data_resv,
+						 SORTID_FEATURES),
+				   resv_ptr->features);
+
+	temp_char = reservation_flags_string(resv_ptr->flags);
+	add_display_treestore_line(update, treestore, &iter, 
+				   find_col_name(display_data_resv,
+						 SORTID_FLAGS),
+				   temp_char);
+	xfree(temp_char);
+}
+
+static void _update_resv_record(sview_resv_info_t *sview_resv_info_ptr, 
+				GtkTreeStore *treestore,
+				GtkTreeIter *iter)
+{
+	char *tmp_ptr = NULL;
+	char tmp_char[50];
+	reserve_info_t *resv_ptr = sview_resv_info_ptr->resv_ptr;
+      
+	gtk_tree_store_set(treestore, iter, SORTID_UPDATED, 1, -1);
+
+	gtk_tree_store_set(treestore, iter,
+			   SORTID_ACCOUNTS, resv_ptr->accounts, -1);
+
+	secs2time_str((uint32_t)difftime(resv_ptr->end_time, 
+					  resv_ptr->start_time), 
+		      tmp_char, sizeof(tmp_char));
+	gtk_tree_store_set(treestore, iter, SORTID_DURATION, tmp_char, -1);
+
+	slurm_make_time_str((time_t *)&resv_ptr->end_time, tmp_char,
+			    sizeof(tmp_char));
+	gtk_tree_store_set(treestore, iter, SORTID_END_TIME, tmp_char, -1);
+
+	gtk_tree_store_set(treestore, iter, SORTID_FEATURES,
+			   resv_ptr->features, -1);
+
+	tmp_ptr = reservation_flags_string(resv_ptr->flags);
+	gtk_tree_store_set(treestore, iter, SORTID_FLAGS,
+			   tmp_ptr, -1);
+	xfree(tmp_ptr);
+
+	gtk_tree_store_set(treestore, iter, SORTID_NAME, resv_ptr->name, -1);
+
+	convert_num_unit((float)resv_ptr->node_cnt,
+			 tmp_char, sizeof(tmp_char), UNIT_NONE);
+	gtk_tree_store_set(treestore, iter, 
+			   SORTID_NODE_CNT, tmp_char, -1);
+
+	gtk_tree_store_set(treestore, iter, 
+			   SORTID_NODE_LIST, resv_ptr->node_list, -1);
+
+	gtk_tree_store_set(treestore, iter, 
+			   SORTID_NODE_INX, resv_ptr->node_inx, -1);
+
+	gtk_tree_store_set(treestore, iter, 
+			   SORTID_PARTITION, resv_ptr->partition, -1);
+
+	slurm_make_time_str((time_t *)&resv_ptr->start_time, tmp_char,
+			    sizeof(tmp_char));
+	gtk_tree_store_set(treestore, iter, SORTID_START_TIME, tmp_char, -1);
+
+	gtk_tree_store_set(treestore, iter,
+			   SORTID_USERS, resv_ptr->users, -1);
+		
+	return;
+}
+
+static void _append_resv_record(sview_resv_info_t *sview_resv_info_ptr, 
+				GtkTreeStore *treestore, GtkTreeIter *iter,
+				int line)
+{
+	gtk_tree_store_append(treestore, iter, NULL);
+	gtk_tree_store_set(treestore, iter, SORTID_POS, line, -1);
+	_update_resv_record(sview_resv_info_ptr, treestore, iter);	
+}
+
+static void _update_info_resv(List info_list, 
+			      GtkTreeView *tree_view)
+{
+	GtkTreePath *path = gtk_tree_path_new_first();
+	GtkTreeModel *model = gtk_tree_view_get_model(tree_view);
+	GtkTreeIter iter;
+	reserve_info_t *resv_ptr = NULL;
+	int line = 0;
+	char *host = NULL, *resv_name = NULL;
+	ListIterator itr = NULL;
+	sview_resv_info_t *sview_resv_info = NULL;
+
+	/* get the first iter; if the model is empty there is
+	   nothing to reset */
+	if (gtk_tree_model_get_iter(model, &iter, path)) {
+		/* clear the updated flag on every existing
+		   reservation row */
+		while(1) {
+			gtk_tree_store_set(GTK_TREE_STORE(model), &iter, 
+					   SORTID_UPDATED, 0, -1);	
+			if(!gtk_tree_model_iter_next(model, &iter)) {
+				break;
+			}
+		}
+	}
+
+	itr = list_iterator_create(info_list);
+	while ((sview_resv_info = (sview_resv_info_t*) list_next(itr))) {
+		resv_ptr = sview_resv_info->resv_ptr;
+		/* get the iter; if the model is empty go straight
+		   to adding */
+		if (!gtk_tree_model_get_iter(model, &iter, path)) {
+			goto adding;
+		} 
+		while(1) {
+			/* search for the reservation name and check
+			   to see if it is in the list */
+			gtk_tree_model_get(model, &iter, SORTID_NAME, 
+					   &resv_name, -1);
+			if(!strcmp(resv_name, resv_ptr->name)) {
+				/* update with new info */
+				g_free(resv_name);
+				_update_resv_record(sview_resv_info, 
+						    GTK_TREE_STORE(model), 
+						    &iter);
+				goto found;
+			}
+			g_free(resv_name);
+				
+			/* see what line we were on to add the next one 
+			   to the list */
+			gtk_tree_model_get(model, &iter, SORTID_POS, 
+					   &line, -1);
+			if(!gtk_tree_model_iter_next(model, &iter)) {
+				line++;
+				break;
+			}
+		}
+	adding:
+		_append_resv_record(sview_resv_info, GTK_TREE_STORE(model), 
+				    &iter, line);
+	found:
+		;
+	}
+	list_iterator_destroy(itr);
+	if(host)
+		free(host);
+
+	gtk_tree_path_free(path);
+	/* remove all old reservations */
+	remove_old(model, SORTID_UPDATED);
+	return;
+}
+
+static int _sview_resv_sort_aval_dec(sview_resv_info_t* rec_a,
+				     sview_resv_info_t* rec_b)
+{
+	int size_a = rec_a->resv_ptr->node_cnt;
+	int size_b = rec_b->resv_ptr->node_cnt;
+
+	if (size_a > size_b)
+		return -1;
+	else if (size_a < size_b)
+		return 1;
+
+	if(rec_a->resv_ptr->node_list && rec_b->resv_ptr->node_list) {
+		size_a = strcmp(rec_a->resv_ptr->node_list, 
+				rec_b->resv_ptr->node_list);
+		if (size_a > 0)
+			return -1;
+		else if (size_a < 0)
+			return 1;
+	}
+	return 0;
+}
+
+static List _create_resv_info_list(reserve_info_msg_t *resv_info_ptr,
+				   int changed)
+{
+	static List info_list = NULL;
+	int i = 0;
+	sview_resv_info_t *sview_resv_info_ptr = NULL;
+	reserve_info_t *resv_ptr = NULL;
+
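+	/* the info list is cached across calls; rebuild it only when
+	   the reservation data actually changed */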
+	if(!changed && info_list) 
+		goto update_color;
+	
+	if(info_list) 
+		list_flush(info_list);
+	else
+		info_list = list_create(_resv_info_list_del);
+
+	if (!info_list) {
+		g_print("malloc error\n");
+		return NULL;
+	}
+	
+	for(i=0; i<resv_info_ptr->record_count; i++) {
+		resv_ptr = &(resv_info_ptr->reservation_array[i]);
+		sview_resv_info_ptr = xmalloc(sizeof(sview_resv_info_t));
+		sview_resv_info_ptr->resv_ptr = resv_ptr;
+		list_append(info_list, sview_resv_info_ptr);
+	}
+
+	list_sort(info_list,
+		  (ListCmpF)_sview_resv_sort_aval_dec);
+
+update_color:
+	return info_list;
+}
+
+void _display_info_resv(List info_list, popup_info_t *popup_win)
+{
+	specific_info_t *spec_info = popup_win->spec_info;
+	char *name = (char *)spec_info->search_info->gchar_data;
+	int found = 0;
+	reserve_info_t *resv_ptr = NULL;
+	GtkTreeView *treeview = NULL;
+	ListIterator itr = NULL;
+	sview_resv_info_t *sview_resv_info = NULL;
+	int update = 0;
+	int i = -1, j = 0;
+
+	if(!spec_info->search_info->gchar_data) {
+		//info = xstrdup("No pointer given!");
+		goto finished;
+	}
+
+need_refresh:
+	if(!spec_info->display_widget) {
+		treeview = create_treeview_2cols_attach_to_table(
+			popup_win->table);
+		spec_info->display_widget = 
+			gtk_widget_ref(GTK_WIDGET(treeview));
+	} else {
+		treeview = GTK_TREE_VIEW(spec_info->display_widget);
+		update = 1;
+	}
+
+	itr = list_iterator_create(info_list);
+	while ((sview_resv_info = (sview_resv_info_t*) list_next(itr))) {
+		resv_ptr = sview_resv_info->resv_ptr;
+		i++;
+		if(!strcmp(resv_ptr->name, name)) {
+			j=0;
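+			/* node_inx holds (start, end) node index
+			   pairs, terminated by -1 */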
+			while(resv_ptr->node_inx[j] >= 0) {
+				change_grid_color(
+					popup_win->grid_button_list,
+					resv_ptr->node_inx[j],
+					resv_ptr->node_inx[j+1], i, true);
+				j += 2;
+			}
+			_layout_resv_record(treeview, sview_resv_info, update);
+			found = 1;
+			break;
+		}
+	}
+	list_iterator_destroy(itr);
+	
+	if(!found) {
+		if(!popup_win->not_found) { 
+			char *temp = "RESERVATION DOESN'T EXIST\n";
+			GtkTreeIter iter;
+			GtkTreeModel *model = NULL;
+	
+			/* only time this will be run so no update */
+			model = gtk_tree_view_get_model(treeview);
+			add_display_treestore_line(0, 
+						   GTK_TREE_STORE(model), 
+						   &iter,
+						   temp, "");
+		}
+		popup_win->not_found = true;
+	} else {
+		if(popup_win->not_found) { 
+			popup_win->not_found = false;
+			gtk_widget_destroy(spec_info->display_widget);
+			
+			goto need_refresh;
+		}
+	}
+	gtk_widget_show(spec_info->display_widget);
+
+finished:
+	return;
+}
+
+extern void refresh_resv(GtkAction *action, gpointer user_data)
+{
+	popup_info_t *popup_win = (popup_info_t *)user_data;
+	xassert(popup_win != NULL);
+	xassert(popup_win->spec_info != NULL);
+	xassert(popup_win->spec_info->title != NULL);
+	popup_win->force_refresh = 1;
+	specific_info_resv(popup_win);
+}
+
+extern int get_new_info_resv(reserve_info_msg_t **info_ptr, 
+			     int force)
+{
+	static reserve_info_msg_t *resv_info_ptr = NULL, *new_resv_ptr = NULL;
+	int error_code = SLURM_NO_CHANGE_IN_DATA;
+	time_t now = time(NULL);
+	static time_t last;
+	static bool changed = 0;
+		
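+	/* serve cached data unless a refresh was forced or the refresh
+	   interval has elapsed */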
+	if(!force && ((now - last) < global_sleep_time)) {
+		if(*info_ptr != resv_info_ptr) 
+			error_code = SLURM_SUCCESS;
+		*info_ptr = resv_info_ptr;
+		if(changed) 
+			return SLURM_SUCCESS;
+		return error_code;
+	}
+	last = now;
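+	/* request only changes since the previous load; slurm sets
+	   errno to SLURM_NO_CHANGE_IN_DATA when nothing has changed */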
+	if (resv_info_ptr) {
+		error_code = slurm_load_reservations(resv_info_ptr->last_update,
+						     &new_resv_ptr);
+		if (error_code == SLURM_SUCCESS) {
+			slurm_free_reservation_info_msg(resv_info_ptr);
+			changed = 1;
+		} else if (slurm_get_errno() == SLURM_NO_CHANGE_IN_DATA) {
+			error_code = SLURM_NO_CHANGE_IN_DATA;
+			new_resv_ptr = resv_info_ptr;
+			changed = 0;
+		}
+	} else {
+		error_code = slurm_load_reservations((time_t) NULL, 
+						     &new_resv_ptr);
+		changed = 1;
+	}
+
+	resv_info_ptr = new_resv_ptr;
+
+	if(*info_ptr != resv_info_ptr) 
+		error_code = SLURM_SUCCESS;
+
+	*info_ptr = new_resv_ptr;
+	return error_code;
+}
+
+extern GtkListStore *create_model_resv(int type)
+{
+	GtkListStore *model = NULL;
+	GtkTreeIter iter;
+
+	switch(type) {
+	case SORTID_ACTION:
+		model = gtk_list_store_new(2, G_TYPE_STRING, G_TYPE_INT);
+		gtk_list_store_append(model, &iter);
+		gtk_list_store_set(model, &iter,
+				   1, SORTID_ACTION,
+				   0, "None",
+				   -1);	
+		gtk_list_store_append(model, &iter);
+		gtk_list_store_set(model, &iter,
+				   1, SORTID_ACTION,
+				   0, "Remove",
+				   -1);	
+		break;
+	default:
+		break;
+	}
+	return model;
+}
+
+extern void admin_edit_resv(GtkCellRendererText *cell,
+			    const char *path_string,
+			    const char *new_text,
+			    gpointer data)
+{
+	GtkTreeStore *treestore = GTK_TREE_STORE(data);
+	GtkTreePath *path = gtk_tree_path_new_from_string(path_string);
+	GtkTreeIter iter;
+	resv_desc_msg_t *resv_msg = xmalloc(sizeof(resv_desc_msg_t));
+	
+	char *temp = NULL;
+	char *old_text = NULL;
+	const char *type = NULL;
+	
+	int column = GPOINTER_TO_INT(g_object_get_data(G_OBJECT(cell), 
+						       "column"));
+
+	if(!new_text || !strcmp(new_text, ""))
+		goto no_input;
+	
+	gtk_tree_model_get_iter(GTK_TREE_MODEL(treestore), &iter, path);
+
+	slurm_init_resv_desc_msg(resv_msg);	
+	gtk_tree_model_get(GTK_TREE_MODEL(treestore), &iter, 
+			   SORTID_NAME, &temp, 
+			   column, &old_text,
+			   -1);
+	resv_msg->name = xstrdup(temp);
+	g_free(temp);
+
+	type = _set_resv_msg(resv_msg, new_text, column);
+	if(errno) 
+		goto print_error;
+	
+	if(got_edit_signal) {
+		temp = got_edit_signal;
+		got_edit_signal = NULL;
+		admin_resv(GTK_TREE_MODEL(treestore), &iter, temp);
+		xfree(temp);
+		goto no_input;
+	}
+			
+	if(old_text && !strcmp(old_text, new_text)) {
+		temp = g_strdup_printf("No change in value.");
+	} else if(slurm_update_reservation(resv_msg) == SLURM_SUCCESS) {
+		gtk_tree_store_set(treestore, &iter, column, new_text, -1);
+		temp = g_strdup_printf("Reservation %s %s changed to %s",
+				       resv_msg->name,
+				       type,
+				       new_text);
+	} else if(errno == ESLURM_DISABLED) {
+		temp = g_strdup_printf(
+			"Can only edit %s on reservations not yet started.",
+			type);
+	} else {
+	print_error:
+		temp = g_strdup_printf("Reservation %s %s can't be "
+				       "set to %s",
+				       resv_msg->name,
+				       type,
+				       new_text);
+	}
+	
+	display_edit_note(temp);
+	g_free(temp);
+
+no_input:
+	slurm_free_resv_desc_msg(resv_msg);
+			  
+	gtk_tree_path_free (path);
+	g_free(old_text);
+	g_static_mutex_unlock(&sview_mutex);
+}
+
+extern void get_info_resv(GtkTable *table, display_data_t *display_data)
+{
+	int error_code = SLURM_SUCCESS;
+	List info_list = NULL;
+	static int view = -1;
+	static reserve_info_msg_t *resv_info_ptr = NULL;
+	char error_char[100];
+	GtkWidget *label = NULL;
+	GtkTreeView *tree_view = NULL;
+	static GtkWidget *display_widget = NULL;
+	int i = 0, j = 0;
+	int changed = 1;
+	ListIterator itr = NULL;
+	sview_resv_info_t *sview_resv_info_ptr = NULL;
+	reserve_info_t *resv_ptr = NULL;
+	time_t now = time(NULL);
+		
+	if(display_data)
+		local_display_data = display_data;
+	if(!table) {
+		display_data_resv->set_menu = local_display_data->set_menu;
+		return;
+	}
+	if(display_widget && toggled) {
+		gtk_widget_destroy(display_widget);
+		display_widget = NULL;
+		goto display_it;
+	}
+
+	error_code = get_new_info_resv(&resv_info_ptr, force_refresh);
+	if(error_code == SLURM_NO_CHANGE_IN_DATA) {
+		changed = 0;
+	} else if (error_code != SLURM_SUCCESS) {
+		if(view == ERROR_VIEW)
+			goto end_it;
+		if(display_widget)
+			gtk_widget_destroy(display_widget);
+		view = ERROR_VIEW;
+		sprintf(error_char, "slurm_load_reservations: %s",
+			slurm_strerror(slurm_get_errno()));
+		label = gtk_label_new(error_char);
+		gtk_table_attach_defaults(table, label, 0, 1, 0, 1); 
+		gtk_widget_show(label);	
+		display_widget = gtk_widget_ref(GTK_WIDGET(label));
+		goto end_it;
+	}
+
+display_it:
+	info_list = _create_resv_info_list(resv_info_ptr, changed);
+	if(!info_list)
+		return;
+	/* set up the grid */
+	itr = list_iterator_create(info_list);
+	while ((sview_resv_info_ptr = list_next(itr))) {
+		resv_ptr = sview_resv_info_ptr->resv_ptr;
+		if ((resv_ptr->start_time > now) ||
+		    (resv_ptr->end_time   < now))
+			continue;	/* only map current reservations */
+		j=0;
+		while(resv_ptr->node_inx[j] >= 0) {
+			sview_resv_info_ptr->color = 
+				change_grid_color(grid_button_list,
+						  resv_ptr->node_inx[j],
+						  resv_ptr->node_inx[j+1],
+						  i, true);
+			j += 2;
+		}
+		i++;
+	}
+	list_iterator_destroy(itr);
+		
+	if(view == ERROR_VIEW && display_widget) {
+		gtk_widget_destroy(display_widget);
+		display_widget = NULL;
+	}
+	if(!display_widget) {
+		tree_view = create_treeview(local_display_data);
+
+		display_widget = gtk_widget_ref(GTK_WIDGET(tree_view));
+		gtk_table_attach_defaults(table,
+					  GTK_WIDGET(tree_view),
+					  0, 1, 0, 1);
+		/* since this function sets the model of the tree_view 
+		   to the treestore we don't really care about 
+		   the return value */
+		create_treestore(tree_view, display_data_resv, SORTID_CNT);
+	}
+	view = INFO_VIEW;
+	_update_info_resv(info_list, GTK_TREE_VIEW(display_widget));
+end_it:
+	toggled = FALSE;
+	force_refresh = FALSE;
+	
+	return;
+}
+
+extern void specific_info_resv(popup_info_t *popup_win)
+{
+	int resv_error_code = SLURM_SUCCESS;
+	static reserve_info_msg_t *resv_info_ptr = NULL;
+	static reserve_info_t *resv_ptr = NULL;
+	specific_info_t *spec_info = popup_win->spec_info;
+	sview_search_info_t *search_info = spec_info->search_info;
+	char error_char[100];
+	GtkWidget *label = NULL;
+	GtkTreeView *tree_view = NULL;
+	List resv_list = NULL;
+	List send_resv_list = NULL;
+	int changed = 1;
+	sview_resv_info_t *sview_resv_info_ptr = NULL;
+	int j=0, i=-1;
+	hostset_t hostset = NULL;
+	ListIterator itr = NULL;
+	
+	if(!spec_info->display_widget) {
+		setup_popup_info(popup_win, display_data_resv, SORTID_CNT);
+	}
+
+	if(spec_info->display_widget && popup_win->toggled) {
+		gtk_widget_destroy(spec_info->display_widget);
+		spec_info->display_widget = NULL;
+		goto display_it;
+	}
+	
+	if((resv_error_code = 
+	    get_new_info_resv(&resv_info_ptr, popup_win->force_refresh))
+	   == SLURM_NO_CHANGE_IN_DATA) { 
+		if(!spec_info->display_widget || spec_info->view == ERROR_VIEW)
+			goto display_it;
+		changed = 0;		
+	} else if (resv_error_code != SLURM_SUCCESS) {
+		if(spec_info->view == ERROR_VIEW)
+			goto end_it;
+		spec_info->view = ERROR_VIEW;
+		if(spec_info->display_widget)
+			gtk_widget_destroy(spec_info->display_widget);
+		sprintf(error_char, "get_new_info_resv: %s",
+			slurm_strerror(slurm_get_errno()));
+		label = gtk_label_new(error_char);
+		gtk_table_attach_defaults(popup_win->table, 
+					  label,
+					  0, 1, 0, 1); 
+		gtk_widget_show(label);	
+		spec_info->display_widget = gtk_widget_ref(label);
+		goto end_it;
+	}
+	
+display_it:
+	
+	resv_list = _create_resv_info_list(resv_info_ptr, changed);
+
+	if(!resv_list)
+		return;
+
+	if(spec_info->view == ERROR_VIEW && spec_info->display_widget) {
+		gtk_widget_destroy(spec_info->display_widget);
+		spec_info->display_widget = NULL;
+	}
+	if(spec_info->type != INFO_PAGE && !spec_info->display_widget) {
+		tree_view = create_treeview(local_display_data);
+		spec_info->display_widget = 
+			gtk_widget_ref(GTK_WIDGET(tree_view));
+		gtk_table_attach_defaults(popup_win->table, 
+					  GTK_WIDGET(tree_view),
+					  0, 1, 0, 1); 
+		/* since this function sets the model of the tree_view 
+		   to the treestore we don't really care about 
+		   the return value */
+		create_treestore(tree_view, 
+				 popup_win->display_data, SORTID_CNT);
+	}
+
+	setup_popup_grid_list(popup_win);
+
+	spec_info->view = INFO_VIEW;
+	if(spec_info->type == INFO_PAGE) {
+		_display_info_resv(resv_list, popup_win);
+		goto end_it;
+	}
+
+	/* send_resv_list only references entries owned by resv_list,
+	   so create it without a delete function; destroying it frees
+	   just the list itself */
+	send_resv_list = list_create(NULL);	
+	itr = list_iterator_create(resv_list);
+	i = -1;
+	while ((sview_resv_info_ptr = list_next(itr))) {
+		i++;
+		resv_ptr = sview_resv_info_ptr->resv_ptr;	
+		switch(spec_info->type) {
+		case PART_PAGE:
+		case BLOCK_PAGE:
+		case NODE_PAGE:
+			if(!resv_ptr->node_list)
+				continue;
+			
+			if(!(hostset = hostset_create(search_info->gchar_data)))
+				continue;
+			if(!hostset_intersects(hostset, resv_ptr->node_list)) {
+				hostset_destroy(hostset);
+				continue;
+			}
+			hostset_destroy(hostset);				
+			break;
+		case JOB_PAGE:
+			if(strcmp(resv_ptr->name, 
+				  search_info->gchar_data)) 
+				continue;
+			break;
+		case RESV_PAGE:
+			switch(search_info->search_type) {
+			case SEARCH_RESERVATION_NAME:
+				if(!search_info->gchar_data)
+					continue;
+				
+				if(strcmp(resv_ptr->name, 
+					  search_info->gchar_data)) 
+					continue;
+				break;
+			default:
+				continue;
+			}
+			break;
+		default:
+			g_print("Unknown type %d\n", spec_info->type);
+			continue;
+		}
+		list_push(send_resv_list, sview_resv_info_ptr);
+		j=0;
+		while(resv_ptr->node_inx[j] >= 0) {
+			change_grid_color(popup_win->grid_button_list,
+					  resv_ptr->node_inx[j],
+					  resv_ptr->node_inx[j+1], i, false);
+			j += 2;
+		}
+	}
+	list_iterator_destroy(itr);
+
+	_update_info_resv(send_resv_list, 
+			  GTK_TREE_VIEW(spec_info->display_widget));
+	list_destroy(send_resv_list);
+end_it:
+	popup_win->toggled = 0;
+	popup_win->force_refresh = 0;
+	
+	return;
+}
+
+extern void set_menus_resv(void *arg, GtkTreePath *path, 
+			   GtkMenu *menu, int type)
+{
+	GtkTreeView *tree_view = (GtkTreeView *)arg;
+	popup_info_t *popup_win = (popup_info_t *)arg;
+	switch(type) {
+	case TAB_CLICKED:
+		make_fields_menu(menu, display_data_resv, SORTID_CNT);
+		break;
+	case ROW_CLICKED:
+		make_options_menu(tree_view, path, menu, options_data_resv);
+		break;
+	case POPUP_CLICKED:
+		make_popup_fields_menu(popup_win, menu);
+		break;
+	default:
+		g_error("UNKNOWN type %d given to set_fields\n", type);
+	}
+}
+
+extern void popup_all_resv(GtkTreeModel *model, GtkTreeIter *iter, int id)
+{
+	char *name = NULL;
+	char title[100];
+	ListIterator itr = NULL;
+	popup_info_t *popup_win = NULL;
+	GError *error = NULL;
+				
+	gtk_tree_model_get(model, iter, SORTID_NAME, &name, -1);
+
+	switch(id) {
+	case PART_PAGE:
+		snprintf(title, 100, "Partition(s) with reservation %s", name);
+		break;
+	case JOB_PAGE:
+		snprintf(title, 100, "Job(s) in reservation %s", name);
+		break;
+	case NODE_PAGE:
+#ifdef HAVE_BG
+		snprintf(title, 100, 
+			 "Base partitions(s) in reservation %s",
+			 name);
+#else
+		snprintf(title, 100, "Node(s) in reservation %s",
+			 name);
+#endif
+		break;
+	case BLOCK_PAGE: 
+		snprintf(title, 100, "Block(s) in reservation %s", name);
+		break;
+	case SUBMIT_PAGE: 
+		snprintf(title, 100, "Submit job in reservation %s", name);
+		break;
+	case INFO_PAGE: 
+		snprintf(title, 100, "Full info for reservation %s", name);
+		break;
+	default:
+		g_print("resv got %d\n", id);
+	}
+	
+	itr = list_iterator_create(popup_list);
+	while((popup_win = list_next(itr))) {
+		if(popup_win->spec_info)
+			if(!strcmp(popup_win->spec_info->title, title)) {
+				break;
+			} 
+	}
+	list_iterator_destroy(itr);
+
+	if(!popup_win) {
+		if(id == INFO_PAGE)
+			popup_win = create_popup_info(id, RESV_PAGE, title);
+		else
+			popup_win = create_popup_info(RESV_PAGE, id, title);
+	} else {
+		g_free(name);
+		gtk_window_present(GTK_WINDOW(popup_win->popup));
+		return;
+	}
+
+	/* Pass the model and the structs from the iter so we can always get
+	   the current node_inx.
+	*/
+	popup_win->model = model;
+	popup_win->iter = *iter;
+	popup_win->node_inx_id = SORTID_NODE_INX;
+
+	switch(id) {
+	case JOB_PAGE:
+	case INFO_PAGE:
+		popup_win->spec_info->search_info->gchar_data = name;
+		//specific_info_job(popup_win);
+		break;
+	case BLOCK_PAGE: 
+	case NODE_PAGE:
+	case PART_PAGE:
+		g_free(name);
+		gtk_tree_model_get(model, iter, SORTID_NODE_LIST, &name, -1);
+		popup_win->spec_info->search_info->gchar_data = name;
+		popup_win->spec_info->search_info->search_type = 
+			SEARCH_NODE_NAME;
+		//specific_info_node(popup_win);
+		break;
+	case SUBMIT_PAGE: 
+		break;
+	default:
+		g_print("resv got unknown type %d\n", id);
+	}
+	if (!g_thread_create((gpointer)popup_thr, popup_win, FALSE, &error))
+	{
+		g_printerr ("Failed to create resv popup thread: %s\n", 
+			    error->message);
+		return;
+	}		
+}
+
+extern void admin_resv(GtkTreeModel *model, GtkTreeIter *iter, char *type)
+{
+	resv_desc_msg_t *resv_msg = xmalloc(sizeof(resv_desc_msg_t));
+	reservation_name_msg_t resv_name_msg;
+	char *resvid = NULL;
+	char tmp_char[100];
+	char *temp = NULL;
+	int edit_type = 0;
+	int response = 0;	
+	GtkWidget *label = NULL;
+	GtkWidget *entry = NULL;
+	GtkWidget *popup = gtk_dialog_new_with_buttons(
+		type,
+		GTK_WINDOW(main_window),
+		GTK_DIALOG_MODAL | GTK_DIALOG_DESTROY_WITH_PARENT,
+		NULL);
+	gtk_window_set_transient_for(GTK_WINDOW(popup), NULL);
+
+	gtk_tree_model_get(model, iter, SORTID_NAME, &resvid, -1);
+
+	slurm_init_resv_desc_msg(resv_msg);
+	memset(&resv_name_msg, 0, sizeof(reservation_name_msg_t));
+	
+	resv_msg->name = xstrdup(resvid);
+		
+	if(!strcasecmp("Remove", type)) {
+		resv_name_msg.name = resvid;
+
+		label = gtk_dialog_add_button(GTK_DIALOG(popup),
+					      GTK_STOCK_YES, GTK_RESPONSE_OK);
+		gtk_window_set_default(GTK_WINDOW(popup), label);
+		gtk_dialog_add_button(GTK_DIALOG(popup),
+				      GTK_STOCK_CANCEL, GTK_RESPONSE_CANCEL);
+		
+		snprintf(tmp_char, sizeof(tmp_char),
+			 "Are you sure you want to remove "
+			 "reservation %s?",
+			 resvid);
+		label = gtk_label_new(tmp_char);
+		edit_type = EDIT_REMOVE;
+	} else {
+		label = gtk_dialog_add_button(GTK_DIALOG(popup),
+					      GTK_STOCK_OK, GTK_RESPONSE_OK);
+		gtk_window_set_default(GTK_WINDOW(popup), label);
+		gtk_dialog_add_button(GTK_DIALOG(popup),
+				      GTK_STOCK_CANCEL, GTK_RESPONSE_CANCEL);
+
+		gtk_window_set_default_size(GTK_WINDOW(popup), 200, 400);
+		snprintf(tmp_char, sizeof(tmp_char), 
+			 "Editing reservation %s; think before you type",
+			 resvid);
+		label = gtk_label_new(tmp_char);
+		edit_type = EDIT_EDIT;
+		entry = _admin_full_edit_resv(resv_msg, model, iter);
+	}
+
+	gtk_box_pack_start(GTK_BOX(GTK_DIALOG(popup)->vbox), 
+			   label, FALSE, FALSE, 0);
+	if(entry)
+		gtk_box_pack_start(GTK_BOX(GTK_DIALOG(popup)->vbox), 
+				   entry, TRUE, TRUE, 0);
+	gtk_widget_show_all(popup);
+	response = gtk_dialog_run (GTK_DIALOG(popup));
+
+	if (response == GTK_RESPONSE_OK) {
+		switch(edit_type) {
+		case EDIT_REMOVE:
+			if(slurm_delete_reservation(&resv_name_msg)
+			   == SLURM_SUCCESS) {
+				temp = g_strdup_printf(
+					"Reservation %s removed successfully",
+					resvid);
+			} else {
+				temp = g_strdup_printf(
+					"Problem removing reservation %s.",
+					resvid);
+			}			
+			display_edit_note(temp);
+			g_free(temp);
+			break;
+		case EDIT_EDIT:
+			if(got_edit_signal) 
+				goto end_it;
+			if(slurm_update_reservation(resv_msg)
+			   == SLURM_SUCCESS) {
+				temp = g_strdup_printf(
+					"Reservation %s updated successfully",
+					resvid);
+			} else {
+				temp = g_strdup_printf(
+					"Problem updating reservation %s.",
+					resvid);
+			}
+			display_edit_note(temp);
+			g_free(temp);
+			break;
+		default:
+			break;
+		}
+	}
+end_it:
+		
+	g_free(resvid);
+	slurm_free_resv_desc_msg(resv_msg);
+	gtk_widget_destroy(popup);
+	if(got_edit_signal) {
+		type = got_edit_signal;
+		got_edit_signal = NULL;
+		admin_resv(model, iter, type);
+		xfree(type);
+	}			
+	return;
+}
+
diff --git a/src/sview/submit_info.c b/src/sview/submit_info.c
index 19543917b3a06c3cc7a838f92c118cf079dc5708..c767ae7700c1ce9ad327b4628687ab8a1b55e5c7 100644
--- a/src/sview/submit_info.c
+++ b/src/sview/submit_info.c
@@ -6,10 +6,11 @@
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Danny Auble <da@llnl.gov>
  *
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/src/sview/sview.c b/src/sview/sview.c
index 74979e6ba514ce1f5445df113fc7d7f15a4a9719..cc832a9bc13e87e842c4570de23fa9da28bd48b9 100644
--- a/src/sview/sview.c
+++ b/src/sview/sview.c
@@ -2,13 +2,14 @@
  *  sview.c - main for sview
  *****************************************************************************
  *  Copyright (C) 2002-2007 The Regents of the University of California.
- *  Copyright (C) 2008 Lawrence Livermore National Security.
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Danny Auble <da@llnl.gov>, et. al.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -78,6 +79,10 @@ display_data_t main_display_data[] = {
 	 refresh_main, create_model_part, admin_edit_part,
 	 get_info_part, specific_info_part, 
 	 set_menus_part, NULL},
+	{G_TYPE_NONE, RESV_PAGE, "Reservations", TRUE, -1, 
+	 refresh_main, create_model_resv, admin_edit_resv,
+	 get_info_resv, specific_info_resv, 
+	 set_menus_resv, NULL},
 #ifdef HAVE_BG
 	{G_TYPE_NONE, BLOCK_PAGE, "BG Blocks", TRUE, -1,
 	 refresh_main, NULL, NULL,
@@ -379,6 +384,8 @@ static GtkWidget *_get_menubar_menu(GtkWidget *window, GtkWidget *notebook)
 		"      <separator/>"
 		"        <menuitem action='node_name'/>"
 		"        <menuitem action='node_state'/>"
+		"      <separator/>"
+		"        <menuitem action='reservation_name'/>"
 		"      </menu>"
 		"      <menuitem action='refresh'/>"
 		"      <menuitem action='reconfig'/>"
@@ -459,6 +466,9 @@ static GtkWidget *_get_menubar_menu(GtkWidget *window, GtkWidget *notebook)
 		 "", "Search for a Node in a given state", 
 #endif
 		 G_CALLBACK(create_search_popup)},		
+		{"reservation_name", NULL, "Reservation Name", 
+		 "", "Search for reservation", 
+		 G_CALLBACK(create_search_popup)},
 		{"tab_pos", NULL, "_Tab Pos"},
 		{"interval", GTK_STOCK_REFRESH, "Set Refresh _Interval", 
 		 "<control>i", "Change Refresh Interval", 
@@ -537,6 +547,7 @@ static GtkWidget *_get_menubar_menu(GtkWidget *window, GtkWidget *notebook)
 	/* Finally, return the actual menu bar created by the item factory. */
 	return gtk_ui_manager_get_widget (ui_manager, "/main");
 }
+
 void *_popup_thr_main(void *arg)
 {
 	popup_thr(arg);		
diff --git a/src/sview/sview.h b/src/sview/sview.h
index d55d5f26fe11c82a42641403f404eda3a7d57e80..e86b555701043837bac9fccb15f56ff59605f564 100644
--- a/src/sview/sview.h
+++ b/src/sview/sview.h
@@ -4,10 +4,11 @@
  *  Copyright (C) 2004 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Danny Auble <da@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -82,11 +83,15 @@
 #define POS_LOC 0
 #define DEFAULT_ENTRY_LENGTH 500
 
+#define MAKE_BLACK -2
+#define MAKE_WHITE -1
+
 enum { JOB_PAGE, 
        STEP_PAGE, 
-       PART_PAGE, 
+       PART_PAGE,
        NODE_PAGE, 
        BLOCK_PAGE, 
+       RESV_PAGE,
        SUBMIT_PAGE,
        ADMIN_PAGE,
        INFO_PAGE,
@@ -115,6 +120,18 @@ enum { EDIT_NONE,
        EDIT_TEXTBOX	
 };
 
+#ifdef HAVE_BG
+/* ERROR_STATE must be last since that will affect the state of the rest of the
+   midplane.
+*/
+enum {
+	SVIEW_BG_IDLE_STATE,
+	SVIEW_BG_ALLOC_STATE,
+	SVIEW_BG_ERROR_STATE
+};
+#endif
+
+
 typedef enum { SEARCH_JOB_ID = 1,
 	       SEARCH_JOB_USER,
 	       SEARCH_JOB_STATE,
@@ -125,7 +142,8 @@ typedef enum { SEARCH_JOB_ID = 1,
 	       SEARCH_PARTITION_NAME,
 	       SEARCH_PARTITION_STATE,
 	       SEARCH_NODE_NAME,
-	       SEARCH_NODE_STATE
+	       SEARCH_NODE_STATE,
+	       SEARCH_RESERVATION_NAME,
 } sview_search_type_t;
 
 
@@ -196,7 +214,9 @@ struct popup_info {
 	int toggled;
 	int force_refresh;
 	int *running;
+	int *node_inx;
 	int show_grid;
+	int full_grid;
 	bool not_found;
 	GtkWidget *popup;
 	GtkWidget *event_box;
@@ -205,6 +225,9 @@ struct popup_info {
 	List grid_button_list;
 	specific_info_t *spec_info;
 	display_data_t *display_data;
+	GtkTreeModel *model;
+	GtkTreeIter iter;
+	int node_inx_id;
 };
 
 typedef struct {
@@ -217,6 +240,7 @@ typedef struct {
 	int state;
 	int table_x;
 	int table_y;
+	bool used;
 } grid_button_t;
 
 typedef struct {
@@ -266,11 +290,12 @@ extern void destroy_grid_button(void *arg);
 extern grid_button_t *create_grid_button_from_another(
 	grid_button_t *grid_button, char *name, int color_inx);
 /* do not free the char * from this function it is static */
-extern char *change_grid_color(List button_list, int start_inx, int end_inx,
-			       int color_inx);
+extern char *change_grid_color(List button_list, int start, int end,
+			       int color_inx, bool change_unused);
+extern void set_grid_used(List button_list, int start, int end, bool used);
 extern void get_button_list_from_main(List *button_list, int start, int end,
 				      int color_inx);
-extern List copy_main_button_list();
+extern List copy_main_button_list(int initial_color);
 #ifdef HAVE_BG
 extern void add_extra_bluegene_buttons(List *button_list, int inx, 
 				       int *color_inx);
@@ -281,6 +306,7 @@ extern int get_system_stats(GtkTable *table);
 extern int setup_grid_table(GtkTable *table, List button_list, List node_list);
 extern void sview_init_grid();
 extern void sview_reset_grid();
+extern void setup_popup_grid_list(popup_info_t *popup_win);
 
 // part_info.c
 extern void refresh_part(GtkAction *action, gpointer user_data);
@@ -353,6 +379,22 @@ extern void set_menus_node(void *arg, GtkTreePath *path,
 extern void popup_all_node(GtkTreeModel *model, GtkTreeIter *iter, int id);
 extern void admin_node(GtkTreeModel *model, GtkTreeIter *iter, char *type);
 
+// resv_info.c
+extern void refresh_resv(GtkAction *action, gpointer user_data);
+extern GtkListStore *create_model_resv(int type);
+extern void admin_edit_resv(GtkCellRendererText *cell,
+			    const char *path_string,
+			    const char *new_text,
+			    gpointer data);
+extern int get_new_info_resv(reserve_info_msg_t **info_ptr, int force);
+extern void get_info_resv(GtkTable *table, display_data_t *display_data);
+extern void specific_info_resv(popup_info_t *popup_win);
+extern void set_menus_resv(void *arg, GtkTreePath *path, 
+			   GtkMenu *menu, int type);
+extern void popup_all_resv(GtkTreeModel *model, GtkTreeIter *iter, int id);
+extern void admin_resv(GtkTreeModel *model, GtkTreeIter *iter, char *type);
+
+
 // submit_info.c
 extern void get_info_submit(GtkTable *table, display_data_t *display_data);
 extern void set_menus_submit(void *arg, GtkTreePath *path, 
diff --git a/testsuite/Makefile.in b/testsuite/Makefile.in
index 69bacc1f8f6d67abe934858693fbcc15fa47feec..24c6e63e6b2f9a6bf990251786c52aa3977d5f2c 100644
--- a/testsuite/Makefile.in
+++ b/testsuite/Makefile.in
@@ -42,14 +42,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -96,6 +100,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
diff --git a/testsuite/expect/Makefile.am b/testsuite/expect/Makefile.am
index 0ef54425017438a8fe650f7849bb2d8a5337254f..c96b2cb6c4c6520d112b6cd88dde726aac0e6088 100644
--- a/testsuite/expect/Makefile.am
+++ b/testsuite/expect/Makefile.am
@@ -60,6 +60,7 @@ EXTRA_DIST = \
 	test1.42			\
 	test1.43			\
 	test1.44			\
+	test1.45			\
 	test1.46			\
 	test1.47			\
 	test1.48			\
@@ -73,6 +74,7 @@ EXTRA_DIST = \
 	test1.57			\
 	test1.58			\
 	test1.59			\
+	test1.60			\
 	test1.80			\
 	test1.81			\
 	test1.82			\
@@ -101,6 +103,7 @@ EXTRA_DIST = \
 	test2.9				\
 	test2.10			\
 	test2.11			\
+	test2.12			\
 	test3.1				\
 	test3.2				\
 	test3.3				\
@@ -112,6 +115,7 @@ EXTRA_DIST = \
 	test3.8				\
 	test3.9				\
 	test3.10			\
+	test3.11			\
 	test4.1				\
 	test4.2				\
 	test4.3				\
@@ -146,6 +150,7 @@ EXTRA_DIST = \
 	test6.12			\
 	test6.13			\
 	test6.13.prog.c			\
+	test6.14			\
 	test7.1				\
 	test7.2				\
 	test7.2.prog.c			\
@@ -208,6 +213,8 @@ EXTRA_DIST = \
 	test12.1			\
 	test12.2			\
 	test12.2.prog.c			\
+	test12.4			\
+	test12.5			\
 	test13.1			\
 	test14.1			\
 	test14.2			\
@@ -266,7 +273,6 @@ EXTRA_DIST = \
 	test17.16			\
 	test17.17			\
 	test17.18			\
-	test17.19			\
 	test17.20			\
 	test17.21			\
 	test17.22			\
@@ -311,6 +317,16 @@ EXTRA_DIST = \
 	test21.22			\
 	test21.23			\
 	test21.24			\
+	test21.25			\
+	test22.1			\
+	test22.2			\
+	test23.1			\
+	test23.2			\
+	test23.3			\
+	test24.1			\
+	test24.1.prog.c			\
+	test24.2			\
+	test25.1			\
 	usleep
 
 distclean-local:
diff --git a/testsuite/expect/Makefile.in b/testsuite/expect/Makefile.in
index 206556eb7b6b2b9921eb6e28a670d019106e82f2..5db8a2983698bc891aa88d423ecdbaa9974eea6a 100644
--- a/testsuite/expect/Makefile.in
+++ b/testsuite/expect/Makefile.in
@@ -40,14 +40,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -77,6 +81,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
@@ -296,6 +304,7 @@ EXTRA_DIST = \
 	test1.42			\
 	test1.43			\
 	test1.44			\
+	test1.45			\
 	test1.46			\
 	test1.47			\
 	test1.48			\
@@ -309,6 +318,7 @@ EXTRA_DIST = \
 	test1.57			\
 	test1.58			\
 	test1.59			\
+	test1.60			\
 	test1.80			\
 	test1.81			\
 	test1.82			\
@@ -337,6 +347,7 @@ EXTRA_DIST = \
 	test2.9				\
 	test2.10			\
 	test2.11			\
+	test2.12			\
 	test3.1				\
 	test3.2				\
 	test3.3				\
@@ -348,6 +359,7 @@ EXTRA_DIST = \
 	test3.8				\
 	test3.9				\
 	test3.10			\
+	test3.11			\
 	test4.1				\
 	test4.2				\
 	test4.3				\
@@ -382,6 +394,7 @@ EXTRA_DIST = \
 	test6.12			\
 	test6.13			\
 	test6.13.prog.c			\
+	test6.14			\
 	test7.1				\
 	test7.2				\
 	test7.2.prog.c			\
@@ -444,6 +457,8 @@ EXTRA_DIST = \
 	test12.1			\
 	test12.2			\
 	test12.2.prog.c			\
+	test12.4			\
+	test12.5			\
 	test13.1			\
 	test14.1			\
 	test14.2			\
@@ -502,7 +517,6 @@ EXTRA_DIST = \
 	test17.16			\
 	test17.17			\
 	test17.18			\
-	test17.19			\
 	test17.20			\
 	test17.21			\
 	test17.22			\
@@ -547,6 +561,16 @@ EXTRA_DIST = \
 	test21.22			\
 	test21.23			\
 	test21.24			\
+	test21.25			\
+	test22.1			\
+	test22.2			\
+	test23.1			\
+	test23.2			\
+	test23.3			\
+	test24.1			\
+	test24.1.prog.c			\
+	test24.2			\
+	test25.1			\
 	usleep
 
 all: all-am
diff --git a/testsuite/expect/README b/testsuite/expect/README
index 683a3a3a41a1161ba636f65acab5e91dadc2e095..63b73a230579bdd43c1478b3ccbea365fcfd2dc0 100644
--- a/testsuite/expect/README
+++ b/testsuite/expect/README
@@ -1,13 +1,14 @@
 ############################################################################
-# Copyright (C) 2008 Lawrence Livermore National Security.
 # Copyright (C) 2002-2007 The Regents of the University of California.
+# Copyright (C) 2008-2009 Lawrence Livermore National Security.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
 # Additionals by Joseph Donaghy <donaghy1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 #
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the supplied file: DISCLAIMER.
 #
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
@@ -139,7 +140,7 @@ test1.41   Validate SLURM debugger infrastructure (--debugger-test option).
 test1.42   Test of job dependencies (--dependency option).
 test1.43   Test of slurm_job_will_run API, (srun --test-only option).
 test1.44   Read srun's stdout slowly and test for lost data.   
-test1.45   REMOVED
+test1.45   Test srun option --preserve-env
 test1.46   Test srun option --kill-on-bad-exit
 test1.47   Test of job dependencies with singleton parameter.
 test1.48   Test of srun mail options (--mail-type and --mail-user options).
@@ -156,6 +157,7 @@ test1.56   Test buffered standard IO with really long lines
 test1.57   Test of srun --jobid for a new job allocation (used by Moab)
 test1.58   Test of srun --jobid for an existing job allocation
 test1.59   Test of hostfile logic for job steps
+test1.60   Test of labelling output when writing a file per task or per node
 
 **NOTE**   The following tests attempt to utilize multiple CPUs or partitions,
            The test will print "WARNING" and terminate with an exit code of 
@@ -197,6 +199,7 @@ test2.8    Validate scontrol show commands for jobs and steps.
 test2.9    Validate scontrol completing command.
 test2.10   Validate scontrol oneliner mode (--oneliner option).
 test2.11   Validate scontrol listpids command.
+test2.12   Validate scontrol show topology option.
 
 
 test3.#    Testing of scontrol options (best run as SlurmUser or root). 
@@ -211,6 +214,7 @@ test3.7    Test of job suspend/resume.
 test3.8    Test of batch job requeue.
 test3.9    Test of "scontrol show slurmd"
 test3.10   Test of "scontrol notify <jobid> <message>"
+test3.11   Validate scontrol create, delete, and update of reservations.
 UNTESTED   "scontrol abort"    would stop slurm 
 UNTESTED   "scontrol shutdown" would stop slurm
 
@@ -267,6 +271,7 @@ test6.11   Validate scancel quiet option, no warning if job gone
 test6.12   Test scancel signal to batch script (--batch option)
 test6.13   Test routing all signals through slurmctld rather than directly 
            to slurmd (undocumented --ctld option).
+test6.14   Test scancel nodelist option (--nodelist or -w)
 
 test7.#    Testing of other functionality.
 ==========================================
@@ -299,7 +304,7 @@ test8.1    Test of Blue Gene specific sbatch command line options
 test8.2    Test of Blue Gene specific sbatch environment variables
 test8.3    Test of Blue Gene specific job geometry support
 test8.4    Test of Blue Gene MPI job execution
-test8.5    Confirm we can make a 32, 128, and 512 cnode block.
+test8.5    Test creation of all blocks 1 midplane and smaller.
 test8.6    Stress test Dynamic mode block creation.
 test8.7    Test of Blue Gene scheduling with sched/wik2 plugin.
 
@@ -351,9 +356,9 @@ test12.#   Testing of sacct command and options
 test12.1   Test sacct --help option.
 test12.2   Test validity/accuracy of accounting data for exit code, 
            memory and real-time information along with stating a running job.
-(There are many more tests that should probably be added, but HP 
-is taking responsibility for validating this code, so we'll stick 
-with the basics here.)
+test12.3   (EMPTY)
+test12.4   Test sacct --b, g, j, l, n, p, u, v options.
+test12.5   Test sacct --helpformat option.
 
 
 test13.#   Testing of switch plugins
@@ -455,8 +460,7 @@ test17.17  Confirm that node sharing flags are respected  (--nodelist and
            --share options)
 test17.18  Test of job dependencies and deferred begin time (--depedency 
            and --begin options).
-test17.19  Test the launch of a batch job within an existing job allocation.
-           This logic is used by LSF
+test17.19  REMOVED
 test17.20  Test of mail options (--mail-type and --mail-user options)
 test17.21  Tests #SLURM entry functionality in a batch script
 test17.22  Test of running non-existant job, confirm timely termination.
@@ -521,3 +525,25 @@ test21.21  sacctmgr add an account to this cluster and try using it with
 test21.22  sacctmgr load file
 test21.23  sacctmgr wckey
 test21.24  sacctmgr dump file
+test21.25  sacctmgr show config
+
+test22.#   Testing of sreport commands and options. 
+           These also test the sacctmgr archive dump/load functions.
+=================================================
+test22.1   sreport cluster utilization report
+test22.2   sreport h, n, p, P, t, V options
+
+test23.#   Testing of sstat commands and options.
+=================================================
+test23.1   sstat -e, h, usage and V options
+test23.2   sstat --helpformat
+test23.3   sstat -a, n, o, p, P, v options
+
+test24.#   Testing of sshare/priority multifactor plugin.
+=================================================
+test24.1   multifactor plugin algo test
+test24.2   sshare h, n, p, P, v, and V options.
+
+test25.#   Testing of sprio command and options.
+=================================================
+test25.1   sprio all options
diff --git a/testsuite/expect/globals b/testsuite/expect/globals
index 28dea26ebb7b9e8de8431390ce7712be17092f5b..81815bbd35de32b02a32dd2e05c1611c57573fb7 100755
--- a/testsuite/expect/globals
+++ b/testsuite/expect/globals
@@ -16,10 +16,11 @@
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
 # Additions by Joseph Donaghy <donaghy1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the supplied file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
@@ -56,6 +57,7 @@ if [file exists globals.local] {
 #
 cset slurm_dir   "/usr"
 cset build_dir   "../../"
+cset src_dir     "../../"
 cset sacctmgr    "${slurm_dir}/bin/sacctmgr"
 cset sacct       "${slurm_dir}/bin/sacct"
 cset salloc      "${slurm_dir}/bin/salloc"
@@ -66,9 +68,11 @@ cset scancel     "${slurm_dir}/bin/scancel"
 cset scontrol    "${slurm_dir}/bin/scontrol"
 cset sinfo       "${slurm_dir}/bin/sinfo"
 cset smap        "${slurm_dir}/bin/smap"
+cset sprio       "${slurm_dir}/bin/sprio"
 cset squeue      "${slurm_dir}/bin/squeue"
 cset srun        "${slurm_dir}/bin/srun"
 cset sreport     "${slurm_dir}/bin/sreport"
+cset sshare      "${slurm_dir}/bin/sshare"
 cset sstat       "${slurm_dir}/bin/sstat"
 cset strigger    "${slurm_dir}/bin/strigger"
 
@@ -195,6 +199,10 @@ cset super_user_set 0
 proc cancel_job { job_id } {
 	global scancel bin_sleep
 
+	if {$job_id == 0} {
+		return 1
+	}
+
 	send_user "cancelling $job_id\n"
 	exec $scancel -q $job_id
 	exec $bin_sleep 1
@@ -251,6 +259,102 @@ proc slow_kill { pid } {
 	return 0
 }
 
+################################################################
+#
+# Proc: get_my_nuid
+#
+# Purpose: gets the user name of the running user (output of "id -nu")
+#
+# Returns: the user name, or -1 on failure
+#
+#
+################################################################
+
+proc get_my_nuid {  } {
+	global bin_id alpha alpha_numeric
+
+	set nuid -1
+
+	log_user 0
+	spawn $bin_id -nu
+	expect {
+		-re "($alpha_numeric|$alpha)" {
+			set nuid $expect_out(1,string)
+			exp_continue
+		}
+		eof {
+			wait
+		}
+	}
+	log_user 1
+	
+	return $nuid
+}
+
+################################################################
+#
+# Proc: get_my_uid
+#
+# Purpose: gets the numeric uid of the running user
+#
+# Returns: the uid, or -1 on failure
+#
+#
+################################################################
+
+proc get_my_uid {  } {
+	global bin_id number
+
+	set uid -1
+
+	log_user 0
+	spawn $bin_id -u
+	expect {
+		-re "($number)" {
+			set uid $expect_out(1,string)
+			exp_continue
+		}
+		eof {
+			wait
+		}
+	}
+	log_user 1
+	
+	return $uid
+}
+
+################################################################
+#
+# Proc: get_my_gid
+#
+# Purpose: gets the numeric gid of the running user
+#
+# Returns: the gid, or -1 on failure
+#
+#
+################################################################
+
+proc get_my_gid {  } {
+	global bin_id number
+
+	set gid -1
+
+	log_user 0
+	spawn $bin_id -g
+	expect {
+		-re "($number)" {
+			set gid $expect_out(1,string)
+			exp_continue
+		}
+		eof {
+			wait
+		}
+	}
+	log_user 1
+	
+	return $gid
+}
+
 
 ################################################################
 #
@@ -360,9 +464,9 @@ proc wait_for_file { file_name } {
 	global bin_sleep max_file_delay
 
 	for {set my_delay 0} {$my_delay <= $max_file_delay} {incr my_delay} {
-		if {[file exists $file_name] && [file exists $file_name]} {
+		if {[file exists $file_name]} {
 #			Add small delay for I/O buffering
-			exec $bin_sleep 2
+			exec $bin_sleep 1
 			return 0
 		}
 		exec $bin_sleep 1
@@ -520,8 +624,8 @@ proc test_assoc_enforced { } {
 	set assoc_enforced 0
 	spawn $scontrol show config
 	expect {
-		-re "AccountingStorageEnforce *= ($number)" {
-			set assoc_enforced $expect_out(1,string)
+		-re "AccountingStorageEnforce *= associations" {
+			set assoc_enforced 1
 			exp_continue
 		}
 		eof {
@@ -533,6 +637,62 @@ proc test_assoc_enforced { } {
 	return $assoc_enforced
 }
 
+################################################################
+#
+# Proc: slurmd_user_root
+#
+# Return 1 if the SlurmdUser is root, 0 otherwise
+#
+################################################################
+proc slurmd_user_root { } {
+	global scontrol
+
+	log_user 0
+	set rc 0
+	spawn $scontrol show config
+	expect {
+		-re "SlurmdUser *= root" {
+			set rc 1
+			exp_continue
+		}
+		eof {
+			wait
+		}
+	}
+	log_user 1
+
+	return $rc
+}
+
+################################################################
+#
+# Proc: test_topology
+#
+# Purpose: Determine if system is topology aware
+#
+# Returns 1 if a topology plugin is configured, 0 otherwise
+#
+################################################################
+proc test_topology { } {
+	global scontrol
+
+	log_user 0
+	set have_topology 1
+	spawn $scontrol show config
+	expect {
+		-re "TopologyPlugin *= *topology/none" {
+			set have_topology 0
+			exp_continue
+		}
+		eof {
+			wait
+		}
+	}
+
+	log_user 1
+	return $have_topology
+}
+
 ################################################################
 #
 # Proc: test_track_wckey
@@ -623,6 +783,36 @@ proc test_account_storage { } {
 	return $account_storage
 }
 
+################################################################
+#
+# Proc: priority_type
+#
+# Purpose: Use scontrol to determine the priority plugin
+#
+# Returns: Name of priority type
+#
+################################################################
+
+proc priority_type {} {
+	global scontrol
+
+	set name ""
+	set fd [open "|$scontrol show config"]
+	while {[gets $fd line] != -1} {
+		if {[regexp {^PriorityType *= priority/(\w+)} $line frag name]
+				== 1} {
+			break
+		}
+	}
+	catch {close $fd}
+
+	if {[string length $name] == 0} {
+		send_user "ERROR: could not identify the priority type\n"
+	}
+
+	return $name
+}
+
 ################################################################
 #
 # Proc: get_default_acct
@@ -1002,13 +1192,12 @@ proc switch_type {} {
 	catch {close $fd}
 
 	if {[string length $name] == 0} {
-		send_user "ERROR: could not identify the switch type"
+		send_user "ERROR: could not identify the switch type\n"
 	}
 
 	return $name
 }
 
-
 ################################################################
 #
 # Proc: make_bash_script
@@ -1229,7 +1418,7 @@ proc get_cluster_name { } {
 	#
 	log_user 0
 	set cluster_name ""
-	set scon_pid [spawn $scontrol show config]
+	set scon_pid [spawn -noecho $scontrol show config]
 	expect {
 		-re "ClusterName *= ($alpha_numeric_under)" {
 			set cluster_name $expect_out(1,string)
@@ -1248,3 +1437,216 @@ proc get_cluster_name { } {
 	log_user 1
 	return $cluster_name
 }
+
+################################################################
+#
+# Proc: get_bluegene_layout
+#
+# Purpose: Determine which layout mode the bluegene system is running
+#
+# Returns name of layout mode if found, 0 otherwise
+#
+################################################################
+
+proc get_bluegene_layout { } {
+	global scontrol alpha_numeric_under
+
+	log_user 0
+	set layout 0
+	set scon_pid [spawn -noecho $scontrol show config]
+	expect {
+		-re "LayoutMode *= ($alpha_numeric_under)" {
+			set layout $expect_out(1,string)
+			exp_continue
+		}
+		timeout {
+			send_user "\nFAILURE: scontrol not responding\n"
+			slow_kill $scon_pid
+			set exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+	log_user 1
+	
+	return $layout
+}
+
+################################################################
+#
+# Proc: get_bluegene_psets
+#
+# Purpose: Determine how many psets a midplane has in a bluegene system
+#
+# Returns num of psets, 0 if not set
+#
+################################################################
+
+proc get_bluegene_psets { } {
+	global scontrol number
+
+	log_user 0
+	set psets 0
+	set scon_pid [spawn -noecho $scontrol show config]
+	expect {
+		-re "Numpsets *= ($number)" {
+			set psets $expect_out(1,string)
+			exp_continue
+		}
+		timeout {
+			send_user "\nFAILURE: scontrol not responding\n"
+			slow_kill $scon_pid
+			set exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+	log_user 1
+
+	return $psets
+}
+
+################################################################
+#
+# Proc: get_bluegene_type
+#
+# Purpose: Determine what kind of bluegene system we are running
+#
+# Returns 'L' for bluegene/L,
+#	  'P' for bluegene/P,
+#	  'Q' for bluegene/Q,
+#	  0 if not set
+#
+################################################################
+
+proc get_bluegene_type { } {
+	global scontrol alpha
+
+	log_user 0
+	set type 0
+	set scon_pid [spawn -noecho $scontrol show config]
+	expect {
+		-re "Bluegene/($alpha) configuration" {
+			set type $expect_out(1,string)
+			exp_continue
+		}
+		timeout {
+			send_user "\nFAILURE: scontrol not responding\n"
+			slow_kill $scon_pid
+			set exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+	log_user 1
+
+	return $type
+}
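+
+# Typical use (a hypothetical caller, for illustration only):
+#	set type [get_bluegene_type]
+#	if {![string compare $type "P"]} {
+#		send_user "This is a bluegene/P system\n"
+#	}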
+
+################################################################
+#
+# Proc: get_bluegene_procs_per_cnode
+#
+# Purpose: Determine how many cpus are on a cnode
+#
+# Returns count of cpus on a cnode or 0 if not set
+#
+################################################################
+
+proc get_bluegene_procs_per_cnode { } {
+	global scontrol number
+
+	log_user 0
+	set cpu_cnt 0
+	set scon_pid [spawn -noecho $scontrol show config]
+	expect {
+		-re "NodeCPUCnt *= ($number)" {
+			set cpu_cnt $expect_out(1,string)
+			exp_continue
+		}
+		timeout {
+			send_user "\nFAILURE: scontrol not responding\n"
+			slow_kill $scon_pid
+			set exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+	log_user 1
+
+	return $cpu_cnt
+}
+
+################################################################
+#
+# Proc: get_bluegene_cnodes_per_mp
+#
+# Purpose: Determine how many cnodes are in a midplane
+#
+# Returns count of nodes on a midplane or 0 if not set
+#
+################################################################
+
+proc get_bluegene_cnodes_per_mp { } {
+	global scontrol number
+
+	log_user 0
+	set node_cnt 0
+	set scon_pid [spawn -noecho $scontrol show config]
+	expect {
+		-re "BasePartitionNodeCnt *= ($number)" {
+			set node_cnt $expect_out(1,string)
+			exp_continue
+		}
+		timeout {
+			send_user "\nFAILURE: scontrol not responding\n"
+			slow_kill $scon_pid
+			set exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+	log_user 1
+
+	return $node_cnt
+}
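+
+# The getters above are often combined; a hypothetical sketch, for
+# illustration only:
+#	set psets      [get_bluegene_psets]
+#	set cnode_cpus [get_bluegene_procs_per_cnode]
+#	set mp_cnodes  [get_bluegene_cnodes_per_mp]
+#	if {$psets == 0 || $cnode_cpus == 0 || $mp_cnodes == 0} {
+#		send_user "\nWARNING: bluegene configuration incomplete\n"
+#	}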
+
+################################################################
+#
+# Proc: get_node_cnt
+#
+# Purpose: Determine how many nodes are on the system
+#
+# Returns count of nodes on system or 0 if unknown
+#
+################################################################
+
+proc get_node_cnt { } {
+	global scontrol
+
+	log_user 0
+	set node_cnt 0
+	set scon_pid [spawn -noecho $scontrol show nodes]
+	expect {
+		-re "NodeName=" {
+			incr node_cnt
+			exp_continue
+		}
+		timeout {
+			send_user "\nFAILURE: scontrol not responding\n"
+			slow_kill $scon_pid
+			set exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+	log_user 1
+
+	return $node_cnt
+}
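+
+# Typical use (a hypothetical caller, for illustration only):
+#	if {[get_node_cnt] < 2} {
+#		send_user "\nWARNING: test requires at least 2 nodes\n"
+#		exit 0
+#	}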
diff --git a/testsuite/expect/globals_accounting b/testsuite/expect/globals_accounting
index 1a43e68c671fc18794ec9d09181172601b4cd17d..9df5cd2c9a601f093a68b70c5ebf34e5c9e6cbe0 100644
--- a/testsuite/expect/globals_accounting
+++ b/testsuite/expect/globals_accounting
@@ -1,6 +1,6 @@
 #!/usr/bin/expect
 ############################################################################
-# Purpose: Establish global state information for SLURM test suite
+# Purpose: Establish global state information for SLURM accounting tests
 #
 # To define site-specific state information, set the values in a file
 # named 'globals.local'. Those values will override any specified here.
@@ -11,15 +11,17 @@
 # set mpicc     "/usr/local/bin/mpicc"
 #
 ############################################################################
-# Copyright (C) 2008 Lawrence Livermore National Security.
+# Copyright (C) 2008-2009 Lawrence Livermore National Security.
 # Copyright (C) 2002-2007 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+# Written by Danny Auble <da@llnl.gov>
 # Written by Morris Jette <jette1@llnl.gov>
 # Additions by Joseph Donaghy <donaghy1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the supplied file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
@@ -43,7 +45,7 @@ set timeout 60
 #
 # Use sacctmgr to create a cluster
 #	
-proc add_cluster {name qos fairshare grpcpumin grpcpu grpjob grpnode grpsubmit grpwall maxcpumin maxcpu maxjob maxnode maxsubmit maxwall} {
+proc add_cluster {name qos fairshare grpcpu grpjob grpnode grpsubmit maxcpumin maxcpu maxjob maxnode maxsubmit maxwall} {
         global sacctmgr timeout
 
 	set exit_code 0
@@ -61,11 +63,6 @@ proc add_cluster {name qos fairshare grpcpumin grpcpu grpjob grpnode grpsubmit g
 		set assoc_stuff 1
 	}
 
-	if { [string length $grpcpumin] } {
-		set command "$command GrpCPUMins=$grpcpumin"
-		set assoc_stuff 1
-	}
-
 	if { [string length $grpcpu] } {
 		set command "$command GrpCPUs=$grpcpu"
 		set assoc_stuff 1
@@ -86,11 +83,6 @@ proc add_cluster {name qos fairshare grpcpumin grpcpu grpjob grpnode grpsubmit g
 		set assoc_stuff 1
 	}
 
-	if { [string length $grpwall] } {
-		set command "$command GrpWall=$grpwall"
-		set assoc_stuff 1
-	}
-
 	if { [string length $maxcpumin] } {
 		set command "$command MaxCPUMins=$maxcpumin"
 		set assoc_stuff 1
@@ -1363,3 +1355,36 @@ proc remove_coor { accounts names } {
 	return $exit_code
 }
 
+proc archive_load { file } {
+	global sacctmgr timeout
+	#
+	# Use sacctmgr to load an accounting archive file
+	#
+	set matches 0
+	set exit_code 0
+	set my_pid [spawn $sacctmgr -i -n archive load $file]
+	expect {
+		-re "There was a problem" {
+			send_user "\nFAILURE: there was a problem with the sacctmgr command\n"
+			incr exit_code 1
+		}
+		-re "SUCCESS" {
+			incr matches
+			exp_continue
+		}
+		timeout {
+			send_user "\nFAILURE: sacctmgr archive load not responding\n"
+			slow_kill $my_pid
+			incr exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+
+	if {$matches != 1} {
+		send_user "\nFAILURE: sacctmgr did not load the archive correctly\n"
+		incr exit_code 1
+	}
+	return $exit_code
+}
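+
+# Typical use (a hypothetical caller, for illustration only; the file is
+# assumed to come from a prior "sacctmgr archive dump"):
+#	set exit_code [archive_load $archive_file]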
diff --git a/testsuite/expect/pkill b/testsuite/expect/pkill
index 8c648f22ce302a6403cb6f36a698f94e2f88604b..9ad91586d65450d332800c09091d5cbffb660290 100755
--- a/testsuite/expect/pkill
+++ b/testsuite/expect/pkill
@@ -7,10 +7,11 @@
 # Copyright (C) 2002 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the supplied file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/regression b/testsuite/expect/regression
index 0c7c5d7c451fdc1c72e86eba7f1fb44d189f772b..a54f8def643a7243e2ec13fa93733f64f0f07826 100755
--- a/testsuite/expect/regression
+++ b/testsuite/expect/regression
@@ -10,10 +10,11 @@
 # Copyright (C) 2002 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the supplied file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/regression.py b/testsuite/expect/regression.py
index a1e706430f01adebcd29bee0ffdd70368d96ab63..2d138708cb3b6e701bc8fbb6ba1ab45083f35890 100755
--- a/testsuite/expect/regression.py
+++ b/testsuite/expect/regression.py
@@ -3,10 +3,11 @@
 # Copyright (C) 2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Christopher J. Morrone <morrone2@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the supplied file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test1.1 b/testsuite/expect/test1.1
index 19516084daadcf0ae593da42a5fba16251e216ac..f388711ae87e67f40a9be45b35b6823323d9b207 100755
--- a/testsuite/expect/test1.1
+++ b/testsuite/expect/test1.1
@@ -10,10 +10,11 @@
 # Copyright (C) 2002 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test1.10 b/testsuite/expect/test1.10
index f8268d040df1b06694b07d78c791fed09a8e006e..8b809a93bb0ace65e56c6a67234e6cdf7a172d57 100755
--- a/testsuite/expect/test1.10
+++ b/testsuite/expect/test1.10
@@ -10,10 +10,11 @@
 # Copyright (C) 2002-2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test1.11 b/testsuite/expect/test1.11
index b27adc5a7372d7a55de14ff67ee9012466c81e21..6f4108c9ab84344ae797c031cbdcf2842e4412c9 100755
--- a/testsuite/expect/test1.11
+++ b/testsuite/expect/test1.11
@@ -14,10 +14,11 @@
 # Copyright (C) 2002 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test1.12 b/testsuite/expect/test1.12
index a0c173d5036a6675429c41867071c1e69ae30d41..8e3ca8137290fe8aa777a067f2b29886d6795b6c 100755
--- a/testsuite/expect/test1.12
+++ b/testsuite/expect/test1.12
@@ -11,10 +11,11 @@
 # Copyright (C) 2007 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
@@ -45,7 +46,7 @@ print_header $test_id
 #
 exec $bin_rm -f $file_in
 make_bash_script $file_in "
-  $scontrol show step \$SLURM_JOBID.\$SLURM_STEPID
+  $scontrol show step \$SLURM_JOB_ID.\$SLURM_STEPID
 "
 
 set srun_pid [spawn $srun -N1 -t1 --checkpoint=$ckpt_in $file_in]
@@ -66,13 +67,12 @@ expect {
 	eof {
 		wait
 	}
-}
-
+}
 if {$ckpt_out == -1} {
 	send_user "\nFAILURE: No Checkpoint time reported for job step\n"
 	set exit_code 1
 } elseif {$ckpt_in != $ckpt_out} {
-	send_user "\nFAILURE: No Checkpoint time not set properly\n"
+	send_user "\nFAILURE: Checkpoint time not set properly ($ckpt_in != $ckpt_out)\n"
 	set exit_code 1
 }
 
diff --git a/testsuite/expect/test1.13 b/testsuite/expect/test1.13
index 2d31d61bf7cf4299546fffbbd88b26b74318b255..265acbf1f51a2762a6d0d7fc8d3870594cc95554 100755
--- a/testsuite/expect/test1.13
+++ b/testsuite/expect/test1.13
@@ -10,10 +10,11 @@
 # Copyright (C) 2002-2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test1.14 b/testsuite/expect/test1.14
index 86f8f9dfc81ee0841d60b68124f0c86cbe90d7da..c720bfb4c497d429ab48c4256dccc096e688c07c 100755
--- a/testsuite/expect/test1.14
+++ b/testsuite/expect/test1.14
@@ -10,10 +10,11 @@
 # Copyright (C) 2007 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
@@ -53,6 +54,7 @@ if {[test_bluegene]} {
 #
 exec $bin_rm -f $file_in $file_out
 make_bash_script $file_in "
+  echo tasks_per_node=\$SLURM_TASKS_PER_NODE
   inx=0
   while \[ \$inx -lt \$SLURM_TASKS_PER_NODE \]
   do
diff --git a/testsuite/expect/test1.15 b/testsuite/expect/test1.15
index 9cbbfd612f43200f3bbd4252e445d1f2254b8e33..b47f9c8ee7c8c9fa8e05e93622c08e012ca53f8b 100755
--- a/testsuite/expect/test1.15
+++ b/testsuite/expect/test1.15
@@ -10,10 +10,11 @@
 # Copyright (C) 2002-2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
@@ -61,14 +62,10 @@ expect {
 		incr matches
 		exp_continue
 	}
-	-re "task \\\[0,2-9\\\]: running" {
+	-re "tasks \\\[0,2-9\\\]: running" {
 		incr matches
 		exp_continue
 	}
-	-re "Exited with exit code" {
-		send_user "This error is expected, no worries\n"
-		exp_continue
-	}
 	timeout {
 		send_user "\nFAILURE: srun not responding\n"
 		slow_kill $srun_pid
diff --git a/testsuite/expect/test1.16 b/testsuite/expect/test1.16
index 5d7465a95ed3bb1998f474f11e7911bbe4a249cf..3769788f4e3cf4d233d07eb0f79df93b82512b37 100755
--- a/testsuite/expect/test1.16
+++ b/testsuite/expect/test1.16
@@ -10,10 +10,11 @@
 # Copyright (C) 2002 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test1.17 b/testsuite/expect/test1.17
index e1adfa1ae5d5b96d0061e9147d5ed241a2da86d4..8502f234a909b2a53bdb68a1e0f1e3c2eabed601 100755
--- a/testsuite/expect/test1.17
+++ b/testsuite/expect/test1.17
@@ -10,10 +10,11 @@
 # Copyright (C) 2002-2007 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test1.18 b/testsuite/expect/test1.18
index fdc0946e6e52e1994e665bac8460141035c43e7f..b245e1c76f432d424168b84ffd4958a401dfa953 100755
--- a/testsuite/expect/test1.18
+++ b/testsuite/expect/test1.18
@@ -11,10 +11,11 @@
 # Copyright (C) 2008 Lawrence Livermore National Security.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test1.19 b/testsuite/expect/test1.19
index 0cef6672ee974c03075a6b012e9239c4b322a1c8..4622c03628de6d917c6d4da70fec3456e4d8cc47 100755
--- a/testsuite/expect/test1.19
+++ b/testsuite/expect/test1.19
@@ -11,10 +11,11 @@
 # Copyright (C) 2002-2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test1.2 b/testsuite/expect/test1.2
index f49b9ee0ffba228c3fb66b28dcf498fbcc04e794..deed651515b8638c3c8858e53fb6e89ad17938f6 100755
--- a/testsuite/expect/test1.2
+++ b/testsuite/expect/test1.2
@@ -11,10 +11,11 @@
 # Copyright (C) 2002 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test1.20 b/testsuite/expect/test1.20
index 3f10914c19920bb6f569cc08517de83bff7f1b78..7caa3db2845f792940acd05e2d1c281cda7e2894 100755
--- a/testsuite/expect/test1.20
+++ b/testsuite/expect/test1.20
@@ -11,10 +11,11 @@
 # Copyright (C) 2002-2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test1.21 b/testsuite/expect/test1.21
index 9b258cae2aaa36d8500124b7a146aff974b300a3..66b011474fc7368c460866acb1d3e2fc723af77b 100755
--- a/testsuite/expect/test1.21
+++ b/testsuite/expect/test1.21
@@ -11,10 +11,11 @@
 # Copyright (C) 2002-2007 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test1.22 b/testsuite/expect/test1.22
index 61ec64965b915138c559701145a980fa20fbe804..bf079a46bd9e0554bd0d2fb2628cd46933e5a4b0 100755
--- a/testsuite/expect/test1.22
+++ b/testsuite/expect/test1.22
@@ -12,10 +12,11 @@
 # Copyright (C) 2002-2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test1.23 b/testsuite/expect/test1.23
index 121c2766fdc635b6e0ac99b982f6064d0f2a7337..ff74c7d0a311d0d6c7ff68e269cf0fde16165195 100755
--- a/testsuite/expect/test1.23
+++ b/testsuite/expect/test1.23
@@ -12,10 +12,11 @@
 # Copyright (C) 2002 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
@@ -147,7 +148,7 @@ if {$err_msg != 1} {
 #
 set host_0      ""
 set timeout $max_job_delay
-set srun_pid [spawn $srun -N1 -l --mem=1 -t1 $bin_printenv SLURMD_NODENAME]
+set srun_pid [spawn $srun -N1 -l --mem=10 -t1 $bin_printenv SLURMD_NODENAME]
 expect {
 	-re "0: ($alpha_numeric_under)" {
 		set host_0 $expect_out(1,string)
diff --git a/testsuite/expect/test1.24 b/testsuite/expect/test1.24
index ee70fa7ef7235d3bcb0413944e38b33e00dc54ed..d0b1d1f31c10802fce4468ac31df8260bcff811d 100755
--- a/testsuite/expect/test1.24
+++ b/testsuite/expect/test1.24
@@ -11,10 +11,11 @@
 # Copyright (C) 2002 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test1.25 b/testsuite/expect/test1.25
index 35ec82df5612c123e74c19964520402ed2515416..6ff7348177e29cfdd7fb4a1312c2f81fb24cde83 100755
--- a/testsuite/expect/test1.25
+++ b/testsuite/expect/test1.25
@@ -12,10 +12,11 @@
 # Copyright (C) 2002-2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test1.26 b/testsuite/expect/test1.26
index b6168ca78edf1daf1765e9a2722a9576adb432d6..7b9104338cf01c3875a8dea589f7e1861cd23572 100755
--- a/testsuite/expect/test1.26
+++ b/testsuite/expect/test1.26
@@ -13,10 +13,11 @@
 # Copyright (C) 2008 Lawrence Livermore National Security.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
@@ -172,7 +173,7 @@ for {set inx 0} {$inx < $interations} {incr inx} {
 	set spawn_id $noalloc2
 	expect {
 		-i $noalloc2
-		-re "qsw_prog_init.*Error configuring interconnect" {
+		-re "error:.*configuring interconnect" {
 			send_user "Can't avoid this possible error\n"
 			exp_continue
 		}
@@ -194,7 +195,7 @@ for {set inx 0} {$inx < $interations} {incr inx} {
 	set spawn_id $noalloc1
 	expect {
 		-i $noalloc1
-		-re "qsw_prog_init.*Error configuring interconnect" {
+		-re "error:.*configuring interconnect" {
 			send_user "Can't avoid this possible error\n"
 			exp_continue
 		}
@@ -226,7 +227,7 @@ for {set inx 0} {$inx < $interations} {incr inx} {
 			}
 			exp_continue
 		}
-		-re "qsw_prog_init.*Error configuring interconnect" {
+		-re "error:.*configuring interconnect" {
 			send_user "Can't avoid this possible error\n"
 			exp_continue
 		}
diff --git a/testsuite/expect/test1.27 b/testsuite/expect/test1.27
index 976919b8d5b7af79fb1956f16964f5ed203bcf1b..50171d9a8acdf1d10c13a9f5601f6baf9dcbeeb5 100755
--- a/testsuite/expect/test1.27
+++ b/testsuite/expect/test1.27
@@ -10,10 +10,11 @@
 # Copyright (C) 2002-2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
@@ -43,7 +44,7 @@ print_header $test_id
 array set good_vars {
     SLURM_CPUS_ON_NODE 1
     SLURM_CPUS_PER_TASK 1
-    SLURM_JOBID 1
+    SLURM_JOB_ID 1
     SLURM_LAUNCH_NODE_IPADDR 0
     SLURM_LOCALID 0
     SLURM_NNODES 0
diff --git a/testsuite/expect/test1.28 b/testsuite/expect/test1.28
index 4dc41c5c4dc3715ff1fdbd8b099fc66e7c0c2d45..0a26e559239e5d266799688eb4903c44c49f55ac 100755
--- a/testsuite/expect/test1.28
+++ b/testsuite/expect/test1.28
@@ -10,10 +10,11 @@
 # Copyright (C) 2002 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test1.29 b/testsuite/expect/test1.29
index 88066f61cc84293d76e00642783ca3ee4d0fef90..d0c054525d4984f885aad6aab2435422b34d5197 100755
--- a/testsuite/expect/test1.29
+++ b/testsuite/expect/test1.29
@@ -13,10 +13,11 @@
 # Copyright (C) 2002 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test1.29.prog.c b/testsuite/expect/test1.29.prog.c
index d6e2317004c7bb8e80da75752e6f9747d918e4bc..a6ab4edcfd9c0ec79672586259c5caea4f6206e9 100644
--- a/testsuite/expect/test1.29.prog.c
+++ b/testsuite/expect/test1.29.prog.c
@@ -6,10 +6,11 @@
  *  Copyright (C) 2002 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test1.3 b/testsuite/expect/test1.3
index a0f68f4714f6f82b4f64acb3265ea4147f045c0d..3a900099604b12617a0eafe73ff110f316852505 100755
--- a/testsuite/expect/test1.3
+++ b/testsuite/expect/test1.3
@@ -10,10 +10,11 @@
 # Copyright (C) 2002 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test1.30 b/testsuite/expect/test1.30
index 7bfc6c32d145b12ef9d477b0f90e9b7c4852c2f3..1379e67d8f1bf26087d0d53002d4532bb62a7d6e 100755
--- a/testsuite/expect/test1.30
+++ b/testsuite/expect/test1.30
@@ -10,10 +10,11 @@
 # Copyright (C) 2002-2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test1.31 b/testsuite/expect/test1.31
index 58e0e093274a0688f2c2831cf1c9da80803c4420..8aca35928e02b12cefc3dcdbd2325b62b0455fa9 100755
--- a/testsuite/expect/test1.31
+++ b/testsuite/expect/test1.31
@@ -12,10 +12,11 @@
 # Copyright (C) 2002 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test1.32 b/testsuite/expect/test1.32
index 0a2f10181e0e5aeae9a9946ff6a51f002f580c54..d93c34d48611ace9fbc4299fb0098268e453b0a8 100755
--- a/testsuite/expect/test1.32
+++ b/testsuite/expect/test1.32
@@ -13,10 +13,11 @@
 # Copyright (C) 2002 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
@@ -85,9 +86,10 @@ expect {
 	-re "WAITING" {
 		incr matches
 		# sleep to make sure the process is actually running
-		sleep 1
+		exec $bin_sleep 1
 		exec $bin_kill -USR1 $srun_pid
 		exec $bin_kill -USR2 $srun_pid
+		send_user "\nSent signals USR1 and USR2\n"
 		exp_continue
 	}
 	-re "SIGUSR($number)" {
diff --git a/testsuite/expect/test1.32.prog.c b/testsuite/expect/test1.32.prog.c
index 2f206dab8815fbd37709b2b6eb289c783afc04b5..8cec2a3b086e7ce4b84c82faaff81d27656ec651 100644
--- a/testsuite/expect/test1.32.prog.c
+++ b/testsuite/expect/test1.32.prog.c
@@ -5,10 +5,11 @@
  *  Copyright (C) 2002 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -76,7 +77,6 @@ main (int argc, char **argv)
 	while (!sigusr1_cnt || !sigusr2_cnt) {
 		sleep(1);
 	}
-
-	printf("FINI\n");
+	printf("FINI: sig1:%d sig2:%d\n", sigusr1_cnt, sigusr2_cnt);
 	exit(0);
 }
diff --git a/testsuite/expect/test1.33 b/testsuite/expect/test1.33
index 30511eef17d19f1bfb6fb3c3b91211f9890ac526..ca8892efff83026995a807f1a662b45c403c31b8 100755
--- a/testsuite/expect/test1.33
+++ b/testsuite/expect/test1.33
@@ -10,10 +10,11 @@
 # Copyright (C) 2002-2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test1.34 b/testsuite/expect/test1.34
index 87485a9deeba64de31d648d6402f85629ab15fe6..6d98138d309f9865aea5d8b34f492148a527cb79 100755
--- a/testsuite/expect/test1.34
+++ b/testsuite/expect/test1.34
@@ -11,10 +11,11 @@
 # Copyright (C) 2008 Lawrence Livermore National Security
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test1.34.prog.c b/testsuite/expect/test1.34.prog.c
index d51a6c56ecbb22af89812c1b65ad205ac7bcfdae..3bceda4e84259b87cacb5dd8d7e83502b38ea8f1 100644
--- a/testsuite/expect/test1.34.prog.c
+++ b/testsuite/expect/test1.34.prog.c
@@ -4,10 +4,11 @@
  *  Copyright (C) 2008 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test1.35 b/testsuite/expect/test1.35
index d20e2c83602361b3fc44b46052a58922b4af02cb..35690c4df5d00c2e22e7c7311ca740dd6d19c7d7 100755
--- a/testsuite/expect/test1.35
+++ b/testsuite/expect/test1.35
@@ -10,10 +10,11 @@
 # Copyright (C) 2002-2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
@@ -50,7 +51,7 @@ print_header $test_id
 exec $bin_rm -f $file_in $file_out $file_err
 make_bash_script $file_in "
   for ((i = 0; i < $steps_started; i++)); do
-    j=`expr $steps_started + 10 - \$i`
+    j=`expr $steps_started + 15 - \$i`
     $srun $bin_sleep \$j &
     $bin_sleep 1
   done
diff --git a/testsuite/expect/test1.36 b/testsuite/expect/test1.36
index a5570a7be95ff7fe384a2b232488d351dda64253..ad2c4d062941049ff4f1d896ca3f3047c9b67f50 100755
--- a/testsuite/expect/test1.36
+++ b/testsuite/expect/test1.36
@@ -10,10 +10,11 @@
 # Copyright (C) 2002-2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test1.37 b/testsuite/expect/test1.37
index 78b29cb72dc08e089350def137f0e0b7a082eafd..4b15449ba7e409eb0791bb6cb23822c33d8b62c6 100755
--- a/testsuite/expect/test1.37
+++ b/testsuite/expect/test1.37
@@ -10,10 +10,11 @@
 # Copyright (C) 2008 Lawrence Livermore National Security.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test1.38 b/testsuite/expect/test1.38
index 3501fbeaf2fec4e2b7b65be702351ab78f87c5f5..8a4d25d06a652d343f84ddc480d6b2c8b9eb3c4a 100755
--- a/testsuite/expect/test1.38
+++ b/testsuite/expect/test1.38
@@ -12,10 +12,11 @@
 # Copyright (C) 2008 Lawrence Livermore National Security.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
@@ -141,7 +142,7 @@ expect {
 		set match_run 999
 		exp_continue
 	}
-	-re "forcing job termination" {
+	-re "Job step aborted" {
 		set match_term 1
 		exp_continue
 	}
diff --git a/testsuite/expect/test1.39 b/testsuite/expect/test1.39
index 2a2a15cf9e0a26fe1eb2df6cf26f00aaafd11ed0..55263f2561b949b3c1cfd98ab703deb2c39c1671 100755
--- a/testsuite/expect/test1.39
+++ b/testsuite/expect/test1.39
@@ -10,10 +10,11 @@
 # Copyright (C) 2004-2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test1.39.prog.c b/testsuite/expect/test1.39.prog.c
index fccca258a33b255527c25b2a4f0cb888be0b8ff2..9892d2a3d48cecf0080e655ec8e1493d21e7c7fa 100644
--- a/testsuite/expect/test1.39.prog.c
+++ b/testsuite/expect/test1.39.prog.c
@@ -5,10 +5,11 @@
  *  Copyright (C) 2004 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test1.4 b/testsuite/expect/test1.4
index 85389cc91e8dcb6c883bb8f58fcb6eace769c1e1..f7aee300bb470c732db542449bc3d67e7a6ce1fd 100755
--- a/testsuite/expect/test1.4
+++ b/testsuite/expect/test1.4
@@ -10,10 +10,11 @@
 # Copyright (C) 2002 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test1.40 b/testsuite/expect/test1.40
index 355decfe4eae5bf69c783aadcf879f5b99281a08..cc05a090520270bbd27cef021ef51e15b1d4a794 100755
--- a/testsuite/expect/test1.40
+++ b/testsuite/expect/test1.40
@@ -10,10 +10,11 @@
 # Copyright (C) 2004-2007 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test1.41 b/testsuite/expect/test1.41
index ee367ee273f77ab50cda673ceb6821ee6c8d376f..33067ee6a281ed8c35a3cdf820bc7591e4bc455c 100755
--- a/testsuite/expect/test1.41
+++ b/testsuite/expect/test1.41
@@ -10,10 +10,11 @@
 # Copyright (C) 2002-2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test1.42 b/testsuite/expect/test1.42
index 010755d1b5bf199a689b97c2cdb483c3454e8f92..5b50a011c321dafd931fd8e01c3233a54d4a305e 100755
--- a/testsuite/expect/test1.42
+++ b/testsuite/expect/test1.42
@@ -10,10 +10,11 @@
 # Copyright (C) 2004-2007 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
@@ -78,7 +79,7 @@ expect {
 		set job_id2 $expect_out(1,string)
 		exp_continue
 	}
-	-re "JobState=COMPLETED" {
+	-re "JobState=(COMPLETED|COMPLETING)" {
 		set match_state 1
 		exp_continue
 	}
diff --git a/testsuite/expect/test1.43 b/testsuite/expect/test1.43
index cf6834a275f6aee8157c68f5204d5f5f4582dad7..a953acb384394f6133c1a3fdb43cec412c8a780c 100755
--- a/testsuite/expect/test1.43
+++ b/testsuite/expect/test1.43
@@ -10,10 +10,11 @@
 # Copyright (C) 2005 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
@@ -33,6 +34,7 @@ source ./globals
 
 set test_id     "1.43"
 set exit_code   0
+set jobs_run    0
 
 print_header $test_id
 
@@ -43,7 +45,8 @@ set timeout 60
 for {set node_cnt 1} {$node_cnt > 0} {set node_cnt [expr $node_cnt * 2]} {
 	set srun_pid [spawn $srun --test-only -N$node_cnt -t1 $bin_printenv SLURMD_NODENAME]
 	expect {
-		-re "allocation success" {
+		-re "Job $number to start at" {
+			incr jobs_run
 			exp_continue
 		}
 		-re "allocation failure" {
@@ -66,6 +69,11 @@ for {set node_cnt 1} {$node_cnt > 0} {set node_cnt [expr $node_cnt * 2]} {
 	}
 }
 
+if {$jobs_run == 0} {
+	send_user "\nFAILURE: No jobs run\n"
+	set exit_code 1
+}
+
 if {$exit_code == 0} {
 	send_user "\nSUCCESS\n"
 }
diff --git a/testsuite/expect/test1.44 b/testsuite/expect/test1.44
index 993f1aa9a221e4c46db4730cfafffd4d3f5b703d..093c42f7835f8f312c14e9cc7d6667370762535f 100755
--- a/testsuite/expect/test1.44
+++ b/testsuite/expect/test1.44
@@ -12,10 +12,11 @@
 # Copyright (C) 2008 Lawrence Livermore National Security.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Chris Morrone <morrone2@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test1.45 b/testsuite/expect/test1.45
new file mode 100755
index 0000000000000000000000000000000000000000..dc6f745a6812c96b7e86c058634d787cf2378469
--- /dev/null
+++ b/testsuite/expect/test1.45
@@ -0,0 +1,156 @@
+#!/usr/bin/expect
+############################################################################
+# Purpose: Test of SLURM functionality
+#          Test that a job correctly uses the -E or --preserve-env flag.
+#
+# Output:  "TEST: #.#" followed by "SUCCESS" if test was successful, OR
+#          "WARNING: ..." with an explanation of why the test can't be made, OR
+#          "FAILURE: ..." otherwise with an explanation of the failure, OR
+#          anything else indicates a failure mode that must be investigated.
+############################################################################
+# Copyright (C) 2008-2009 Lawrence Livermore National Security
+# Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+# Written by Dave Bremer <dbremer@llnl.gov>
+# CODE-OCEC-09-009. All rights reserved.
+# 
+# This file is part of SLURM, a resource management program.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
+#  
+# SLURM is free software; you can redistribute it and/or modify it under
+# the terms of the GNU General Public License as published by the Free
+# Software Foundation; either version 2 of the License, or (at your option)
+# any later version.
+# 
+# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+# details.
+# 
+# You should have received a copy of the GNU General Public License along
+# with SLURM; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+############################################################################
+source ./globals
+
+set test_id          "1.45"
+set exit_code        0
+set file_in          "test$test_id.input"
+set file_out         "test$test_id.output"
+set job_id           0
+set min_nodes        1
+set max_nodes        3
+set num_procs        6
+set num_nodes_test1  ""
+set num_nodes_test2  ""
+set num_nodes_test3  ""
+set num_procs_test1  ""
+set num_procs_test2  ""
+set num_procs_test3  ""
+
+
+print_header $test_id
+
+if { [test_xcpu] } {
+	send_user "\nWARNING: This test is incompatible with XCPU systems\n"
+	exit 0
+}
+
+#
+# Build input script file
+#
+make_bash_script $file_in "
+   $bin_printenv SLURM_NNODES
+   $srun -E -n1 -N1 $bin_printenv SLURM_NNODES
+   $bin_printenv SLURM_NPROCS
+   $srun --preserve-env -n1 -N1 $bin_printenv SLURM_NPROCS
+   $srun -n1 -N1 $bin_printenv SLURM_NNODES
+   $srun -n1 -N1 $bin_printenv SLURM_NPROCS"
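+# The script prints six values, one per line:
+#   1) SLURM_NNODES from the batch allocation
+#   2) SLURM_NNODES as seen by "srun -E" (should match line 1)
+#   3) SLURM_NPROCS from the batch allocation
+#   4) SLURM_NPROCS as seen by "srun --preserve-env" (should match line 3)
+#   5) SLURM_NNODES from a plain "srun -n1 -N1" (should be 1)
+#   6) SLURM_NPROCS from a plain "srun -n1 -N1" (should be 1)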
+
+#
+# Run job to determine what nodes are available
+#
+spawn $sbatch -N$min_nodes-$max_nodes -n$num_procs -O -t1 --output=$file_out $file_in
+expect {
+	-re "nodes *: ($number)" {
+		if {$expect_out(1,string) != $max_nodes} {
+			send_user "\nFAILURE: failed to process --nodes option\n"
+			set exit_code 1
+		}
+		exp_continue
+	}
+	-re "Submitted batch job ($number)" {
+		set job_id $expect_out(1,string)
+		exp_continue
+	}
+	timeout {
+		send_user "\nFAILURE: sbatch not responding\n"
+		set exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+if {$job_id == 0} {
+	send_user "\nFAILURE: job not submitted\n"
+	exit 1
+}
+if {[wait_for_job $job_id "DONE"] != 0} {
+	send_user "\nFAILURE: job did not complete\n"
+	cancel_job $job_id
+	exit 1
+}
+if {[wait_for_file $file_out] != 0} {
+	send_user "\nFAILURE: no output file\n"
+	exit 1
+}
+
+# NOTE: The srun commands may generate warning messages. For example,
+# "srun: Job step creation temporarily disabled, retrying" can occur
+# when nodes are being powered down.
+set index 0
+spawn $bin_cat $file_out
+expect {
+	-re "($number)\r\n" {
+		incr index
+		if {$index == 1} {set num_nodes_test1 $expect_out(1,string)}
+		if {$index == 2} {set num_nodes_test2 $expect_out(1,string)}
+		if {$index == 3} {set num_procs_test1 $expect_out(1,string)}
+		if {$index == 4} {set num_procs_test2 $expect_out(1,string)}
+		if {$index == 5} {set num_nodes_test3 $expect_out(1,string)}
+		if {$index == 6} {set num_procs_test3 $expect_out(1,string)}
+		exp_continue
+	}
+	eof {
+		wait
+	}
+}
+
+if {$num_nodes_test1 != $num_nodes_test2} {
+	send_user "\nFAILURE: SLURM_NNODES was not preserved ($num_nodes_test1 != $num_nodes_test2)\n"
+	set exit_code 1
+}
+
+if {$num_procs_test1 != $num_procs_test2} {
+	send_user "\nFAILURE: SLURM_NPROCS was not preserved ($num_procs_test1 != $num_procs_test2)\n"
+	set exit_code 1
+}
+
+if {$num_nodes_test3 != 1} {
+	send_user "\nFAILURE: SLURM_NNODES should be 1 ($num_nodes_test3 != 1)\n"
+	set exit_code 1
+}
+
+if {$num_procs_test3 != 1} {
+	send_user "\nFAILURE: SLURM_NPROCS should be 1 ($num_procs_test3 != 1)\n"
+	set exit_code 1
+}
+
+if {$exit_code == 0} {
+	exec $bin_rm $file_in $file_out
+	send_user "\nSUCCESS\n"
+}
+
+exit $exit_code
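
The new test1.45 above hinges on srun's -E/--preserve-env option: a plain -n1 -N1 step rewrites SLURM_NNODES and SLURM_NPROCS to the step's own geometry, while --preserve-env keeps the allocation's original values. A minimal Tcl sketch of that contract outside the Expect harness (assumes it runs from inside a multi-node batch allocation, with srun on PATH):

    # Illustrative standalone check; $env(SLURM_NNODES) is inherited
    # from the surrounding allocation (e.g. 3 for a 3-node job).
    set alloc_nodes $env(SLURM_NNODES)
    set step_nodes  [exec srun --preserve-env -n1 -N1 printenv SLURM_NNODES]
    set plain_nodes [exec srun -n1 -N1 printenv SLURM_NNODES]
    if {$step_nodes != $alloc_nodes} { puts "FAILURE: SLURM_NNODES not preserved" }
    if {$plain_nodes != 1}           { puts "FAILURE: plain step should see 1" }
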
diff --git a/testsuite/expect/test1.46 b/testsuite/expect/test1.46
index 0938ed86bfae9ed9afc6031da990c5098cde61f0..9f748ff1d526729c7caa7c062ca9e6508445fa81 100755
--- a/testsuite/expect/test1.46
+++ b/testsuite/expect/test1.46
@@ -10,10 +10,11 @@
 # Copyright (C) 2002 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test1.47 b/testsuite/expect/test1.47
index cb83a2f29e370ecff90ae3edd8e62540d88612dc..37c4eb9bb7e8101c0bce5c5d6b915fe35a7052b6 100755
--- a/testsuite/expect/test1.47
+++ b/testsuite/expect/test1.47
@@ -10,10 +10,11 @@
 # Copyright (C) 2008 Lawrence Livermore National Security.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
@@ -79,7 +80,7 @@ expect {
 		set job_id2 $expect_out(1,string)
 		exp_continue
 	}
-	-re "JobState=COMPLETED" {
+	-re "JobState=COMPLETED|COMPLETING" {
 		set matches 1
 		exp_continue
 	}
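
One nuance in the pattern above: in a Tcl regular expression the | binds loosely, so "JobState=COMPLETED|COMPLETING" matches either the full literal "JobState=COMPLETED" or a bare "COMPLETING" anywhere in the line. That is sufficient here, since scontrol only prints COMPLETING as a JobState value, but a grouped form scopes the alternation explicitly. A small sketch (the sample line is illustrative scontrol output):

    set line "JobState=COMPLETING Reason=None"
    if {[regexp {JobState=(COMPLETED|COMPLETING)} $line -> state]} {
        puts "matched state: $state"    ;# prints COMPLETING
    }
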
diff --git a/testsuite/expect/test1.48 b/testsuite/expect/test1.48
index ac41d29958775940d368b16a2634888ebb482bc4..7ce7011327da8e38566833ef62e1876713a1c3e8 100755
--- a/testsuite/expect/test1.48
+++ b/testsuite/expect/test1.48
@@ -10,10 +10,11 @@
 # Copyright (C) 2002 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test1.49 b/testsuite/expect/test1.49
index 5c8f43fef92161464f210fefb331e3690b0696c2..1f170a5ae40eae263a923f9f18659536b4d9840b 100755
--- a/testsuite/expect/test1.49
+++ b/testsuite/expect/test1.49
@@ -10,10 +10,11 @@
 # Copyright (C) 2005-2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test1.5 b/testsuite/expect/test1.5
index 79427066b3ef312e35ffbd3d6f2091608bf01923..6d98842cce7426f224fcede36ab1ca5a79a43e58 100755
--- a/testsuite/expect/test1.5
+++ b/testsuite/expect/test1.5
@@ -10,10 +10,11 @@
 # Copyright (C) 2002 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test1.50 b/testsuite/expect/test1.50
index bccb6a3d285904deef854ff7bf8be8a49fb8f05a..6cd180d42667f679edfb84283d4ef3c65c348061 100755
--- a/testsuite/expect/test1.50
+++ b/testsuite/expect/test1.50
@@ -10,10 +10,11 @@
 # Copyright (C) 2002-2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test1.51 b/testsuite/expect/test1.51
index 45ae7892537dbcdf3156ca38523e478d90729aba..4c7cf67acd72dfcb0040baca3245359e962fe31c 100755
--- a/testsuite/expect/test1.51
+++ b/testsuite/expect/test1.51
@@ -11,10 +11,11 @@
 # Copyright (C) 2005 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test1.52 b/testsuite/expect/test1.52
index 74e47c00c5c87fb67d79637381c024956f893a0e..0eb3184ee44353f007b981e722ebf4ebf52e0a6f 100755
--- a/testsuite/expect/test1.52
+++ b/testsuite/expect/test1.52
@@ -7,13 +7,15 @@
 #          "FAILURE: ..." otherwise with an explanation of the failure, OR
 #          anything else indicates a failure mode that must be investigated.
 ############################################################################
-# Copyright (C) 2002-2006 The Regents of the University of California.
+# Copyright (C) 2002-2007 The Regents of the University of California.
+# Copyright (C) 2008-2009  Lawrence Livermore National Security.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Danny Auble <da@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
@@ -83,7 +85,6 @@ set node2       0
 set no_hostfile 0
 set timeout $max_job_delay
 for {set i 0} {$i<3} {incr i} {
-
 	if { $i==1 } {
 		if { $node0 == 0 || $node1 == 0 || $node2 == 0 } {
 			send_user "\nFAILURE: node names not set from \
@@ -91,9 +92,9 @@ for {set i 0} {$i<3} {incr i} {
 			exit 1
 		}
 		set env(SLURM_HOSTFILE) $hostfile
-		set 1node0 $node0
-		set 1node1 $node1
-		set 1node2 $node2
+		set 1node0 $node2
+		set 1node1 $node0
+		set 1node2 $node1
 		set file [open $hostfile "w"]
 		puts $file "$node2\n$node0\n$node1"
 		close $file
@@ -106,9 +107,9 @@ for {set i 0} {$i<3} {incr i} {
 		set env(SLURM_HOSTFILE) $hostfile
 		set 2node0 $node1
 		set 2node1 $node0
-		set 2node2 $node0
+		set 2node2 $node2
 		set file [open $hostfile "w"]
-		puts $file "$node1\n$node0\n$node0"
+		puts $file "$node1\n$node0\n$node2"
 		close $file
 	}
 	#
@@ -117,7 +118,11 @@ for {set i 0} {$i<3} {incr i} {
 	set node0  ""
 	set node1  ""
 	set node2  ""
-	set srun_pid [spawn $srun -N3 -t1 -l $bin_printenv SLURMD_NODENAME]
+	if { $i == 0} {
+		set srun_pid [spawn $srun -N3 -t1 -l $bin_printenv SLURMD_NODENAME]
+	} else {
+		set srun_pid [spawn $srun -N3 -t1 -l --distribution=arbitrary $bin_printenv SLURMD_NODENAME]
+	}
 	expect {
 		-re "SwitchType does not permit arbitrary task distribution" {
 			set no_hostfile 1
@@ -146,25 +151,25 @@ for {set i 0} {$i<3} {incr i} {
 	if { $no_hostfile != 0 } {
 		send_user "\nNo worries, test just can not run here\n"
 	} elseif { $i == 1 } {
-		if { [string compare $node0 $1node2] } {
-			send_user "\nFAILURE: tasks not distributed by hostfile\n"
+		if { [string compare $node0 $1node0] } {
+			send_user "\nFAILURE: task 0 not distributed by hostfile ($node0 != $1node0)\n"
 			set exit_code 1
-		} elseif { [string compare $node1 $1node0] } {
-			send_user "\nFAILURE: tasks not distributed by hostfile\n"
+		} elseif { [string compare $node1 $1node1] } {
+			send_user "\nFAILURE: task 1 not distributed by hostfile ($node1 != $1node1)\n"
 			set exit_code 1
-		} elseif { [string compare $node2 $1node1] } {
-			send_user "\nFAILURE: tasks not distributed by hostfile\n"
+		} elseif { [string compare $node2 $1node2] } {
+			send_user "\nFAILURE: task 2 not distributed by hostfile ($node2 != $1node2)\n"
 			set exit_code 1
 		}
 	} elseif { $i == 2 } {
 		if { [string compare $node0 $2node0] } {
-			send_user "\nFAILURE: tasks not distributed by hostfile\n"
+			send_user "\nFAILURE: task 0 not distributed by hostfile ($node0 != $2node0)\n"
 			set exit_code 1
 		} elseif { [string compare $node1 $2node1] } {
-			send_user "\nFAILURE: tasks not distributed by hostfile\n"
+			send_user "\nFAILURE: task 1 not distributed by hostfile ($node1 != $2node1)\n"
 			set exit_code 1
-		} elseif { [string compare $node2 $2node1] } {
-			send_user "\nFAILURE: tasks not distributed by hostfile\n"
+		} elseif { [string compare $node2 $2node2] } {
+			send_user "\nFAILURE: task 2 not distributed by hostfile ($node2 != $2node2)\n"
 			set exit_code 1
 		}
 	}
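
The reworked test1.52 loop above checks the arbitrary-distribution contract: with SLURM_HOSTFILE pointing at a file of hostnames and srun invoked with --distribution=arbitrary, task i lands on the host named on line i, so the expected node for each task is simply the corresponding hostfile line. A standalone sketch with placeholder hostnames:

    set hostfile "my.hostfile"
    set fh [open $hostfile "w"]
    puts $fh "node2\nnode0\nnode1"     ;# line i names the host for task i
    close $fh
    set env(SLURM_HOSTFILE) $hostfile
    # -l prefixes each line with the task id, so the expected output is
    # "0: node2", "1: node0", "2: node1" (hostnames are illustrative).
    puts [exec srun -N3 -l --distribution=arbitrary printenv SLURMD_NODENAME]
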
diff --git a/testsuite/expect/test1.54 b/testsuite/expect/test1.54
index cd235cf73306d0abf8950b7112dceeb75951ebb0..fd48d26f146e2faa127bbbeae703f15bd52e4b8e 100755
--- a/testsuite/expect/test1.54
+++ b/testsuite/expect/test1.54
@@ -11,10 +11,11 @@
 # Copyright (C) 2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test1.55 b/testsuite/expect/test1.55
index 6879d7563050018e24f5307b3a2039935a4cf1a5..05bdda069b613edd8e3309944490df66dbb9bfa4 100755
--- a/testsuite/expect/test1.55
+++ b/testsuite/expect/test1.55
@@ -11,10 +11,11 @@
 # Copyright (C) 2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Christopher J. Morrone <morrone2@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test1.56 b/testsuite/expect/test1.56
index 67e539c97937685f43b6b3e0dd18041335085301..24509eb34f3a21a36f9b103e7757022c521b8b5f 100755
--- a/testsuite/expect/test1.56
+++ b/testsuite/expect/test1.56
@@ -10,10 +10,11 @@
 # Copyright (C) 2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Christopher J. Morrone <morrone2@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test1.57 b/testsuite/expect/test1.57
index 5a553f57df20cc9f0c3fcbc7a146de5daae720e1..cc865333014f9452012140696bc33a5a4b420f9c 100755
--- a/testsuite/expect/test1.57
+++ b/testsuite/expect/test1.57
@@ -11,10 +11,11 @@
 # Copyright (C) 2008 Lawrence Livermore National Security.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test1.58 b/testsuite/expect/test1.58
index 6d467811c9c6f4989160ae336f5758c326acc55b..bb92683d03e4d862252c9752dd2b2d9120fed990 100755
--- a/testsuite/expect/test1.58
+++ b/testsuite/expect/test1.58
@@ -10,10 +10,11 @@
 # Copyright (C) 2002-2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Christopher J. Morrone <morrone2@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test1.59 b/testsuite/expect/test1.59
index 6cb16f1c926aa2dc7ba1f07a499fac165805275c..c6ee7cd315f806e46e0ae5365f03427dbbc5daf6 100755
--- a/testsuite/expect/test1.59
+++ b/testsuite/expect/test1.59
@@ -15,7 +15,8 @@
 # UCRL-CODE-217948.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
@@ -62,8 +63,8 @@ expect {
 	}
 	-re "TotalNodes=($number)" {
 		set node_count $expect_out(1,string)
-		if { $node_count < 2 } {
-			send_user "WARNING: system must have at least 2 \
+		if { $node_count < 3 } {
+			send_user "WARNING: system must have at least 3 \
 				   nodes to run this test on.  This system \
 				   only has $node_count.\n"
 			exit $exit_code
@@ -86,7 +87,7 @@ set node4 0
 set timeout $max_job_delay
 spawn $salloc -N$num_nodes -v bash
 expect {
-	-re "salloc: Granted job allocation ($number):" {
+	-re "salloc: Granted job allocation ($number)" {
 		set job_id $expect_out(1,string)
 		exp_continue
 	}
@@ -102,6 +103,10 @@ expect {
 		wait
 	}
 }
+if {$job_id == 0} {
+	send_user "\nFAILURE: salloc failure\n"
+	exit 1
+}
 
 for {set i 0} {$i<4} {incr i} {
 	set extra ""
@@ -192,7 +197,7 @@ for {set i 0} {$i<4} {incr i} {
 		-re $prompt {
 			#send_user "srun completed\n"
 		}
-		-re "slurm job ($number)" {
+		-re "Granted job allocation ($number)" {
 			set job_id $expect_out(1,string)
 			exp_continue
 		}
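
The added job_id check above guards a failure mode that previously went unnoticed: if salloc fails or prints an unexpected message, job_id stays 0 and every later step operates on a nonexistent job. The same defensive shape, sketched outside the harness (the $number pattern is assumed from the testsuite's ./globals, and the bare salloc stands in for $salloc):

    set job_id 0
    spawn salloc -N2 -v bash
    expect {
        -re "Granted job allocation ($number)" { set job_id $expect_out(1,string) }
        timeout { }
    }
    if {$job_id == 0} {
        send_user "\nFAILURE: salloc failure\n"
        exit 1
    }
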
diff --git a/testsuite/expect/test1.6 b/testsuite/expect/test1.6
index 5b6ffcb9fe64c682003e35f0607d5350f84d8bc8..f2f338e87d1cd9ecbc65a76050b9320677684304 100755
--- a/testsuite/expect/test1.6
+++ b/testsuite/expect/test1.6
@@ -13,10 +13,11 @@
 # Copyright (C) 2002 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test1.60 b/testsuite/expect/test1.60
new file mode 100755
index 0000000000000000000000000000000000000000..09d003c4ae07c49a14e41082a82a6006bc5593f8
--- /dev/null
+++ b/testsuite/expect/test1.60
@@ -0,0 +1,204 @@
+#!/usr/bin/expect
+############################################################################
+# Purpose: Test of SLURM functionality
+#          Test srun stdout/err labelling combined with file template
+#          options with %t and %n.
+#
+# Output:  "TEST: #.#" followed by "SUCCESS" if test was successful, OR
+#          "FAILURE: ..." otherwise with an explanation of the failure, OR
+#          anything else indicates a failure mode that must be investigated.
+############################################################################
+# Copyright (C) 2009 Lawrence Livermore National Security.
+# Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+# Written by Dave Bremer <dbremer@llnl.gov>
+# CODE-OCEC-09-009. All rights reserved.
+# 
+# This file is part of SLURM, a resource management program.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
+#  
+# SLURM is free software; you can redistribute it and/or modify it under
+# the terms of the GNU General Public License as published by the Free
+# Software Foundation; either version 2 of the License, or (at your option)
+# any later version.
+# 
+# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+# details.
+# 
+# You should have received a copy of the GNU General Public License along
+# with SLURM; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+############################################################################
+source ./globals
+
+set test_id          "1.60"
+set exit_code        0
+set file_out_n       "test$test_id.n.%n.output"
+set file_out_t       "test$test_id.t.%t.output"
+set job_id           0
+set node_count       0
+set task_count       0
+set task_id          0
+set node_id          0
+set file_cnt         0
+set file_out_t_glob  ""
+set file_out_n_glob  ""
+
+print_header $test_id
+
+#
+# Spawn a program that generates "task_id" (%t) in stdout file names
+# and confirm they are created
+#
+# node_count is not yet known here, so clean a fixed range of task ids
+for {set task_id 0} {$task_id < 32} {incr task_id} {
+	exec $bin_rm -f "test$test_id.t.$task_id.output"
+}
+set timeout $max_job_delay
+set srun_pid [spawn $srun -l --output=$file_out_t -N 1-10 -v -t1 $bin_echo hello]
+expect {
+	-re "jobid ($number)" {
+		set job_id $expect_out(1,string)
+		exp_continue
+	}
+	timeout {
+		send_user "\nFAILURE: srun not responding\n"
+		slow_kill $srun_pid
+		exit 1
+	}
+	eof {
+		wait
+	}
+}
+if {$job_id == 0} {
+	send_user "\nFAILURE: job initiation failed\n"
+	exit 1
+}
+
+set node_count 0
+spawn $squeue -tall -j $job_id -o "%i %D"
+expect {
+	-re "$job_id ($number)" {
+		set node_count $expect_out(1,string)
+		exp_continue
+	}
+	eof {
+		wait
+	}
+}
+if {$node_count == 0} {
+	send_user "\nFAILURE: did not get node_count\n"
+	cancel_job $job_id
+	exit 1
+}
+
+set file_cnt 0
+for {set task_id 0} {$task_id < $node_count} {incr task_id} {
+	set file_out_t_glob  "test$test_id.t.$task_id.output"
+	if {[wait_for_file $file_out_t_glob] != 0} {
+		set exit_code 1
+	} else {
+		set test_task_id -1
+		spawn $bin_cat $file_out_t_glob
+		expect {
+			-re "($number): *hello" {
+				set test_task_id $expect_out(1,string)
+				exp_continue
+			}
+			eof {
+				wait
+			}
+		}
+		if {$task_id != $test_task_id} {
+			send_user "\nFAILURE: file $file_out_t_glob was not labelled.  $task_id != $test_task_id\n"
+			set exit_code 1
+		} else {
+			exec $bin_rm -f $file_out_t_glob
+		}
+		incr file_cnt
+	}
+}
+if {$file_cnt != $node_count} {
+	send_user "\nFAILURE: %t file name template for stdout failed\n"
+	set exit_code 1
+}
+if {$exit_code != 0} {
+	exit $exit_code
+}
+
+#
+# Spawn a program that generates "node_id" (%n) in stdout file names
+# and confirm they are created
+#
+for {set node_id 0} {$node_id < $node_count} {incr node_id} {
+	set file_out_n_glob  "test$test_id.n.$node_id.output"
+	exec $bin_rm -f $file_out_n_glob
+}
+
+set task_count [expr $node_count * 2]
+set timeout $max_job_delay
+set srun_pid [spawn $srun -l --output=$file_out_n -N $node_count -n $task_count -O -v -t1 $bin_echo hello]
+expect {
+	-re "jobid ($number).*" {
+		set job_id $expect_out(1,string)
+		exp_continue
+	}
+	timeout {
+		send_user "\nFAILURE: srun not responding\n"
+		slow_kill $srun_pid
+		exit 1
+	}
+	eof {
+		wait
+	}
+}
+if {$job_id == 0} {
+	send_user "\nFAILURE: job initiation failed\n"
+	exit 1
+}
+
+
+for {set node_id 0} {$node_id < $node_count} {incr node_id} {
+	set file_out_n_glob  "test$test_id.n.$node_id.output"
+	if {[wait_for_file $file_out_n_glob] != 0} {
+		set exit_code 1
+	} else {
+		set t0found false
+		set t1found false
+
+		spawn $bin_cat $file_out_n_glob
+		expect {
+			-re "($number): *hello" {
+				set test_task_id $expect_out(1,string)
+				if {$test_task_id == $node_id * 2} {
+					set t0found true
+				}
+				if {$test_task_id == $node_id * 2 + 1} {
+					set t1found true
+				}
+				exp_continue
+			}
+			eof {
+				wait
+			}
+		}
+		if {!$t0found || !$t1found} {
+			send_user "\nFAILURE: file $file_out_n_glob was not labelled correctly\n"
+			set exit_code 1
+		} else {
+			exec $bin_rm -f $file_out_n_glob
+		}
+		incr file_cnt
+	}
+}
+
+
+#
+# Post-processing
+#
+if {$exit_code == 0} {
+	send_user "\nSUCCESS\n"
+}
+exit $exit_code
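
For reference, the two filename templates test1.60 exercises expand per task and per node: %t becomes the task id (one file per task) and %n the relative node id within the job (one file per node). With -N 2 -n 4 -O and block distribution, node 0 hosts tasks 0-1 and node 1 hosts tasks 2-3, which is the arithmetic the second verification loop checks. A tiny sketch of that expected mapping (block distribution assumed, as in the test):

    set node_count 2
    for {set node_id 0} {$node_id < $node_count} {incr node_id} {
        set t0 [expr {$node_id * 2}]
        set t1 [expr {$node_id * 2 + 1}]
        puts "test1.60.n.$node_id.output should hold task labels $t0 and $t1"
    }
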
diff --git a/testsuite/expect/test1.7 b/testsuite/expect/test1.7
index 42efc6c938bea2ac87c66abf633fbdb0c160d902..95fef27af743093aa22f9c82df4510119cc07636 100755
--- a/testsuite/expect/test1.7
+++ b/testsuite/expect/test1.7
@@ -8,12 +8,14 @@
 #          anything else indicates a failure mode that must be investigated.
 ############################################################################
 # Copyright (C) 2002-2007 The Regents of the University of California.
+# Copyright (C) 2008 Lawrence Livermore National Security.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
@@ -44,6 +46,7 @@ print_header $test_id
 # Make sure sleep time is no larger than InactiveLimit
 set inactive_limit  $sleep_time
 set kill_wait       $sleep_time
+set over_time_limit 0
 log_user 0
 spawn $scontrol show config
 expect {
@@ -55,6 +58,14 @@ expect {
 		set kill_wait $expect_out(1,string)
 		exp_continue
 	}
+	-re "OverTimeLimit *= UNLIMITED" {
+		set over_time_limit 9999
+		exp_continue
+	}
+	-re "OverTimeLimit *= ($number)" {
+		set over_time_limit $expect_out(1,string)
+		exp_continue
+	}
 	timeout {
 		send_user "\nFAILURE: scontrol not responding\n"
 		set exit_code 1
@@ -75,6 +86,10 @@ if {$kill_wait > 60} {
 	send_user "\nWARNING: KillWait ($kill_wait) is too high for this test\n"
 	exit 0
 }
+if {$over_time_limit > 0} {
+	send_user "\nWARNING: OverTimeLimit too high for this test ($over_time_limit > 0)\n"
+	exit 0
+}
 if {$inactive_limit < $sleep_time} {
 	set sleep_time [expr $inactive_limit + $kill_wait]
 	send_user "\nReset job sleep time to $sleep_time seconds\n"
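
The OverTimeLimit gate added above matters because this test measures whether a job is terminated promptly at its limit: any nonzero OverTimeLimit lets jobs run past the limit, and UNLIMITED disables enforcement entirely, hence the 9999 sentinel. The parsing logic, condensed into a standalone sketch (sample lines are illustrative scontrol output):

    set over_time_limit 0
    foreach config_line [list "OverTimeLimit = UNLIMITED" "OverTimeLimit = 5"] {
        if {[regexp {OverTimeLimit *= *UNLIMITED} $config_line]} {
            set over_time_limit 9999
        } elseif {[regexp {OverTimeLimit *= *([0-9]+)} $config_line -> v]} {
            set over_time_limit $v
        }
        puts "$config_line => over_time_limit = $over_time_limit"
    }
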
diff --git a/testsuite/expect/test1.8 b/testsuite/expect/test1.8
index 3d68e60e38c36f3b2100d987289015fbfc5b0780..ddc93829cd70c660181cb679ea5e288aff6e93bb 100755
--- a/testsuite/expect/test1.8
+++ b/testsuite/expect/test1.8
@@ -14,10 +14,11 @@
 # Copyright (C) 2002 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test1.80 b/testsuite/expect/test1.80
index a3b3e11c625dcac3fa99a85de956ff9337200efa..25ede9de733de39a8dadfe08f6de9cbf0d53f276 100755
--- a/testsuite/expect/test1.80
+++ b/testsuite/expect/test1.80
@@ -13,10 +13,11 @@
 # Copyright (C) 2008 Lawrence Livermore National Security.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test1.81 b/testsuite/expect/test1.81
index 14541174bedd7004f55dd2717faaaa9f97a00381..2edd0304df67ca0a5b824602d08c71606e5781e8 100755
--- a/testsuite/expect/test1.81
+++ b/testsuite/expect/test1.81
@@ -13,10 +13,11 @@
 # Copyright (C) 2008 Lawrence Livermore National Security.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test1.82 b/testsuite/expect/test1.82
index cf8004058840bb1d0082f9e2d124c2064eece3a2..ef05b750ddd2b049df8bf58a9304f3b48127ba84 100755
--- a/testsuite/expect/test1.82
+++ b/testsuite/expect/test1.82
@@ -13,10 +13,11 @@
 # Copyright (C) 2008 Lawrence Livermore National Security.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test1.83 b/testsuite/expect/test1.83
index 3a99bf56ad1477a9a08856106ded0980868a2fe0..65aa240c451fd01f226edb19881fd8059338d1c8 100755
--- a/testsuite/expect/test1.83
+++ b/testsuite/expect/test1.83
@@ -14,13 +14,14 @@
 #          Change tha node name parsing logic as needed for other formats.
 ############################################################################
 # Copyright (C) 2002-2007 The Regents of the University of California.
-# Copyright (C) 2008 Lawrence Livermore National Security.
+# Copyright (C) 2008-2009 Lawrence Livermore National Security.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
@@ -52,6 +53,10 @@ if {[test_front_end] != 0} {
 	send_user "\nWARNING: This test is incompatable with front-end systems\n"
 	exit 0
 }
+if {[test_topology] != 0} {
+	send_user "\nWARNING: This test is incompatible with topology-configured systems\n"
+	exit 0
+}
 
 set available [available_nodes [default_partition]]
 if {$available < 3} {
diff --git a/testsuite/expect/test1.84 b/testsuite/expect/test1.84
index 6085e12d0cbe467bcbc1034d45545c9b28b5248f..b301a657066670af347b9291eaaa157f2006add6 100755
--- a/testsuite/expect/test1.84
+++ b/testsuite/expect/test1.84
@@ -16,10 +16,11 @@
 # Copyright (C) 2002-2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
@@ -66,7 +67,7 @@ set host           ""
 set timeout        $max_job_delay
 set task_cnt       0
 
-set srun_pid [spawn $srun -N1 --cpus-per-task=1 -l -t1 $file_in]
+set srun_pid [spawn $srun -N1 --cpus-per-task=1 --exclusive -l -t1 $file_in]
 expect {
 	-re "Granted job allocation ($number)" {
 		set job_id $expect_out(1,string)
@@ -112,6 +113,7 @@ if {[string compare $host ""] == 0} {
 if {$cpu_cnt != $task_cnt} {
 	send_user "FAILURE: should have run $cpu_cnt tasks (one per CPU) "
 	send_user "instead of $task_cnt tasks\n"
+	send_user "NOTE: This could be due to a memory limit per allocated CPU\n\n"
 	set exit_code 1
 }
 if {$cpu_cnt < 2} {
@@ -123,7 +125,7 @@ if {$cpu_cnt < 2} {
 # Submit a 1 node job to determine the node's CPU count
 #
 set task_cnt  0
-set srun_pid [spawn $srun -N1 -t1 --nodelist=$host --cpus-per-task=2 -l $bin_printenv SLURMD_NODENAME]
+set srun_pid [spawn $srun -N1 -t1 --nodelist=$host --cpus-per-task=2 --exclusive -l $bin_printenv SLURMD_NODENAME]
 expect {
 	-re "Invalid node name specified" {
 		send_user "\nWARNING: Appears you are using "
@@ -150,6 +152,7 @@ expect {
 #
 if {$task_cnt != [expr $cpu_cnt / 2]} {
 	send_user "\nFAILURE: Improper task count for given cpus-per-task\n"
+	send_user "NOTE: This could be due to a memory limit per allocated CPU\n\n"
 	set exit_code   1	
 }
 
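
The --exclusive additions above make the CPU-count arithmetic in test1.84 reliable: an exclusive step gets every CPU on the node, so one task per CPU yields cpu_cnt tasks, and --cpus-per-task=2 should yield exactly cpu_cnt / 2 (integer division). The relationship the test asserts, in isolation (the node size is illustrative):

    set cpu_cnt 8                              ;# illustrative node CPU count
    set expected_tasks [expr {$cpu_cnt / 2}]   ;# with --cpus-per-task=2
    puts "expect $expected_tasks tasks on an $cpu_cnt-CPU node"
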
diff --git a/testsuite/expect/test1.86 b/testsuite/expect/test1.86
index 44163c7aa964db25270a3da68c9accc46abf2d45..a15aed75ff22e99760795c594e79c123fae5f34d 100755
--- a/testsuite/expect/test1.86
+++ b/testsuite/expect/test1.86
@@ -13,10 +13,11 @@
 # Copyright (C) 2008 Lawrence Livermore National Security.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test1.87 b/testsuite/expect/test1.87
index 94b7afa3040f9746face270ea8106cef141005e5..4890f1b04fc33936fa042fca4200e7d1d99e1940 100755
--- a/testsuite/expect/test1.87
+++ b/testsuite/expect/test1.87
@@ -13,10 +13,11 @@
 # Copyright (C) 2008 Lawrence Livermore National Security.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test1.88 b/testsuite/expect/test1.88
index ebc20179c05df753495b99ef492f389b037dd51f..159765ae0041da90a147888b048ffac8e5d1c669 100755
--- a/testsuite/expect/test1.88
+++ b/testsuite/expect/test1.88
@@ -11,10 +11,11 @@
 # Copyright (C) 2002 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
@@ -112,11 +113,7 @@ expect {
 		set job_id $expect_out(1,string)
 		exp_continue
 	}
-	-re "configuration not available" {
-		set no_start 1
-		exp_continue
-	}
-	-re "Unable to submit batch job" {
+	-re "(configuration not available|Unable to submit batch job|Node count specification invalid)" {
 		set no_start 1
 		exp_continue
 	}
diff --git a/testsuite/expect/test1.88.prog.c b/testsuite/expect/test1.88.prog.c
index e56cb74be02e9f7a3f87b1218e67d6d8a9e8d5d8..1539f050a208d696a5695b5949477175fb7144a4 100644
--- a/testsuite/expect/test1.88.prog.c
+++ b/testsuite/expect/test1.88.prog.c
@@ -4,10 +4,11 @@
  *  Copyright (C) 2004 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Dong Ang <dahn@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test1.89 b/testsuite/expect/test1.89
index f7e703f6c5d52a4eb08c9073159b01c126a32ade..b8cf2676d60469da5d50f99016c740a40c65473b 100755
--- a/testsuite/expect/test1.89
+++ b/testsuite/expect/test1.89
@@ -11,10 +11,11 @@
 # Copyright (C) 2005 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
@@ -70,7 +71,7 @@ exec $bin_chmod 700 $file_prog
 #
 # Create an allocation
 #
-set salloc_pid [spawn $salloc -N1 --verbose -t2 $bin_bash]
+set salloc_pid [spawn $salloc -N1 --exclusive --verbose -t2 $bin_bash]
 
 #
 # Run a job step to get allocated processor count and affinity
@@ -156,7 +157,7 @@ if {$task_mask != $task_cnt} {
 set verbose_cnt 0
 send "$srun -c1 --cpu_bind=verbose,map_cpu:0 $file_prog\n"
 expect {
-	-re "cpu_bind=MAP" {
+	-re "cpu_bind=MAP|cpu_bind_cores=MAP|cpu_bind_sockets=MAP|cpu_bind_threads=MAP" {
 		incr verbose_cnt
 		exp_continue
 	}
diff --git a/testsuite/expect/test1.89.prog.c b/testsuite/expect/test1.89.prog.c
index 2a1824921d3c1b2820c11448563e1f57ecb55daa..8383de0579a3ed28346a8ca90b75d9dbbd13339d 100644
--- a/testsuite/expect/test1.89.prog.c
+++ b/testsuite/expect/test1.89.prog.c
@@ -6,10 +6,11 @@
  *  Copyright (C) 2005 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test1.9 b/testsuite/expect/test1.9
index f6002c958430026d8efd526e407f6d77d29c15cf..10b426d943634c371cdaa64224dca5f3d666fb11 100755
--- a/testsuite/expect/test1.9
+++ b/testsuite/expect/test1.9
@@ -10,10 +10,11 @@
 # Copyright (C) 2002 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test1.90 b/testsuite/expect/test1.90
index d3b2bb27a63b26d46504739b483e4f1788114735..c5b02164a2d2a79cdf5bee47d328e5ce132da5b1 100755
--- a/testsuite/expect/test1.90
+++ b/testsuite/expect/test1.90
@@ -11,10 +11,11 @@
 # Copyright (C) 2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test1.90.prog.c b/testsuite/expect/test1.90.prog.c
index b6f9a2c1232d097a53662ccc2e8aedc3f519ac83..fbde23f2a83de2b6f37c7e0a007a1253ea0c20fe 100644
--- a/testsuite/expect/test1.90.prog.c
+++ b/testsuite/expect/test1.90.prog.c
@@ -6,10 +6,11 @@
  *  Copyright (C) 2006 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test1.91 b/testsuite/expect/test1.91
index 01931d6fa94fcc6434345d3ea00c5a0431b2237e..e53ce931653b6d3fe430e3ea422d3ed0998bb113 100755
--- a/testsuite/expect/test1.91
+++ b/testsuite/expect/test1.91
@@ -12,10 +12,11 @@
 # Copyright (C) 2008 Lawrence Livermore National Security.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
@@ -96,11 +97,11 @@ expect {
 		set num_sockets $expect_out(1,string)
 		exp_continue
 	}
-	-re "Cores=($number)" {
+	-re "CoresPerSocket=($number)" {
 	   	set num_cores $expect_out(1,string)
 		exp_continue
 	}
-	-re "Threads=($number)" {
+	-re "ThreadsPerCore=($number)" {
 	   	set num_threads $expect_out(1,string)
 		exp_continue
 	}
@@ -387,8 +388,9 @@ while {$this_cnt <= $task_cnt} {
 #############################################################################
 #
 # Run a job step with plane distribution to exercise option
+# Automatic binding in slurm version 2.0 will bind one task per core
 #
-set expected_mask [ expr ((1 << $task_cnt) - 1) * $task_cnt ]
+set expected_mask [ expr ((1 << $task_cnt) - 1) ]
 set task_mask 0
 send "$srun -n $task_cnt -m plane=4 $file_prog\n"
 expect {
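
The new expected_mask above reflects SLURM 2.0's default of binding one task per core: task i reports affinity mask 1 << i, the test ORs the per-task masks together, and the cumulative result for task_cnt tasks is the contiguous low bitmask (1 << task_cnt) - 1 rather than the old product form. Worked through for four tasks:

    set task_cnt 4
    set task_mask 0
    for {set i 0} {$i < $task_cnt} {incr i} {
        set task_mask [expr {$task_mask | (1 << $i)}]
    }
    puts $task_mask    ;# 15, i.e. (1 << 4) - 1 = 0b1111
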
diff --git a/testsuite/expect/test1.91.prog.c b/testsuite/expect/test1.91.prog.c
index ad98f3ec862a9aa57375d5ace51f846d10102455..c5d8705b56ad1377309f9b7f3067a0cc45af4056 100644
--- a/testsuite/expect/test1.91.prog.c
+++ b/testsuite/expect/test1.91.prog.c
@@ -6,10 +6,11 @@
  *  Copyright (C) 2005 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test1.92 b/testsuite/expect/test1.92
index c23d95e5b20b3603c89454c1ccd35358c022bac1..b02c664c28f348de9bbd147200fb1d297cea12e9 100755
--- a/testsuite/expect/test1.92
+++ b/testsuite/expect/test1.92
@@ -12,10 +12,11 @@
 # Copyright (C) 2008 Lawrence Livermore National Security.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
@@ -36,6 +37,7 @@ source ./globals
 set test_id     "1.92"
 set exit_code   0
 set file_bash   "test$test_id.bash"
+set job_id      0
 
 print_header $test_id
 
@@ -52,8 +54,13 @@ exit 0
 #
 # Create an allocation
 #
+set timeout $max_job_delay
 set salloc_pid [spawn $salloc -N2 -n4 --verbose -t2 $bin_bash]
 expect {
+	-re "salloc: Granted job allocation ($number)" {
+		set job_id $expect_out(1,string)
+		exp_continue
+	}
 	-re "More processors requested than permitted" {
 		send_user "\nWARNING: can't test srun task distribution\n"
 		exit 0
@@ -71,6 +78,10 @@ expect {
 		exit 1
 	}
 }
+if {$job_id == 0} {
+	send_user "\nFAILURE: salloc failure\n"
+	exit 1
+}
 
 #############################################################################
 #
diff --git a/testsuite/expect/test1.93 b/testsuite/expect/test1.93
index 1cd34b619004bb225db369d394de53168cc774e6..7da5ae592c2ba3e025c8c0375c90d3035f795a12 100755
--- a/testsuite/expect/test1.93
+++ b/testsuite/expect/test1.93
@@ -11,10 +11,11 @@
 # Copyright (C) 2008 Lawrence Livermore National Security.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test10.1 b/testsuite/expect/test10.1
index aacd65a05fc7747fc319bb910a0ee3bcfcb13594..dced62c79c30732439bade426dba519eee95c119 100755
--- a/testsuite/expect/test10.1
+++ b/testsuite/expect/test10.1
@@ -10,10 +10,11 @@
 # Copyright (C) 2002 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Danny Auble <da@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test10.10 b/testsuite/expect/test10.10
index 76fb365274f3a291feb565405363a156b5770999..e90f3713585198dcc64895050a1c3e91b626f659 100755
--- a/testsuite/expect/test10.10
+++ b/testsuite/expect/test10.10
@@ -11,10 +11,11 @@
 # Copyright (C) 2002-2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Danny Auble <da@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test10.11 b/testsuite/expect/test10.11
index 4bab9f667b460e47003095bd721c00be3e4d67a8..e0116c80edd027fe7649ab799146cd9af3cf52b8 100755
--- a/testsuite/expect/test10.11
+++ b/testsuite/expect/test10.11
@@ -10,10 +10,11 @@
 # Copyright (C) 2002 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Danny Auble <da@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test10.12 b/testsuite/expect/test10.12
index 5063f5aec4884a4bbf9c84ccbae958451f559358..5748d980c7425d8e2cfa57b05e9d5d2354db7ae8 100755
--- a/testsuite/expect/test10.12
+++ b/testsuite/expect/test10.12
@@ -10,10 +10,11 @@
 # Copyright (C) 2002 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Danny Auble <da@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test10.13 b/testsuite/expect/test10.13
index db8fe4a0c05e52791815445928880193eeedd4fc..074d5e74001d6ced1bf144b8121cae38f543f7ca 100755
--- a/testsuite/expect/test10.13
+++ b/testsuite/expect/test10.13
@@ -11,10 +11,11 @@
 # Copyright (C) 2002-2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Danny Auble <da@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test10.2 b/testsuite/expect/test10.2
index edeb5035ebad34c3cf2ce7c39c4f1a116aa53e1e..aaae364b374040d2e463a4e1be1bcb81490247a7 100755
--- a/testsuite/expect/test10.2
+++ b/testsuite/expect/test10.2
@@ -10,10 +10,11 @@
 # Copyright (C) 2002 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Danny Auble <da@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test10.3 b/testsuite/expect/test10.3
index 87a2c5a1923a93e74eee02ee0fb9ff4254d11cd8..d9ad533b6a11f64bc22cf712c11afde76398e256 100755
--- a/testsuite/expect/test10.3
+++ b/testsuite/expect/test10.3
@@ -11,10 +11,11 @@
 # Copyright (C) 2002-2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Danny Auble <da@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test10.4 b/testsuite/expect/test10.4
index dd6cb8ce8e0d11ae711c735a501458f323cb8d8d..1b009245d7910e2c6a2a187e9bd7acc373d9708e 100755
--- a/testsuite/expect/test10.4
+++ b/testsuite/expect/test10.4
@@ -11,10 +11,11 @@
 # Copyright (C) 2002-2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Danny Auble <da@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test10.5 b/testsuite/expect/test10.5
index cf57af940472cf32ea3ed9cab11e29bc7f977818..45d04acf1d744f07badfc6b8226244a6309209bb 100755
--- a/testsuite/expect/test10.5
+++ b/testsuite/expect/test10.5
@@ -11,10 +11,11 @@
 # Copyright (C) 2002-2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Danny Auble <da@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test10.6 b/testsuite/expect/test10.6
index 6329e97096a4198b5aa771237b207aee5f6dcd03..5620669c542880e3782420e05606111cf1e598cd 100755
--- a/testsuite/expect/test10.6
+++ b/testsuite/expect/test10.6
@@ -11,10 +11,11 @@
 # Copyright (C) 2002-2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Danny Auble <da@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test10.7 b/testsuite/expect/test10.7
index 43445ae0321a1d098dc244f22620c2383c275ff0..c9175fbffa15a3e4f247a5a00d1bac13521cf141 100755
--- a/testsuite/expect/test10.7
+++ b/testsuite/expect/test10.7
@@ -11,10 +11,11 @@
 # Copyright (C) 2002-2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Danny Auble <da@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test10.8 b/testsuite/expect/test10.8
index 1a5bacb39cdfcf7c5c080e6de5f6d7342333a8a0..24d8c2dc317f9ab82f4c4a4bd0458d9b757287f5 100755
--- a/testsuite/expect/test10.8
+++ b/testsuite/expect/test10.8
@@ -11,10 +11,11 @@
 # Copyright (C) 2002-2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Danny Auble <da@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test10.9 b/testsuite/expect/test10.9
index 095cdd53ecc59752833ed34eea8dc11d2ed7b835..d40029b2d7656c1643b6fd0b7ad6b92a1d838094 100755
--- a/testsuite/expect/test10.9
+++ b/testsuite/expect/test10.9
@@ -11,10 +11,11 @@
 # Copyright (C) 2002-2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Danny Auble <da@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test11.1 b/testsuite/expect/test11.1
index a67bf66924f13aaa8251bfcdd4368e3f552d7ca7..10e6bf9e47bde8232da4c8129c41a0314fe54e48 100755
--- a/testsuite/expect/test11.1
+++ b/testsuite/expect/test11.1
@@ -13,10 +13,11 @@
 # Copyright (C) 2002-2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Danny Auble <da@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test11.2 b/testsuite/expect/test11.2
index 1fcb822913b44fa39328ac184630b06a31064269..058f7b45f1c602b979236b689e1edecdf0b9136c 100755
--- a/testsuite/expect/test11.2
+++ b/testsuite/expect/test11.2
@@ -13,10 +13,11 @@
 # Copyright (C) 2002-2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Danny Auble <da@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test11.3 b/testsuite/expect/test11.3
index af784f1edb83e829f541da87c9c4b48b5eee176b..1d81520c7398758030ed12f08bf1b840cdbef7eb 100755
--- a/testsuite/expect/test11.3
+++ b/testsuite/expect/test11.3
@@ -13,10 +13,11 @@
 # Copyright (C) 2002-2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Danny Auble <da@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test11.4 b/testsuite/expect/test11.4
index 10e119cffbb4320807f9de613b4879d804ab64eb..60fa0658bd993a5cff752511289ab1cf9af36d64 100755
--- a/testsuite/expect/test11.4
+++ b/testsuite/expect/test11.4
@@ -12,10 +12,11 @@
 # Copyright (C) 2002 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Danny Auble <da@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test11.5 b/testsuite/expect/test11.5
index ae5c79c62ffb05498de57e74d74dc9ccd27db3a3..071c064a93ae80193e1f6ca4f7294bde148bf6a6 100755
--- a/testsuite/expect/test11.5
+++ b/testsuite/expect/test11.5
@@ -10,10 +10,11 @@
 # Copyright (C) 2002-2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test11.6 b/testsuite/expect/test11.6
index afa4414df030b867703ac51359215ef8eec66cd5..0e1bc988f7080375058817a58e6f43157611d56c 100755
--- a/testsuite/expect/test11.6
+++ b/testsuite/expect/test11.6
@@ -10,10 +10,11 @@
 # Copyright (C) 2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
@@ -58,7 +59,7 @@ if { [string length $partition] == 0 } {
 global env
 set env(CHECKPOINT) "yes"
 make_bash_script $file_in "
-  echo BEGIN=\$SLURM_JOBID
+  echo BEGIN=\$SLURM_JOB_ID
   $bin_sleep 30
   echo FINI
 "
@@ -85,7 +86,7 @@ expect {
 	}
 }
 if {$job_id == 0} {
-	send_user "\nFAILURE: SLURM_JOBID missing\n"
+	send_user "\nFAILURE: SLURM_JOB_ID missing\n"
 	exit 0
 }
 
diff --git a/testsuite/expect/test11.7 b/testsuite/expect/test11.7
index a600aefdb462177b216d90d63837d75becbf3fe3..dea129556fdc8eb036488cb434897d99a6fb5f27 100755
--- a/testsuite/expect/test11.7
+++ b/testsuite/expect/test11.7
@@ -12,10 +12,11 @@
 # Copyright (C) 2002-2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Danny Auble <da@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test12.1 b/testsuite/expect/test12.1
index 4e704b3d2f42a904d7eb558114c5818790a5de69..33e00ce9c0824c5bb61f961de002ffc6a765a2bd 100755
--- a/testsuite/expect/test12.1
+++ b/testsuite/expect/test12.1
@@ -7,13 +7,16 @@
 #          "FAILURE: ..." otherwise with an explanation of the failure, OR
 #          anything else indicates a failure mode that must be investigated.
 ############################################################################
-# Copyright (C) 2005-2006 The Regents of the University of California.
+# Copyright (C) 2005-2008 The Regents of the University of California.
+# Copyright (C) 2008-2009 TheLawrence Livermore National Security, LLC .
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# Written by Joseph Donaghy <donaghy1@llnl.gov>
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
@@ -48,11 +51,11 @@ expect {
 		set not_support 1
 		exp_continue
 	}
-	-re "Notes:" {
+	-re "sacct...OPTION" {
 		incr matches
 		exp_continue
 	}
-	-re "Options:" {
+	-re "Valid..OPTION..values are:" {
 		incr matches
 		exp_continue
 	}
diff --git a/testsuite/expect/test12.2 b/testsuite/expect/test12.2
index 6c5efad940b18f3fbea6fb519ca04c9751e9c465..f055586e262a781c5b35c47661625cfc6a9b8392 100755
--- a/testsuite/expect/test12.2
+++ b/testsuite/expect/test12.2
@@ -10,10 +10,11 @@
 # Copyright (C) 2005 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
@@ -61,9 +62,9 @@ proc _get_mem {prog} {
 	set ave_used -1
 	set exit_code 0
 
-	spawn $prog --noheader --job=$job_id.0 --fields vsize
+	spawn $prog --noheader -p --job=$job_id.0 --fields maxvmsize,maxvmsizetask,avevmsize
 	expect {
-		-re "($float)(\[KMG\]*)/.*:($number) - ($float)(\[KMG\]*)" {
+		-re "($float)(\[KMG\]*).($number).($float)(\[KMG\]*)" {
 			set mem_used  $expect_out(1,string)
 			set scale1    $expect_out(2,string)
 			set mem_task  $expect_out(3,string)
@@ -230,7 +231,7 @@ if {[wait_for_job $job_id "DONE"] != 0} {
 # Report basic sacct info
 #
  
-spawn $sacct --noheader --job=$job_id.0 --fields jobid,jobname,status,exitcode
+spawn $sacct --noheader -P --job=$job_id.0 --fields jobid,jobname,state,exitcode
 expect {
 	-re "$job_id\.0" {
 		incr matches
@@ -267,7 +268,7 @@ if {$matches < 4} {
 #   expected integer but got "08" (looks like invalid octal number)
 #
 set elapsed_time 0
-spawn $sacct --noheader  --job=$job_id.0 --fields elapsed
+spawn $sacct --noheader -P --job=$job_id.0 --fields elapsed
 expect {
 	-re "($number):($number):(\[0-9\])(\[0-9\])" {
 		set hours $expect_out(1,string)
diff --git a/testsuite/expect/test12.2.prog.c b/testsuite/expect/test12.2.prog.c
index a87085124798d3eb406cd7aaf39fa1b5dec0ebc3..108266944dcda1a679360dd38900afecff6cea2d 100644
--- a/testsuite/expect/test12.2.prog.c
+++ b/testsuite/expect/test12.2.prog.c
@@ -5,10 +5,11 @@
  *  Copyright (C) 2005 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test12.4 b/testsuite/expect/test12.4
new file mode 100755
index 0000000000000000000000000000000000000000..9f1b008bb4cd6e2c6a13c4983c75ea8a3a392ff0
--- /dev/null
+++ b/testsuite/expect/test12.4
@@ -0,0 +1,676 @@
+#!/usr/bin/expect
+############################################################################
+# Purpose: Test of SLURM sacct functionality
+#          sacct options b, g, j, l, n, p, u, v.
+#
+# Output:  "TEST: #.#" followed by "SUCCESS" if test was successful, OR
+#          "FAILURE: ..." otherwise with an explanation of the failure, OR
+#          anything else indicates a failure mode that must be investigated.
+############################################################################
+# Copyright (C) 2008 Lawrence Livermore National Security.
+# Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+# Written by Joseph Donaghy <donaghy1@llnl.gov>
+# CODE-OCEC-09-009. All rights reserved.
+# 
+# This file is part of SLURM, a resource management program.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
+#  
+# SLURM is free software; you can redistribute it and/or modify it under
+# the terms of the GNU General Public License as published by the Free
+# Software Foundation; either version 2 of the License, or (at your option)
+# any later version.
+# 
+# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+# details.
+# 
+# You should have received a copy of the GNU General Public License along
+# with SLURM; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+############################################################################
+source ./globals
+
+set test_id     "12.4"
+set exit_code   0
+set file_in     "test.$test_id.input"
+set test_acct   "test_acct"
+set timeout 60
+print_header $test_id
+
+#
+# Check accounting config and bail if not found.
+#
+if { [test_account_storage] == 0 } {
+	send_user "\nWARNING: This test can't be run without a usable AccountingStorageType\n"
+	exit 0
+}
+
+if { [string compare [check_accounting_admin_level] "Administrator"] } {
+	send_user "\nWARNING: This test can't be run without being an Accounting administrator.\nUse sacctmgr mod user \$USER_NAME set admin=admin.\n"
+	exit 0
+}
+
+#
+# Identify the user, the user's group, and the current default account
+#
+set acct_name ""
+set user_name ""
+set user_gid ""
+spawn $bin_id -u -n
+expect {
+	 -re "($alpha_numeric_under)" {
+		set user_name $expect_out(1,string)
+		exp_continue
+	}
+	eof {
+		wait
+	}
+}
+
spawn $bin_id -g
+expect {
+	 -re "($alpha_numeric_under)" {
+		set user_gid $expect_out(1,string)
+		exp_continue
+	}
+	eof {
+		wait
+	}
+}
+
+set s_pid [spawn $sacctmgr show user $user_name]
+expect {
+	-re "$user_name *($alpha_numeric_under)" {
+		set acct_name $expect_out(1,string)
+		exp_continue
+	}
+	timeout {
+		send_user "FAILURE: sacctmgr show not responding\n"
+		slow_kill $s_pid
+		exit 1
+	}
+	eof {
+		wait
+	}
+}
+
+#
+# Use sacctmgr to add an account
+#
+set aamatches 0
+set sadd_pid [spawn $sacctmgr -i add account $test_acct]
+expect {
+	-re "Adding Account" {
+		incr aamatches
+		exp_continue
+	}
+	-re "Nothing new added" {
+		send_user "\nWARNING: vestigial account $test_acct found\n"
+		incr aamatches
+		exp_continue
+	}
+	timeout {
+		send_user "\nFAILURE: sacctmgr add not responding\n"
+		slow_kill $sadd_pid
+		set exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+if {$aamatches != 1} {
+	send_user "\nFAILURE:  sacctmgr had a problem adding account.\n"
+	exit 1
+}
+
+#
+# Add self to this new account
+#
+set sadd_pid [spawn $sacctmgr -i create user name=$user_name account=$test_acct]
+expect {
+	 timeout {
+		send_user "\nFAILURE: sacctmgr add not responding\n"
+		slow_kill $sadd_pid
+		set exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+
+
+make_bash_script $file_in "$bin_id"
+
+#
+# Spawn a job via srun using this account
+#
+set job_id 0
+spawn $srun -N1 -v --account=$test_acct $bin_id
+expect {
+	-re "launching ($number)" {
+		set job_id $expect_out(1,string)
+		exp_continue
+	}
+	timeout {
+		send_user "\nFAILURE: srun not responding\n"
+		set exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+if {$job_id == 0} {
+	send_user "\nFAILURE: did not get srun job_id\n"
+	set exit_code 1
+} else {
+	set matches 0
+	spawn $scontrol show job $job_id
+	expect {
+		 -re "Account=$test_acct" {
+			incr matches
+			exp_continue
+		}
+		timeout {
+			send_user "\nFAILURE: scontrol not responding\n"
+			set exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+	if {$matches != 1} {
+		send_user "\nFAILURE: srun failed to use specified account\n"
+		set exit_code 1
+	}
+}
+################################################################
+#
+# Proc: sacct_job
+#
+# Purpose:  Pass sacct options and test
+#
+# Returns: Number of matches.
+#
+# Input: Switch options not requiring arguments
+#
+################################################################
+
+proc sacct_job { soption job_id} {
+	global sacct
+	set debug       0
+	set exit_code   0
+	set matches     0
+	set not_support 0
+	send_user "sacct -$soption -p -j $job_id\n"
+
+	if { $soption == "-brief" || $soption == "b" } {
+
+	spawn $sacct -$soption -p -j $job_id
+	expect {
+		-re "SLURM accounting storage is disabled" {
+			set not_support 1
+			exp_continue
+		}
+		-re "JobID.State.ExitCode" {
+			if {$debug} {send_user "\nmatch1\n"}
+			incr matches
+			exp_continue
+		}
+		-re "$job_id" {
+			if {$debug} {send_user "\nmatch2\n"}
+			incr matches
+			exp_continue
+		}
+		timeout {
+			send_user "\nFAILURE: sacct not responding\n"
+			set exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+
+	if {$not_support != 0} {
+		send_user "\nWARNING: can not test without accounting enabled\n"
+		exit 0
+	}
+	if {$matches != 2} {
+		send_user "\nFAILURE: sacct -$soption failed ($matches)\n"
+		set exit_code 1
+	}
+		return $matches
+	}
+
+	if { $soption == "-long" || $soption == "l" } {
+
+	spawn $sacct -$soption -p -j $job_id
+	expect {
+		-re "SLURM accounting storage is disabled" {
+			set not_support 1
+			exp_continue
+		}
+		-re "JobID.JobName.Partition.MaxVMSize" {
+			if {$debug} {send_user "\nmatch3\n"}
+			incr matches
+			exp_continue
+		}
+		-re "MaxVMSizeNode.MaxVMSizeTask.AveVMSize.MaxRSS" {
+			if {$debug} {send_user "\nmatch4\n"}
+			incr matches
+			exp_continue
+		}
+		-re "MaxRSSNode.MaxRSSTask.AveRSS.MaxPages" {
+			if {$debug} {send_user "\nmatch5\n"}
+			incr matches
+			exp_continue
+		}
+		-re "MaxPagesNode.MaxPagesTask.AvePages.MinCPU" {
+			if {$debug} {send_user "\nmatch6\n"}
+			incr matches
+			exp_continue
+		}
+		-re "MinCPUNode.MinCPUTask.AveCPU.NTasks" {
+			if {$debug} {send_user "\nmatch7\n"}
+			incr matches
+			exp_continue
+		}
+		-re "AllocCPUS.Elapsed.State.ExitCode" {
+			if {$debug} {send_user "\nmatch8\n"}
+			incr matches
+			exp_continue
+		}
+		-re "$job_id" {
+			if {$debug} {send_user "\nmatch9\n"}
+			incr matches
+			exp_continue
+		}
+		timeout {
+			send_user "\nFAILURE: sacct not responding\n"
+			set exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+
+	if {$not_support != 0} {
+		send_user "\nWARNING: can not test without accounting enabled\n"
+		exit 0
+	}
+
+	if {$matches != 7} {
+		send_user "\nFAILURE: sacct -$soption failed ($matches)\n"
+		set exit_code 1
+	}
+		return $matches
+	}
+
+	if { $soption == "-noheader" || $soption == "n" } {
+
+	spawn $sacct -$soption -p -j $job_id
+	expect {
+		-re "SLURM accounting storage is disabled" {
+			set not_support 1
+			exp_continue
+		}
+		-re "AllocCPUS|Account|AssocID|AveCPU|AvePages|AveRSS|AveVSize|BlockID|Cluster|CPUTime|CPUTimeRAW|Elapsed|Eligible|End|ExitCode|GID|Group|JobID|JobName|NodeList|MaxPages|MaxPagesNode|MaxPagesTask|MaxRSS|MaxRSSNode|MaxRSSTask|MaxVSize|MaxVSizeNode|MaxVSizeTask|MinCPU|MinCPUNode|MinCPUTask|NCPUS|NNodes|NTasks|Priority|Partition|QOS|QOSRAW|ReqCPUS|Reserved|ResvCPU|ResvCPURAW|Start|State|Submit|Suspended|SystemCPU|Timelimit|TotalCPU|UID|User|UserCPU|WCKey|WCKeyID" {
+			if {$debug} {send_user "\nmatch10\n"}
+			incr matches
+			exp_continue
+		}
+		-re "$job_id" {
+			if {$debug} {send_user "\nmatch11\n"}
+			incr matches
+			exp_continue
+		}
+		timeout {
+			send_user "\nFAILURE: sacct not responding\n"
+			set exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+
+	if {$not_support != 0} {
+		send_user "\nWARNING: can not test without accounting enabled\n"
+		exit 0
+	}
+	if {$matches != 1} {
+		send_user "\nFAILURE: sacct -$soption failed ($matches)\n"
+		set exit_code 1
+	}
+		return $matches
+	}
+
+	if { $soption == "-parsable" || $soption == "p" } {
+
+	spawn $sacct -$soption -p -j $job_id
+	expect {
+		-re "SLURM accounting storage is disabled" {
+			set not_support 1
+			exp_continue
+		}
+		-re "JobID\\|JobName\\|Partition\\|Account\\|AllocCPUS\\|State\\|ExitCode\\|" {
+			if {$debug} {send_user "\nmatch12\n"}
+			incr matches
+			exp_continue
+		}
+		-re "$job_id\\|" {
+			if {$debug} {send_user "\nmatch13\n"}
+			incr matches
+			exp_continue
+		}
+		timeout {
+			send_user "\nFAILURE: sacct not responding\n"
+			set exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+
+	if {$not_support != 0} {
+		send_user "\nWARNING: can not test without accounting enabled\n"
+		exit 0
+	}
+	if {$matches != 2} {
+		send_user "\nFAILURE: sacct -$soption failed ($matches)\n"
+		set exit_code 1
+	}
+		return $matches
+	}
+
+	if { $soption == "-parsable2" || $soption == "P" } {
+
+	spawn $sacct -$soption -p -j $job_id
+	expect {
+		-re "SLURM accounting storage is disabled" {
+			set not_support 1
+			exp_continue
+		}
+		-re "JobID\\|JobName\\|Partition\\|Account\\|AllocCPUS\\|State\\|ExitCode *" {
+			if {$debug} {send_user "\nmatch14\n"}
+			incr matches
+			exp_continue
+		}
+		-re "$job_id\\|" {
+			if {$debug} {send_user "\nmatch15\n"}
+			incr matches
+			exp_continue
+		}
+		timeout {
+			send_user "\nFAILURE: sacct not responding\n"
+			set exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+
+	if {$not_support != 0} {
+		send_user "\nWARNING: can not test without accounting enabled\n"
+		exit 0
+	}
+	if {$matches != 2} {
+		send_user "\nFAILURE: sacct -$soption failed ($matches)\n"
+		set exit_code 1
+	}
+		return $matches
+	}
+
+	if { $soption == "-verbose" || $soption == "v" } {
+
+	spawn $sacct -$soption -p -j $job_id
+	expect {
+		-re "SLURM accounting storage is disabled" {
+			set not_support 1
+			exp_continue
+		}
+		-re "sacct: Accounting storage SLURMDBD plugin loaded " {
+			if {$debug} {send_user "\nmatch16\n"}
+			incr matches
+			exp_continue
+		}
+		-re "JobID.JobName.Partition" {
+			if {$debug} {send_user "\nmatch17\n"}
+			incr matches
+			exp_continue
+		}
+		-re "Account.AllocCPUS.State.ExitCode" {
+			if {$debug} {send_user "\nmatch18\n"}
+			incr matches
+			exp_continue
+		}
+		-re "$job_id" {
+			if {$debug} {send_user "\nmatch19\n"}
+			incr matches
+			exp_continue
+		}
+		timeout {
+			send_user "\nFAILURE: sacct not responding\n"
+			set exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+
+	if {$not_support != 0} {
+		send_user "\nWARNING: can not test without accounting enabled\n"
+		exit 0
+	}
+	if {$matches != 4} {
+		send_user "\nFAILURE: sacct -$soption failed ($matches)\n"
+		set exit_code 1
+	}
+		return $matches
+	}
+
+
+}
+
+################################################################
+#
+# Proc: sacct_vargs
+#
+# Purpose:  Pass sacct options with arguments and test
+#
+# Returns: Number of matches.
+#
+# Input: Switch options with argument
+#
+################################################################
+
+proc sacct_vargs { soption vargs job_id} {
+	global sacct
+	set debug       0
+	set exit_code   0
+	set matches     0
+	set not_support 0
+	send_user "sacct -$soption $vargs -p -j $job_id\n"
+
+	if { $soption == "g" || $soption == "-gid" || $soption == "-group" || $soption == "u" || $soption == "-uid" || $soption == "-user"} {
+
+	spawn $sacct -$soption $vargs -p -j $job_id
+	expect {
+		-re "SLURM accounting storage is disabled" {
+			set not_support 1
+			exp_continue
+		}
+		-re "JobID.JobName.Partition" {
+			if {$debug} {send_user "\nmatch20\n"}
+			incr matches
+			exp_continue
+		}
+		-re "Account.AllocCPUS.State.ExitCode" {
+			if {$debug} {send_user "\nmatch21\n"}
+			incr matches
+			exp_continue
+		}
+		-re "$job_id" {
+			incr matches
+			if {$debug} {send_user "\nmatch22\n"}
+			exp_continue
+		}
+		timeout {
+			send_user "\nFAILURE: sacct not responding\n"
+			set exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+
+	if {$not_support != 0} {
+		send_user "\nWARNING: can not test without accounting enabled\n"
+		exit 0
+	}
+	if {$matches != 3} {
+		send_user "\nFAILURE: sacct -$soption failed ($matches)\n"
+		set exit_code 1
+	}
+		return $matches
+	}
+}
+################################################################
+
+set matches [sacct_job b $job_id]
+if {$matches != 2} {
+	send_user "\nFAILURE: sacct -b failed ($matches)\n"
+	set exit_code 1
+}
+
+set matches [sacct_job -brief $job_id]
+if {$matches != 2} {
+	send_user "\nFAILURE: sacct --brief failed ($matches)\n"
+	set exit_code 1
+}
+
+set matches [sacct_vargs g $user_gid $job_id]
+if {$matches != 3} {
+	send_user "\nFAILURE: sacct -g failed ($matches)\n"
+	set exit_code 1
+}
+
+set matches [sacct_vargs -gid $user_gid $job_id]
+if {$matches != 3} {
+	send_user "\nFAILURE: sacct --gid failed ($matches)\n"
+	set exit_code 1
+}
+
+set matches [sacct_vargs -group $user_gid $job_id]
+if {$matches != 3} {
+	send_user "\nFAILURE: sacct --group failed ($matches)\n"
+	set exit_code 1
+}
+
+set matches [sacct_job l $job_id]
+if {$matches != 7} {
+	send_user "\nFAILURE: sacct -l failed ($matches)\n"
+	set exit_code 1
+}
+
+set matches [sacct_job -long $job_id]
+if {$matches != 7} {
+	send_user "\nFAILURE: sacct --long failed ($matches)\n"
+	set exit_code 1
+}
+
+set matches [sacct_job n $job_id]
+if {$matches != 1} {
+	send_user "\nFAILURE: sacct -n failed ($matches)\n"
+	set exit_code 1
+}
+
+set matches [sacct_job -noheader $job_id]
+if {$matches != 1} {
+	send_user "\nFAILURE: sacct --noheader failed ($matches)\n"
+	set exit_code 1
+}
+
+set matches [sacct_job p $job_id]
+if {$matches != 2} {
+	send_user "\nFAILURE: sacct -p failed ($matches)\n"
+	set exit_code 1
+}
+
+set matches [sacct_job -parsable $job_id]
+if {$matches != 2} {
+	send_user "\nFAILURE: sacct --parsable failed ($matches)\n"
+	set exit_code 1
+}
+
+set matches [sacct_job P $job_id]
+if {$matches != 2} {
+	send_user "\nFAILURE: sacct -P failed ($matches)\n"
+	set exit_code 1
+}
+
+set matches [sacct_job -parsable2 $job_id]
+if {$matches != 2} {
+	send_user "\nFAILURE: sacct --parsable2 failed ($matches)\n"
+	set exit_code 1
+}
+
+set matches [sacct_vargs u $user_name $job_id]
+if {$matches != 3} {
+	send_user "\nFAILURE: sacct -u failed ($matches)\n"
+	set exit_code 1
+}
+
+set matches [sacct_vargs -uid $user_name $job_id]
+if {$matches != 3} {
+	send_user "\nFAILURE: sacct --uid failed ($matches)\n"
+	set exit_code 1
+}
+
+set matches [sacct_vargs -user $user_name $job_id]
+if {$matches != 3} {
+	send_user "\nFAILURE: sacct --user failed ($matches)\n"
+	set exit_code 1
+}
+
+set matches [sacct_job v $job_id]
+if {$matches != 4} {
+	send_user "\nFAILURE: sacct -v failed ($matches)\n"
+	set exit_code 1
+}
+
+set matches [sacct_job -verbose $job_id]
+if {$matches != 4} {
+	send_user "\nFAILURE: sacct --verbose failed ($matches)\n"
+	set exit_code 1
+}
+
+#
+# Use sacctmgr to delete the test account
+#
+set damatches 0
+set sadel_pid [spawn $sacctmgr -i delete account $test_acct]
+expect {
+	-re "Deleting account" {
+		incr damatches
+		exp_continue
+	}
+	timeout {
+		send_user "\nFAILURE: sacctmgr delete not responding\n"
+		slow_kill $sadel_pid
+		set exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+if {$damatches != 1} {
+	send_user "\nFAILURE: sacctmgr had a problem deleting account\n"
+	set exit_code 1
+}
+
+if {$exit_code == 0} {
+	exec $bin_rm -f $file_in
+	send_user "\nSUCCESS\n"
+}
+exit $exit_code
diff --git a/testsuite/expect/test12.5 b/testsuite/expect/test12.5
new file mode 100755
index 0000000000000000000000000000000000000000..215ba09c229e77f2fa1ad12862ac83ca5feb6841
--- /dev/null
+++ b/testsuite/expect/test12.5
@@ -0,0 +1,110 @@
+#!/usr/bin/expect
+############################################################################
+# Purpose: Test of SLURM sacct functionality
+#          Test sacct --helpformat option.
+#
+# Output:  "TEST: #.#" followed by "SUCCESS" if test was successful, OR
+#          "FAILURE: ..." otherwise with an explanation of the failure, OR
+#          anything else indicates a failure mode that must be investigated.
+############################################################################
+# Copyright (C) 2008-2009 Lawrence Livermore National Security, LLC.
+# Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+# Written by Joseph Donaghy <donaghy1@llnl.gov>
+# CODE-OCEC-09-009. All rights reserved.
+# 
+# This file is part of SLURM, a resource management program.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
+#  
+# SLURM is free software; you can redistribute it and/or modify it under
+# the terms of the GNU General Public License as published by the Free
+# Software Foundation; either version 2 of the License, or (at your option)
+# any later version.
+# 
+# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+# details.
+# 
+# You should have received a copy of the GNU General Public License along
+# with SLURM; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+############################################################################
+source ./globals
+
+set test_id     "12.5"
+set exit_code   0
+set matches     0
+set not_support 0
+
+print_header $test_id
+
+################################################################
+#
+# Proc: sacct_job
+#
+# Purpose:  Pass sacct options and test
+#
+# Returns: Number of matches.
+#
+# Input: Switch options -e and --helpformat
+#
+################################################################
+
+proc sacct_job { soption } {
+	global sacct
+	set debug       0
+	set exit_code   0
+	set matches     0
+	set not_support 0
+	send_user "sacct $soption\n"
+
+	spawn $sacct $soption
+	expect {
+		-re "SLURM accounting storage is disabled" {
+			set not_support 1
+			exp_continue
+		}
+		-re "AllocCPUS     Account       AssocID       AveCPU" {
+			if {$debug} {send_user "\nmatch1\n"}
+			incr matches
+			exp_continue
+		}
+		-re "AvePages      AveRSS        AveVMSize     BlockID" {
+			if {$debug} {send_user "\nmatch2\n"}
+			incr matches
+			exp_continue
+		}
+		timeout {
+			send_user "\nFAILURE: sacct not responding\n"
+			set exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+
+	if {$not_support != 0} {
+		send_user "\nWARNING: can not test without accounting enabled\n"
+		exit 0
+	}
+	return $matches
+}
+
+################################################################
+set matches [sacct_job --helpformat]
+if {$matches != 2} {
+	send_user "\nFAILURE: sacct --helpformat failed ($matches of 2)\n"
+	set exit_code 1
+}
+
+set matches [sacct_job -e]
+if {$matches != 2} {
+	send_user "\nFAILURE: sacct -e failed ($matches of 2)\n"
+	set exit_code 1
+}
+
+if {$exit_code == 0} {
+	send_user "\nSUCCESS\n"
+}
+exit $exit_code
diff --git a/testsuite/expect/test13.1 b/testsuite/expect/test13.1
index dfe37a3cb3b20daa2d4d64f910399752b95a6d19..4e0f3b2ee7946ed6eb2f1d52e0dd70f676290c71 100755
--- a/testsuite/expect/test13.1
+++ b/testsuite/expect/test13.1
@@ -10,10 +10,11 @@
 # Copyright (C) 2005-2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test14.1 b/testsuite/expect/test14.1
index fed1099e99a259d33006e7e81eb65a23c1ed753c..6792f64bf68d97d8cc1ca09fb49664c2b38031c0 100755
--- a/testsuite/expect/test14.1
+++ b/testsuite/expect/test14.1
@@ -10,10 +10,11 @@
 # Copyright (C) 2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test14.2 b/testsuite/expect/test14.2
index 564f091e3ef9209622f984351aae77c2f884603c..c0c0a75c9071d2db1f91aa52bcc05273a4236562 100755
--- a/testsuite/expect/test14.2
+++ b/testsuite/expect/test14.2
@@ -10,10 +10,11 @@
 # Copyright (C) 2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test14.3 b/testsuite/expect/test14.3
index 1037ca76d775a61a2238cd82dec345d5c3abed88..e6e18b82ddb21799360ce3807fbac90d7bb70a8f 100755
--- a/testsuite/expect/test14.3
+++ b/testsuite/expect/test14.3
@@ -11,10 +11,11 @@
 # Copyright (C) 2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test14.4 b/testsuite/expect/test14.4
index 8c7b3636ca7669637c9729e045a8260644e5da1b..d97be47b16f345235744eb296f497f901922adc3 100755
--- a/testsuite/expect/test14.4
+++ b/testsuite/expect/test14.4
@@ -13,10 +13,11 @@
 # Copyright (C) 2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
@@ -52,6 +53,10 @@ if {[test_multiple_slurmd] != 0} {
 	send_user "\nWARNING: This test is incompatable with multiple slurmd systems\n"
 	exit 0
 }
+if {[slurmd_user_root] == 0} {
+	send_user "\nWARNING: This test is incompatible with SlurmdUser != root\n"
+	exit 0
+}
 
 #
 # Delete left-over stdout/err files
diff --git a/testsuite/expect/test14.5 b/testsuite/expect/test14.5
index 4cdc56176068f15c83d3d2dbf8efe8b181205e59..fca739154235f05c287e71cfa351c8eb4acff856 100755
--- a/testsuite/expect/test14.5
+++ b/testsuite/expect/test14.5
@@ -10,10 +10,11 @@
 # Copyright (C) 2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
@@ -45,6 +46,10 @@ if {[test_front_end] != 0} {
 	send_user "\nWARNING: This test is incompatable with front-end systems\n"
 	exit 0
 }
+if {[slurmd_user_root] == 0} {
+	send_user "\nWARNING: This test is incompatible with SlurmdUser != root\n"
+	exit 0
+}
 
 # Delete left-over stdout/err files
 file delete $file_out $file_err
@@ -105,27 +110,30 @@ if {[wait_for_job $job_id "DONE"] != 0} {
 set record    0
 set preserved 1
 set reset     1
+set time1     0
+set time2     0
+set time3     0
 if {[wait_for_file $file_out] == 0} {
 	spawn $bin_cat $file_out
 	expect {
-		-re "($number):($number)" {
+		-re "($number) *0?($number):0?($number)" {
 			incr record
+			set date      $expect_out(1,string)
+			set hour      $expect_out(2,string)
+			set minute    $expect_out(3,string)
+			set this_time [expr $date * 10000 + $hour * 100 + $minute]
+
 			if {$record == 1} {
-				set hour   $expect_out(1,string)
-				set minute $expect_out(2,string)
+				set base_time $this_time
 			}
 			if {$record == 2} {
-				if {$hour != $expect_out(1,string)} {
-					if {$minute != $expect_out(2,string)} {
-						set preserved 0
-					}
+				if {$this_time != $base_time} {
+					set preserved 0
 				}
 			}
 			if {$record == 3} {
-				if {$hour == $expect_out(1,string)} {
-					if {$minute == $expect_out(2,string)} {
-						set reset 0
-					}
+				if {$this_time == $base_time} {
+					set reset 0
 				}
 			}
 			exp_continue;
@@ -137,7 +145,7 @@ if {[wait_for_file $file_out] == 0} {
 }
 
 if {$record != 3} {
-	send_user "\nFAILURE: Wrong record count\n"
+	send_user "\nFAILURE: Wrong record count $record\n"
 	set exit_code 1
 }
 if {$preserved == 0} {
diff --git a/testsuite/expect/test14.6 b/testsuite/expect/test14.6
index 3f5e82c96e99afecf908387a21588d6f36cc3dde..05ee6e8dcfac905cd1e9618b15f104a21a4c903a 100755
--- a/testsuite/expect/test14.6
+++ b/testsuite/expect/test14.6
@@ -10,10 +10,11 @@
 # Copyright (C) 2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test14.7 b/testsuite/expect/test14.7
index a66eb24da1deddd078a46e6c2ba753577c52ae2f..ab8a325f474ecbab07cee7a5ca9d21735c47332b 100755
--- a/testsuite/expect/test14.7
+++ b/testsuite/expect/test14.7
@@ -10,10 +10,11 @@
 # Copyright (C) 2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
@@ -115,6 +116,13 @@ if {[wait_for_file $file_err] == 0} {
 			incr matches
 			exp_continue
 		}
+		-re "REQUEST_FILE_BCAST.* Operation not permitted" {
+#			variation on Permission denied if initgroups() fails
+#			due to not running slurmd as root
+			send_user "These errors are expected, no worries\n"
+			incr matches
+			exp_continue
+		}
 		-re "REQUEST_FILE_BCAST.* File exists" {
 			send_user "Vestigial file should be removed\n"
 			incr matches
diff --git a/testsuite/expect/test14.8 b/testsuite/expect/test14.8
index 967c95717a2b5abf9c819c839dcff86cad926f92..a895df083509cc05550ddbee6079641859df3a08 100755
--- a/testsuite/expect/test14.8
+++ b/testsuite/expect/test14.8
@@ -14,10 +14,11 @@
 # Copyright (C) 2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
@@ -52,6 +53,10 @@ if {[test_multiple_slurmd] != 0} {
 	send_user "\nWARNING: This test is incompatable with multiple slurmd systems\n"
 	exit 0
 }
+if {[slurmd_user_root] == 0} {
+	send_user "\nWARNING: This test is incompatible with SlurmdUser != root\n"
+	exit 0
+}
 
 # Delete left-over stdout file
 file delete $file_out
diff --git a/testsuite/expect/test15.1 b/testsuite/expect/test15.1
index e94ca83020df62ad005d448d37958a17abdac70c..7cf9bcb078d6f270d13769bf5a1d7c3a34a91dcb 100755
--- a/testsuite/expect/test15.1
+++ b/testsuite/expect/test15.1
@@ -10,10 +10,11 @@
 # Copyright (C) 2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test15.10 b/testsuite/expect/test15.10
index 511b2020ede0b64dea3677f5b5dec5c2a783b3cb..c3ea7d82def6c76fd452dca67a38449e1374a9fd 100755
--- a/testsuite/expect/test15.10
+++ b/testsuite/expect/test15.10
@@ -10,10 +10,11 @@
 # Copyright (C) 2002-2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test15.11 b/testsuite/expect/test15.11
index 52abcb841a376a2e169750527ef820f64163f55c..64fe0351f40e40d1d593464dc4fe59aa4b0da0ff 100755
--- a/testsuite/expect/test15.11
+++ b/testsuite/expect/test15.11
@@ -10,10 +10,11 @@
 # Copyright (C) 2002-2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
@@ -44,6 +45,11 @@ if {[test_wiki_sched] == 1} {
 	send_user "         or sched/wiki2 (Moab) schedulers\n"
 	exit $exit_code
 }
+set prio_type [priority_type]
+if {[string compare $prio_type "multifactor"] == 0} {
+	send_user "\nWARNING: not compatible with priority/multifactor\n"
+	exit $exit_code
+}
 
 #
 # Test setting job's name and get priority
diff --git a/testsuite/expect/test15.12 b/testsuite/expect/test15.12
index 2a72cf7abf9e6ce8166b0647c15470861ad4bdc1..493376a3d2077592e002cb0aa9ea07d06ca04be1 100755
--- a/testsuite/expect/test15.12
+++ b/testsuite/expect/test15.12
@@ -11,10 +11,11 @@
 # Copyright (C) 2002-2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test15.13 b/testsuite/expect/test15.13
index cb65cd3c550ecc80cc46cdcb9c8f471d6d89fc05..50195ae73fb1cd90dfdfef728f3931f8e528cb88 100755
--- a/testsuite/expect/test15.13
+++ b/testsuite/expect/test15.13
@@ -10,10 +10,11 @@
 # Copyright (C) 2002-2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
@@ -46,7 +47,7 @@ array set good_vars {
     SLURM_JOB_NODELIST 0
     SLURM_JOB_CPUS_PER_NODE 1
 
-    SLURM_JOBID 1
+    SLURM_JOB_ID 1
     SLURM_NNODES 0
     SLURM_NODELIST 0
     SLURM_TASKS_PER_NODE 1
diff --git a/testsuite/expect/test15.14 b/testsuite/expect/test15.14
index 07e87f0c85f7dcd60c5d2781d61eef66321c2662..fb1e04bf526fa0f14dbe92558fdabf4edfa87406 100755
--- a/testsuite/expect/test15.14
+++ b/testsuite/expect/test15.14
@@ -10,10 +10,11 @@
 # Copyright (C) 2004-2007 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
@@ -79,7 +80,8 @@ expect {
 		set job_id2 $expect_out(1,string)
 		exp_continue
 	}
-	-re "JobState=COMPLETED" {
+#	COMPLETED or COMPLETING
+	-re "JobState=COMPLET" {
 		set match_state 1
 		exp_continue
 	}
diff --git a/testsuite/expect/test15.15 b/testsuite/expect/test15.15
index bb310ec97e1ce70f1cb26877524b5d0da1e20d76..a2fa207ea37834ed9c5c4e69ca9a1cb9c6039531 100755
--- a/testsuite/expect/test15.15
+++ b/testsuite/expect/test15.15
@@ -12,10 +12,11 @@
 # Copyright (C) 2002-2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test15.16 b/testsuite/expect/test15.16
index 8c03be953572b45602ffcecd8204cca4b4cae401..8c685f653c623ed7ce067950a3d50fc03cab8b0d 100755
--- a/testsuite/expect/test15.16
+++ b/testsuite/expect/test15.16
@@ -11,10 +11,11 @@
 # Copyright (C) 2002-2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test15.17 b/testsuite/expect/test15.17
index a6ae14ac270e7d2fe0b3f7e82cf2cc32ee5996ba..a67c2710868c22a01823364aa0e28c456b5a6440 100755
--- a/testsuite/expect/test15.17
+++ b/testsuite/expect/test15.17
@@ -11,10 +11,11 @@
 # Copyright (C) 2005-2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
@@ -32,7 +33,6 @@
 ############################################################################
 source ./globals
 
-slow_kill 45984
 set test_id      "15.17"
 set file_in      "test$test_id.input"
 set exit_code    0
diff --git a/testsuite/expect/test15.18 b/testsuite/expect/test15.18
index 7c40c9fde894cae724e0c9302da86d375b9fd468..068d1d954ac28ec7cc3c8fb027a05c7aef19548d 100755
--- a/testsuite/expect/test15.18
+++ b/testsuite/expect/test15.18
@@ -10,10 +10,11 @@
 # Copyright (C) 2002-2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test15.19 b/testsuite/expect/test15.19
index 8b673cb61c9ae8e2635fda65f4662c21bdb5318b..c1ebebc4956055909dd8cae9d0044ddb8ea16e8c 100755
--- a/testsuite/expect/test15.19
+++ b/testsuite/expect/test15.19
@@ -13,10 +13,11 @@
 # Copyright (C) 2008 Lawrence Livermore National Security.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test15.2 b/testsuite/expect/test15.2
index fd397cbf82edf178c1f44280ab6dd3aa6306cba4..db94ad9f148a56d483b1b9ebf79f83fb903e089f 100755
--- a/testsuite/expect/test15.2
+++ b/testsuite/expect/test15.2
@@ -10,10 +10,11 @@
 # Copyright (C) 2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test15.20 b/testsuite/expect/test15.20
index aa28b812480da765765515514e645cdc971edfa9..5f1cd3d76dcc30f56ab76054db8c864672c662b5 100755
--- a/testsuite/expect/test15.20
+++ b/testsuite/expect/test15.20
@@ -13,10 +13,11 @@
 # Copyright (C) 2008 Lawrence Livermore National Security.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
@@ -70,7 +71,7 @@ set salloc_pid [spawn $salloc -N$node_cnt -t1 $srun -l $bin_printenv SLURMD_NODE
 expect {
 	-re "Granted job allocation ($number)" {
 		set job_id $expect_out(1,string)
-		send "$bin_echo MY_ID=\$SLURM_JOBID \n"
+		send "$bin_echo MY_ID=\$SLURM_JOB_ID \n"
 		exp_continue
 	}
 	-re "More processors requested than permitted" {
diff --git a/testsuite/expect/test15.21 b/testsuite/expect/test15.21
index 05cd8bce843ca9949c6a55d84ffd596e8e2c4af9..554c0e7eee56900239e4876d76b71ab8d73947d6 100755
--- a/testsuite/expect/test15.21
+++ b/testsuite/expect/test15.21
@@ -12,13 +12,15 @@
 #          the value of <number> indicates the nodes relative location. 
 #          Change tha node name parsing logic as needed for other formats.
 ############################################################################
-# Copyright (C) 2002 The Regents of the University of California.
+# Copyright (C) 2002-2007 The Regents of the University of California.
+# Copyright (C) 2008-2009 Lawrence Livermore National Security.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
@@ -45,6 +47,10 @@ if {[test_front_end] != 0} {
 	send_user "\nWARNING: This test is incompatable with front-end systems\n"
 	exit 0
 }
+if {[test_topology] != 0} {
+	send_user "\nWARNING: This test is incompatable topology configured systems\n"
+	exit 0
+}
 
 set available [available_nodes [default_partition]]
 if {$available < 3} {
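[The added guard relies on the test_topology helper from the suite's globals
file. Where that helper is unavailable, the same detection can be sketched
directly against scontrol, mirroring the approach test2.12 takes later in
this patch (assumes the $scontrol path from globals):

    # Sketch: treat any plugin other than topology/none as
    # "topology configured".
    set have_topology 0
    log_user 0
    spawn $scontrol show config
    expect {
        -re "TopologyPlugin *= *topology/(\[a-z_0-9\]+)" {
            if {$expect_out(1,string) != "none"} {
                set have_topology 1
            }
            exp_continue
        }
        eof { wait }
    }
    log_user 1
]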
diff --git a/testsuite/expect/test15.22 b/testsuite/expect/test15.22
index 47004d8f726b28ab9c345f7e83b43cd1df59686a..dc0889737b24b1487906f24aadb5e194b8f79863 100755
--- a/testsuite/expect/test15.22
+++ b/testsuite/expect/test15.22
@@ -12,10 +12,11 @@
 # Copyright (C) 2002-2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test15.23 b/testsuite/expect/test15.23
index ac452e02cc67b30063bd1789d85eb060ddd5dc53..4d59e365be0245078847873f227e813b1d42a7ce 100755
--- a/testsuite/expect/test15.23
+++ b/testsuite/expect/test15.23
@@ -11,10 +11,11 @@
 # Copyright (C) 2002-2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test15.24 b/testsuite/expect/test15.24
index 21ba71c8ef81be8bc9ac29ac281f757c02f56378..3cb43167e8a3964af79cef47f795b7a704252306 100755
--- a/testsuite/expect/test15.24
+++ b/testsuite/expect/test15.24
@@ -10,10 +10,11 @@
 # Copyright (C) 2002-2007 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test15.25 b/testsuite/expect/test15.25
index 7b18ba289b7e367bf5eeae6b92efc91f79c77811..3dc9311c62ee5bbce7c6ac3b332e16cd07f1ac6a 100755
--- a/testsuite/expect/test15.25
+++ b/testsuite/expect/test15.25
@@ -10,10 +10,11 @@
 # Copyright (C) 2004-2007 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test15.3 b/testsuite/expect/test15.3
index 9598d575a49c0612ecea08807d84123113bf653a..d1b57f83f1f2be8e3915d102f2e6624fd0cb5574 100755
--- a/testsuite/expect/test15.3
+++ b/testsuite/expect/test15.3
@@ -11,10 +11,11 @@
 # Copyright (C) 2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test15.4 b/testsuite/expect/test15.4
index 515a1f132535953faec33b4ba53b738b8de61fc4..ab9a518d896191b88d65b6b9e55c25b1208c7a53 100755
--- a/testsuite/expect/test15.4
+++ b/testsuite/expect/test15.4
@@ -10,10 +10,11 @@
 # Copyright (C) 2002-2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test15.5 b/testsuite/expect/test15.5
index 84a5f0d86509145ef418f60f86bcb3c486ee884a..849a03633719382c9cb861403ebfa14a37e1bf94 100755
--- a/testsuite/expect/test15.5
+++ b/testsuite/expect/test15.5
@@ -8,13 +8,15 @@
 #          "FAILURE: ..." otherwise with an explanation of the failure, OR
 #          anything else indicates a failure mode that must be investigated.
 ############################################################################
-# Copyright (C) 2002-2006 The Regents of the University of California.
+# Copyright (C) 2002-2007 The Regents of the University of California.
+# Copyright (C) 2008 Lawrence Livermore National Security.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
@@ -42,14 +44,28 @@ set sleep_time  180
 
 print_header $test_id
 
+set inactive_limit  $sleep_time
 set kill_wait       $sleep_time
+set over_time_limit 0
 log_user 0
 spawn $scontrol show config
 expect {
+	-re "InactiveLimit *= ($number)" {
+		set inactive_limit $expect_out(1,string)
+		exp_continue
+	}
 	-re "KillWait *= ($number)" {
 		set kill_wait $expect_out(1,string)
 		exp_continue
 	}
+	-re "OverTimeLimit *= UNLIMITED" {
+		set over_time_limit 9999
+		exp_continue
+	}
+	-re "OverTimeLimit *= ($number)" {
+		set over_time_limit $expect_out(1,string)
+		exp_continue
+	}
 	timeout {
 		send_user "\nFAILURE: scontrol not responding\n"
 		set exit_code 1
@@ -59,10 +75,21 @@ expect {
 	}
 }
 log_user 1
+if {$inactive_limit == 0} {
+	set inactive_limit $sleep_time
+}
+if {$inactive_limit < 120} {
+	send_user "\nWARNING: InactiveLimit ($inactive_limit) is too low for this test\n"
+	exit 0
+}
 if {$kill_wait > 60} {
 	send_user "\nWARNING: KillWait ($kill_wait) is too high for this test\n"
 	exit 0
 }
+if {$over_time_limit > 0} {
+	send_user "\nWARNING: OverTimeLimit too high for this test ($over_time_limit > 0)\n"
+	exit 0
+}
 
 #
 # Build input script file
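[The pattern in the hunks above (read one value from scontrol show config,
mapping UNLIMITED to a large sentinel so a single numeric comparison
suffices) recurs throughout the suite. A generic helper along these lines,
assuming the $scontrol global, could factor it out:

    # Sketch: fetch one numeric configuration value; UNLIMITED becomes
    # a large sentinel and -1 means the keyword was never seen.
    proc get_config_val {name {unlimited 9999}} {
        global scontrol
        set val -1
        log_user 0
        spawn $scontrol show config
        expect {
            -re "$name *= UNLIMITED" {
                set val $unlimited
                exp_continue
            }
            -re "$name *= (\[0-9\]+)" {
                set val $expect_out(1,string)
                exp_continue
            }
            eof { wait }
        }
        log_user 1
        return $val
    }

Each lookup then reduces to a line such as:
    set kill_wait [get_config_val KillWait]
]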
diff --git a/testsuite/expect/test15.6 b/testsuite/expect/test15.6
index 9c002d753355b4763e96d91b48aacd515a87175f..93721b05ccec9aa0724bd39b4d53704e90875113 100755
--- a/testsuite/expect/test15.6
+++ b/testsuite/expect/test15.6
@@ -10,10 +10,11 @@
 # Copyright (C) 2002-2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test15.7 b/testsuite/expect/test15.7
index f928c5ba58f829dfe32cdd49cded983d14074f77..bdc33ff64ad5d08c3cb01485418669f15cc033a8 100755
--- a/testsuite/expect/test15.7
+++ b/testsuite/expect/test15.7
@@ -13,10 +13,11 @@
 # Copyright (C) 2002-2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test15.8 b/testsuite/expect/test15.8
index 077d3694eda19ae060877970f20756ca7c0eef5c..5ecca86b08a360a7f96729c9cececc00d34e499e 100755
--- a/testsuite/expect/test15.8
+++ b/testsuite/expect/test15.8
@@ -10,10 +10,11 @@
 # Copyright (C) 2002-2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test15.9 b/testsuite/expect/test15.9
index f7cc37cff1e86d272e9842173b0c532cc6e00685..71aa3281fa549be24ee1168cfeca2b0001516c9e 100755
--- a/testsuite/expect/test15.9
+++ b/testsuite/expect/test15.9
@@ -10,10 +10,11 @@
 # Copyright (C) 2002-2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
@@ -40,7 +41,7 @@ print_header $test_id
 
 #
 # Submit a slurm allocate job
-# Interactively print $SLURM_JOBID
+# Interactively print $SLURM_JOB_ID
 #
 set timeout $max_job_delay
 set match 0
@@ -49,14 +50,14 @@ set salloc_pid [spawn $salloc -t1 $bin_bash]
 expect {
 	-re "Granted job allocation ($number)" {
 		set job_id $expect_out(1,string)
-		send "$bin_echo MY_ID=\$SLURM_JOBID \n"
+		send "$bin_echo MY_ID=\$SLURM_JOB_ID \n"
 		array unset expect_out
 		exp_continue
 	}
 	-re "MY_ID=($number)?" {
 		foreach name [array names expect_out] {
 			if {$name == "1,string"} {
-				# SLURM_JOBID is set
+				# SLURM_JOB_ID is set
 				set slurm_jobid $expect_out(1,string)
 				break
 			}
@@ -90,7 +91,7 @@ if { $job_id == 0 } {
 	set exit_code 1
 } else {
 	if { $job_id != $slurm_jobid } {
-		send_user "\nFAILURE: salloc failed to set valid SLURM_JOBID\n"
+		send_user "\nFAILURE: salloc failed to set valid SLURM_JOB_ID\n"
 		set exit_code 1
 	}
 }
diff --git a/testsuite/expect/test16.1 b/testsuite/expect/test16.1
index ad2a8d1eb3be304757d784cee8c389aabe8fe89c..62aa8c403d89bfc0c22cb69fa360239eb39d3e14 100755
--- a/testsuite/expect/test16.1
+++ b/testsuite/expect/test16.1
@@ -10,10 +10,11 @@
 # Copyright (C) 2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test16.2 b/testsuite/expect/test16.2
index bcc9893810a5bbd836fc6f01189558351e5f3842..260d481d1ae45e18837eb5378d3da64ac160af7c 100755
--- a/testsuite/expect/test16.2
+++ b/testsuite/expect/test16.2
@@ -10,10 +10,11 @@
 # Copyright (C) 2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test16.3 b/testsuite/expect/test16.3
index 91dc0efaae50dc1fcc9cfec264607c2bef238ef0..7d17abc34087a6c0ccb8e726acaf9366d066c4d2 100755
--- a/testsuite/expect/test16.3
+++ b/testsuite/expect/test16.3
@@ -11,10 +11,11 @@
 # Copyright (C) 2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test16.4 b/testsuite/expect/test16.4
index 217c4ed48fde3049458f3c778d90bac0fa5576ab..b6b61e6a30dd99fe28b7835a2991244617ce0d98 100755
--- a/testsuite/expect/test16.4
+++ b/testsuite/expect/test16.4
@@ -11,10 +11,11 @@
 # Copyright (C) 2002-2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test16.4.prog.c b/testsuite/expect/test16.4.prog.c
index 323926345a3fdb95468ed6b753c013742d5ab711..752a5c874b14d235c610788aeebb764baf861084 100644
--- a/testsuite/expect/test16.4.prog.c
+++ b/testsuite/expect/test16.4.prog.c
@@ -5,10 +5,11 @@
  *  Copyright (C) 2002 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test17.1 b/testsuite/expect/test17.1
index f9c4dde8d7e98cc9bbafac90a0a4a484f0c8e8ba..871390c0ec71f87c9aefe9413bbef7a1d249b7e5 100755
--- a/testsuite/expect/test17.1
+++ b/testsuite/expect/test17.1
@@ -10,10 +10,11 @@
 # Copyright (C) 2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test17.10 b/testsuite/expect/test17.10
index d2b11f3a26fff6991a28361beff0d9cdc9f2f539..5b025bd9a052f7db07f404a4a3c3adfc37cad7a3 100755
--- a/testsuite/expect/test17.10
+++ b/testsuite/expect/test17.10
@@ -13,10 +13,11 @@
 # Copyright (C) 2002-2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test17.11 b/testsuite/expect/test17.11
index 2978fbb93eba8dfb0355cba7b388f98331b277f9..1cc60d2c7e6a5fa1d2a793b5c9873af10f47ee14 100755
--- a/testsuite/expect/test17.11
+++ b/testsuite/expect/test17.11
@@ -11,10 +11,11 @@
 # Copyright (C) 2002-2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test17.12 b/testsuite/expect/test17.12
index 9fa863ad952d05fea9c31206e5cbf89ed332065c..86a79e696f7f878b205a160bc62928c0a15013b8 100755
--- a/testsuite/expect/test17.12
+++ b/testsuite/expect/test17.12
@@ -11,10 +11,11 @@
 # Copyright (C) 2002-2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test17.13 b/testsuite/expect/test17.13
index 2b2f35bfdee9fdb8c8288b078a0a637e1e936ed5..a296430c31fcdc033ede3ec7c3c23605583ee918 100755
--- a/testsuite/expect/test17.13
+++ b/testsuite/expect/test17.13
@@ -10,10 +10,11 @@
 # Copyright (C) 2002-2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test17.14 b/testsuite/expect/test17.14
index c13b6ce7d5a108bd7082568e04ef5d624bbeb67c..11660d64158440a38c1ed5ad40a3102c6ba3d8e3 100755
--- a/testsuite/expect/test17.14
+++ b/testsuite/expect/test17.14
@@ -10,10 +10,11 @@
 # Copyright (C) 2002-2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test17.15 b/testsuite/expect/test17.15
index 7bbf9bb320e5916cbdb15fd2726f9677cc5c3a23..371368baee08be4c59484c1799c70535591b697f 100755
--- a/testsuite/expect/test17.15
+++ b/testsuite/expect/test17.15
@@ -13,10 +13,11 @@
 # Copyright (C) 2002-2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test17.15.prog.c b/testsuite/expect/test17.15.prog.c
index 520d927990cf7700c3dae37f15e74a9bd5082243..e680f327794cade32485ab5a158031194d000c68 100644
--- a/testsuite/expect/test17.15.prog.c
+++ b/testsuite/expect/test17.15.prog.c
@@ -6,10 +6,11 @@
  *  Copyright (C) 2002 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test17.16 b/testsuite/expect/test17.16
index 63d9498a812f083a8a44823b5e82e6227bfb51a4..f10d8aaef563b41dbc399e1753a93c0b1960bd8b 100755
--- a/testsuite/expect/test17.16
+++ b/testsuite/expect/test17.16
@@ -10,10 +10,11 @@
 # Copyright (C) 2002 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test17.17 b/testsuite/expect/test17.17
index fd9bba8cbae412ffd7cc9d2b38a49ad84bbe7ec1..f78cfbef07000821c355abb959b87d941f1c76c3 100755
--- a/testsuite/expect/test17.17
+++ b/testsuite/expect/test17.17
@@ -12,10 +12,11 @@
 # Copyright (C) 2002-2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
@@ -110,7 +111,7 @@ if {$job_id1 == 0} {
 
 set partition "dummy"
 set waited 1
-set timeout [expr $timeout + 5]
+set timeout [expr $max_job_delay + 5]
 set srun_pid [spawn $srun -N1 --nodelist=$nodelist_name -t1 --share $scontrol -o show job $job_id1]
 expect {
 	-re "Partition=($alpha_numeric_under)" {
diff --git a/testsuite/expect/test17.18 b/testsuite/expect/test17.18
index 5273ba50bfd060d91398dec63cc01699c3aecc9f..e172341d71fd486f1082cf5070a2d52798b95585 100755
--- a/testsuite/expect/test17.18
+++ b/testsuite/expect/test17.18
@@ -11,10 +11,11 @@
 # Copyright (C) 2004-2007 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test17.2 b/testsuite/expect/test17.2
index c10ca2aad67e74feb776c3dbdb164308345a95f2..73bb39af16bb185398f7c662730ef7a2934b9d07 100755
--- a/testsuite/expect/test17.2
+++ b/testsuite/expect/test17.2
@@ -10,10 +10,11 @@
 # Copyright (C) 2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test17.20 b/testsuite/expect/test17.20
index f95f02a2314ca7b714ea5812a06f419c24586852..83e5cf2aefdf4b3452ce7a2218cf2d3090120490 100755
--- a/testsuite/expect/test17.20
+++ b/testsuite/expect/test17.20
@@ -10,10 +10,11 @@
 # Copyright (C) 2002-2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test17.21 b/testsuite/expect/test17.21
index b000c98c8e046935fa71816c9fa62f741a71784f..c9a1124ce88d4b43e9b679f28fa6997906d8b671 100755
--- a/testsuite/expect/test17.21
+++ b/testsuite/expect/test17.21
@@ -11,10 +11,11 @@
 # Copyright (C) 2008 Lawrence Livermore National Security.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Danny Auble <da@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test17.22 b/testsuite/expect/test17.22
index b97ef0ce0e839f949a123b585baa7e4cb524cda0..4856d07e826003868a37455c96b3198d0ad4b555 100755
--- a/testsuite/expect/test17.22
+++ b/testsuite/expect/test17.22
@@ -10,10 +10,11 @@
 # Copyright (C) 2002-2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test17.23 b/testsuite/expect/test17.23
index fe78c87ddbd4ee3e346f71243c7f87f408e93bf3..407b9a012f14185f047228f233483d6963e50759 100755
--- a/testsuite/expect/test17.23
+++ b/testsuite/expect/test17.23
@@ -10,10 +10,11 @@
 # Copyright (C) 2005-2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
@@ -171,17 +172,19 @@ expect {
 if {$job_prio1 == 0 || $job_prio2 == 0 || $job_prio3 == 0} {
 	send_user "\nFAILURE: failed to job priorities of each submitted job\n"
 	set exit_code 1
+} elseif {$job_prio1 < 1000 || $job_prio2 < 1000 || $job_prio3 < 1000} {
+	send_user "\nWARNING: PriorityWeight factors result in a job priority too low for this test\n"
 } else {
 	set diff2 [expr $job_prio1 - $job_prio2]
 	set diff3 [expr $job_prio1 - $job_prio3]
 #	Target for diff2 is 101
 	if {$diff2 < 91 || $diff2 > 111} {
-		send_user "\nFAILURE: job2 priority delta bad $diff2\n"
+		send_user "\nFAILURE: job2 priority delta bad ($diff2, target is 101)\n"
 		set exit_code 1
 	}
 #	Target for diff3 is 202
 	if {$diff3 < 192 || $diff3 > 212} {
-		send_user "\nFAILURE: job3 priority delta bad $diff3\n"
+		send_user "\nFAILURE: job3 priority delta bad ($diff3, target is 202)\n"
 		set exit_code 1
 	}
 }
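[For reference, the acceptance bands above are the targets plus or minus 10:
91 to 111 around the target of 101 for job2, and 192 to 212 around 202 for
job3. If one wanted to factor the check out, a generic band test is short:

    # Sketch: true when val lies within +/- tol of target.
    proc within_band {val target {tol 10}} {
        return [expr ($val >= $target - $tol) && ($val <= $target + $tol)]
    }
]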
diff --git a/testsuite/expect/test17.24 b/testsuite/expect/test17.24
index 7a1a57d3d67a82611798b60625f7bebe6b3d8a9b..5607f7525ed71e9a259f9111073f9012a8dc5b80 100755
--- a/testsuite/expect/test17.24
+++ b/testsuite/expect/test17.24
@@ -10,10 +10,11 @@
 # Copyright (C) 2002-2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test17.25 b/testsuite/expect/test17.25
index 4de716e8d909d31148ad64ab2e4cf70311f6107d..b2ef269fac301661208f118c240c7dc6f2777c20 100755
--- a/testsuite/expect/test17.25
+++ b/testsuite/expect/test17.25
@@ -11,10 +11,11 @@
 # Copyright (C) 2002-2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test17.26 b/testsuite/expect/test17.26
index 99fb08cbbd27c0daf417dec87fce45b57b75fa3e..0782a580392903b1d0465694bfc5192db948ba3f 100755
--- a/testsuite/expect/test17.26
+++ b/testsuite/expect/test17.26
@@ -10,10 +10,11 @@
 # Copyright (C) 2002-2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test17.27 b/testsuite/expect/test17.27
index 97b7e48bc815d034c92323cd8322c3d88bb68e9a..e60a3a70a5229f4b2181d95138585b01aa1f0418 100755
--- a/testsuite/expect/test17.27
+++ b/testsuite/expect/test17.27
@@ -12,10 +12,11 @@
 # Copyright (C) 2002-2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test17.28 b/testsuite/expect/test17.28
index 47fbd24350165c4683c2b6beadb164803faee1b1..292c0b6764d3e87017179399ed8e83dea8c31b1e 100755
--- a/testsuite/expect/test17.28
+++ b/testsuite/expect/test17.28
@@ -11,10 +11,11 @@
 # Copyright (C) 2008 Lawrence Livermore National Security.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Danny Auble <da@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test17.29 b/testsuite/expect/test17.29
index ae7f9744614528956c0cdcfde6c1caba6fad1b04..7843c5559b5ffd5158bd2ddcbf5eb0f700b17fa2 100755
--- a/testsuite/expect/test17.29
+++ b/testsuite/expect/test17.29
@@ -10,10 +10,11 @@
 # Copyright (C) 2002-2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test17.3 b/testsuite/expect/test17.3
index 9455d10ce2e0d5b18409e4324efe9aa840dad2bc..c256a6dd963774910c136582ef5704807a52256c 100755
--- a/testsuite/expect/test17.3
+++ b/testsuite/expect/test17.3
@@ -11,10 +11,11 @@
 # Copyright (C) 2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test17.31 b/testsuite/expect/test17.31
index 70c9cfe87a8eeb5609b06e54cb5e7de2b6ae0c57..9d1557e5a44fe594a080af2a6127e19f0cb625f5 100755
--- a/testsuite/expect/test17.31
+++ b/testsuite/expect/test17.31
@@ -10,10 +10,11 @@
 # Copyright (C) 2005-2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Danny Auble <da@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test17.32 b/testsuite/expect/test17.32
index 22e0c172688f315d339e8a1fd2a76ce465c323bd..eace6c8984b642ea494ea1df7a286f2b82aa611c 100755
--- a/testsuite/expect/test17.32
+++ b/testsuite/expect/test17.32
@@ -10,10 +10,11 @@
 # Copyright (C) 2002-2007 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test17.33 b/testsuite/expect/test17.33
index c26cc08f9e5b953eb3d28215bc8431b7164b5f45..e30d07af440d7c5ac2e217fc9313199cb16c0585 100755
--- a/testsuite/expect/test17.33
+++ b/testsuite/expect/test17.33
@@ -10,10 +10,11 @@
 # Copyright (C) 2002-2007 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test17.4 b/testsuite/expect/test17.4
index 1c65f51f430daffc23651432f76d2d7abcc16027..116e2b1ece8edcbf438e0efb8b25f1034b796d6d 100755
--- a/testsuite/expect/test17.4
+++ b/testsuite/expect/test17.4
@@ -11,10 +11,11 @@
 # Copyright (C) 2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test17.5 b/testsuite/expect/test17.5
index 63f72430f1acd753feca20e4119c921650c263d1..47c9c6b8ed10d28af5df60ef96adccab28d900eb 100755
--- a/testsuite/expect/test17.5
+++ b/testsuite/expect/test17.5
@@ -12,10 +12,11 @@
 # Copyright (C) 2008 Lawrence Livermore National Security.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test17.6 b/testsuite/expect/test17.6
index 29b9f0b13de7e1dbce818bfb7e7ed1000ac69f30..1e29be184cab8148143c67b55b81e09aa8732650 100755
--- a/testsuite/expect/test17.6
+++ b/testsuite/expect/test17.6
@@ -11,10 +11,11 @@
 # Copyright (C) 2002-2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test17.7 b/testsuite/expect/test17.7
index 1e68235fceee6773a4c43f542e6917f4c1bcbdf0..59d2ad878b490b0a92df8b6de825555621017147 100755
--- a/testsuite/expect/test17.7
+++ b/testsuite/expect/test17.7
@@ -11,10 +11,11 @@
 # Copyright (C) 2002-2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
@@ -47,6 +48,21 @@ print_header $test_id
 # Build stdin file
 #
 exec $bin_rm -f $file_in $file_out $file_err
+if {[file exists /tmp/$file_err]} {
+	send_user "\nWARNING: can not delete /tmp/$file_err to run test\n"
+	set random 0
+	spawn $bin_date +%N
+	expect {
+		-re "($number)" {
+			set random $expect_out(1,string)
+			exp_continue
+		}
+		eof {
+			wait
+		}
+	}
+	set file_err $file_err.$random
+}
 make_bash_script $file_in "
   $bin_pwd
   $bin_cat /no/such/file
@@ -102,7 +118,7 @@ if {[wait_for_file $file_out] == 0} {
 	}
 }
 if {$matches != 1} {
-	send_user "\nFAILURE: sbatch failed to change working directory\n"
+	send_user "\nFAILURE: sbatch failed to get expected stdout\n"
 	set exit_code 1
 }
 
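[One caveat to the fallback above: %N (nanoseconds) is a GNU date extension.
Where it is unavailable, a throwaway suffix can be produced in pure Tcl
instead:

    # Sketch: unique-enough scratch suffix without shelling out.
    set suffix [expr [clock clicks] ^ [pid]]
    set file_err $file_err.$suffix
]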
diff --git a/testsuite/expect/test17.8 b/testsuite/expect/test17.8
index e181934e69ad382b95bd33cca3af67122a361067..9a990ba36bb8424b75a5d6fde468069e18751b91 100755
--- a/testsuite/expect/test17.8
+++ b/testsuite/expect/test17.8
@@ -11,10 +11,11 @@
 # Copyright (C) 2002-2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test17.9 b/testsuite/expect/test17.9
index f3aa6c12f41ec1a22b36a89e790fcda67284a6d2..6b4e6b3dfdff3ae6ad43d1eab97d3dae2d1a7ec7 100755
--- a/testsuite/expect/test17.9
+++ b/testsuite/expect/test17.9
@@ -10,10 +10,11 @@
 # Copyright (C) 2002-2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test19.1 b/testsuite/expect/test19.1
index de04f2465d3953c30df58bbe95f84ee1c9b55f29..d2d63c597ccdf9437396eebb6974336486aad6c1 100755
--- a/testsuite/expect/test19.1
+++ b/testsuite/expect/test19.1
@@ -10,10 +10,11 @@
 # Copyright (C) 2007 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test19.2 b/testsuite/expect/test19.2
index c897eecc3695cec73dcbc53482d5213a9dd47512..e4d5f88945fd0a9936d1efc1651331c1753a2b9a 100755
--- a/testsuite/expect/test19.2
+++ b/testsuite/expect/test19.2
@@ -10,10 +10,11 @@
 # Copyright (C) 2007 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test19.3 b/testsuite/expect/test19.3
index bbfa850862b873e9932856967824a9ecc890bd8d..c6a3721e83c170c4443ea8b188ac187214fbc497 100755
--- a/testsuite/expect/test19.3
+++ b/testsuite/expect/test19.3
@@ -10,10 +10,11 @@
 # Copyright (C) 2007 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test19.4 b/testsuite/expect/test19.4
index 47059583dbd4b9f5911e210f80db1b25a7d21481..cc56e7434a547039f037e4e7577ad1ee38167275 100755
--- a/testsuite/expect/test19.4
+++ b/testsuite/expect/test19.4
@@ -10,10 +10,11 @@
 # Copyright (C) 2007 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test19.5 b/testsuite/expect/test19.5
index d7f302ef830a0d6a4e4621f5e4fc5b942ea152d2..22e73b2ce8f8c1b56a5ec4adc414a72348e09194 100755
--- a/testsuite/expect/test19.5
+++ b/testsuite/expect/test19.5
@@ -10,10 +10,11 @@
 # Copyright (C) 2007 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
@@ -206,10 +207,12 @@ if {[wait_for_file $file_out_fini] != 0} {
 	send_user "\nFAILURE: file $file_out_fini is missing\n"
 	set exit_code 1
 } else {
+#	Check that job run time was one minute. If SLURM is configured to 
+#	power down idle nodes, this could possibly take a bit more time.
 	set job_fini 0
 	spawn $bin_cat $file_out_fini
 	expect {
-		-re "CD *1:0" {
+		-re "CD *1:($number)" {
 			set job_fini 1
 			exp_continue
 		}
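[The relaxed pattern accepts any seconds count after the one-minute mark
rather than exactly zero. A quick standalone check of both forms:

    # Sketch: an exact "CD  1:0" and a power-up-padded "CD  1:07"
    # both satisfy the new expression.
    foreach line {"CD    1:0" "CD    1:07"} {
        if {[regexp "CD *1:(\[0-9\]+)" $line full secs]} {
            send_user "matched: $line (seconds field: $secs)\n"
        }
    }
]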
diff --git a/testsuite/expect/test19.6 b/testsuite/expect/test19.6
index 8b12a62c681fbc3f39cba51a6d2228642534c67d..f7c81e878c583a0e0e98f3866ec1debed8a29604 100755
--- a/testsuite/expect/test19.6
+++ b/testsuite/expect/test19.6
@@ -10,10 +10,11 @@
 # Copyright (C) 2007 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test19.7 b/testsuite/expect/test19.7
index a2e454c0c172a3395ef7eae54647aea76053ae07..2d8a1bb706eea1cfaf285ee2be9d10fd96bd8fdd 100755
--- a/testsuite/expect/test19.7
+++ b/testsuite/expect/test19.7
@@ -10,10 +10,11 @@
 # Copyright (C) 2007 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test2.1 b/testsuite/expect/test2.1
index 044ac6e1b2275af2bf4391a70e6637c5bf5ab242..635057a33ae8d1c26facea1073d489625487ca86 100755
--- a/testsuite/expect/test2.1
+++ b/testsuite/expect/test2.1
@@ -10,10 +10,11 @@
 # Copyright (C) 2002 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test2.10 b/testsuite/expect/test2.10
index ac754a01b7237771e484131cc647defb7c046e43..aaf7c26f842c45963636fcdbfba783e3d2d757d7 100755
--- a/testsuite/expect/test2.10
+++ b/testsuite/expect/test2.10
@@ -10,10 +10,11 @@
 # Copyright (C) 2002-2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test2.11 b/testsuite/expect/test2.11
index 3ae4cdfdc8939bccbfe01568037a2996c3b79969..284da675916db44e18e7dbe6b4dcc07c43d875b2 100755
--- a/testsuite/expect/test2.11
+++ b/testsuite/expect/test2.11
@@ -10,10 +10,11 @@
 # Copyright (C) 2002-2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
@@ -49,9 +50,9 @@ file delete $file_out
 make_bash_script $file_in "
   $srun $bin_sleep 10 &
   $bin_sleep 1
-  $scontrol listpids \$SLURM_JOBID.10 \$SLURMD_NODENAME
-  $scontrol listpids \$SLURM_JOBID.0 \$SLURMD_NODENAME
-  $scontrol listpids \$SLURM_JOBID \$SLURMD_NODENAME
+  $scontrol listpids \$SLURM_JOB_ID.10 \$SLURMD_NODENAME
+  $scontrol listpids \$SLURM_JOB_ID.0 \$SLURMD_NODENAME
+  $scontrol listpids \$SLURM_JOB_ID \$SLURMD_NODENAME
 "
 
 #
diff --git a/testsuite/expect/test17.19 b/testsuite/expect/test2.12
similarity index 50%
rename from testsuite/expect/test17.19
rename to testsuite/expect/test2.12
index d44849c4c4e597b3c0428025eba66f365d130226..6753b534a3fa69f8b54e82de485fccb6d2af9840 100755
--- a/testsuite/expect/test17.19
+++ b/testsuite/expect/test2.12
@@ -1,20 +1,20 @@
 #!/usr/bin/expect
 ############################################################################
 # Purpose: Test of SLURM functionality
-#          Test the launch of a batch job within an existing job allocation.
-#          This logic is used by LSF
+#          Validate the scontrol show topology option.
 #
 # Output:  "TEST: #.#" followed by "SUCCESS" if test was successful, OR
 #          "FAILURE: ..." otherwise with an explanation of the failure, OR
 #          anything else indicates a failure mode that must be investigated.
 ############################################################################
-# Copyright (C) 2005-2006 The Regents of the University of California.
+# Copyright (C) 2009 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
@@ -28,58 +28,49 @@
 # 
 # You should have received a copy of the GNU General Public License along
 # with SLURM; if not, write to the Free Software Foundation, Inc.,
-# 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
 ############################################################################
 source ./globals
 
-set test_id      "17.19"
-set file_in      "test$test_id.input"
-set exit_code    0
-set job_id_1     0
-set job_id_2     0
+set test_id     "2.12"
+set exit_code   0
 
 print_header $test_id
 
-#
-# Build input script file
-#
-exec $bin_rm -f $file_in
-make_bash_script $file_in "$bin_printenv SLURMD_NODENAME"
-
-#
-# Spawn a batch job that uses stdout/err and confirm their contents
-#
-set timeout $max_job_delay
-if { [test_bluegene] } {
-	set node_cnt 1-2048
-} else {
-	if { [test_xcpu] } {
-		set node_cnt 1-1
-	} else {
-		set node_cnt 1-4
-	}
-}
-
-set salloc_pid [spawn $salloc -N$node_cnt -v -t1 $bin_bash]
+# Test whether running with TopologyPlugin=topology/tree
+set topology_tree 0
+log_user 0
+spawn $scontrol show config
 expect {
-	-re "Granted job allocation ($number)" {
-		set job_id_1 $expect_out(1,string)
-		send "$sbatch --jobid=$job_id_1 -o none -e none $file_in \n"
+	-re "TopologyPlugin *= *topology/tree" {
+		set topology_tree 1
 		exp_continue
 	}
-	-re "Submitted batch job ($number)" {
-		set job_id_2 $expect_out(1,string)
-		send "exit \n"
-		exp_continue
+	eof {
+		wait
 	}
-	-re "Submitted batch job ($number).0" {
-		set job_id_2 $expect_out(1,string)
-		send "exit \n"
-		exp_continue
+}
+log_user 1
+
+if {$topology_tree == 0} {
+	send_user "\nWARNING: This test is only valid with a topology/tree configuration\n"
+	exit 0
+}
+
+#
+# Report the slurm topology
+#
+set matches     0
+spawn $scontrol show topology
+expect {
+	-re "SwitchName=($alpha_numeric_under)" {
+		if {$matches == 0} {
+			set switch_name $expect_out(1,string)
+		}
+		incr matches
 	}
 	timeout {
-		send_user "\nFAILURE: srun not responding\n"
-		slow_kill $salloc_pid
+		send_user "\nFAILURE: scontrol not responding\n"
 		set exit_code 1
 	}
 	eof {
@@ -87,22 +78,35 @@ expect {
 	}
 }
 
-if {$job_id_1 == 0} {
-	send_user "\nFAILURE: job allocation failure\n"
-	exit 1
-}
-if {$job_id_1 != $job_id_2} {
-	send_user "\nFAILURE: batch job did not run in existing allocation\n"
+if {$matches == 0} {
+	send_user "\nFAILURE: scontrol reported no switches\n"
 	exit 1
 }
 
-if [file exists none] {
-	send_user "\nFAILURE: created file 'none', should have mapped to no file\n"
-	exit 1
+#
+# Report one slurm switch
+#
+set matches     0
+spawn $scontrol show topology $switch_name
+expect {
+	-re "SwitchName" {
+		incr matches
+		exp_continue
+	}
+	timeout {
+		send_user "\nFAILURE: scontrol not responding\n"
+		set exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+if {$matches != 1} {
+	send_user "\nFAILURE: scontrol topology with switch name filter failed\n"
+	set exit_code 1
 }
 
 if {$exit_code == 0} {
 	send_user "\nSUCCESS\n"
-	exec $bin_rm -f $file_in
 }
 exit $exit_code
diff --git a/testsuite/expect/test2.2 b/testsuite/expect/test2.2
index fd894a5e0ff4f145389a25ef44bb741682677b1d..b5f8f18b2b8e9f1aeb71dce099168785765de9f5 100755
--- a/testsuite/expect/test2.2
+++ b/testsuite/expect/test2.2
@@ -10,10 +10,11 @@
 # Copyright (C) 2002 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test2.3 b/testsuite/expect/test2.3
index 6d069fdce71aa057253fc1d36c771c14dfbadd74..60bb9cec22882169e41854c93467a9ec41828c4a 100755
--- a/testsuite/expect/test2.3
+++ b/testsuite/expect/test2.3
@@ -10,10 +10,11 @@
 # Copyright (C) 2002 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test2.4 b/testsuite/expect/test2.4
index 12142fac7cd14e9b010debd1c8947647ad7b5d1d..f8128d5c7e0dfee165f1282c2dbb34603a59b9e4 100755
--- a/testsuite/expect/test2.4
+++ b/testsuite/expect/test2.4
@@ -14,10 +14,11 @@
 # Copyright (C) 2002 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test2.5 b/testsuite/expect/test2.5
index b378fdd4a30806fe699a323a677636cad3246d51..8a318bc7806301c5c43d63a0f675491f262702db 100755
--- a/testsuite/expect/test2.5
+++ b/testsuite/expect/test2.5
@@ -11,10 +11,11 @@
 # Copyright (C) 2002 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
@@ -51,7 +52,7 @@ expect {
 		incr matches
 		exp_continue
 	}
-	-re "SLURM_CONFIG_FILE" {
+	-re "SLURM_CONF" {
 		incr matches
 		exp_continue
 	}
diff --git a/testsuite/expect/test2.6 b/testsuite/expect/test2.6
index 49e3ab2aa770f494d978d4d80bf18f26a43aa3a7..0df1267f4035d8c27a1a0e56baa9bbc6088f8220 100755
--- a/testsuite/expect/test2.6
+++ b/testsuite/expect/test2.6
@@ -10,10 +10,11 @@
 # Copyright (C) 2002 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test2.7 b/testsuite/expect/test2.7
index 4bea531a7c6fc6c03d3438c559d8be35253c982e..4c4f1e69dfb5739801239b846154f967f5cea3d7 100755
--- a/testsuite/expect/test2.7
+++ b/testsuite/expect/test2.7
@@ -10,10 +10,11 @@
 # Copyright (C) 2002-2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test2.8 b/testsuite/expect/test2.8
index 39bf4363b8211adaa8cd060588c2bccff430ad21..7560f1cee2093b8a2a081c1064608d3c4ed9622d 100755
--- a/testsuite/expect/test2.8
+++ b/testsuite/expect/test2.8
@@ -10,10 +10,11 @@
 # Copyright (C) 2002 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test2.9 b/testsuite/expect/test2.9
index fc317de68928047b3eb1fa9ed4c9179b5d79bd3c..3f66b2a7f06c6e37048bdae9ef3c86c35d731540 100755
--- a/testsuite/expect/test2.9
+++ b/testsuite/expect/test2.9
@@ -10,10 +10,11 @@
 # Copyright (C) 2002 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test20.1 b/testsuite/expect/test20.1
index 201f01815311d860eb71b47e7577b97c24d1d17e..657e77043272a7d244c7b696debba64dd084b1ea 100755
--- a/testsuite/expect/test20.1
+++ b/testsuite/expect/test20.1
@@ -10,10 +10,11 @@
 # Copyright (C) 2007 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test20.2 b/testsuite/expect/test20.2
index f07bdac769c25d0ba3d1ef3812958677e1dec2d9..64f533533d0135276c6470a6f304371aa2ebab3e 100755
--- a/testsuite/expect/test20.2
+++ b/testsuite/expect/test20.2
@@ -10,10 +10,11 @@
 # Copyright (C) 2007 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test20.3 b/testsuite/expect/test20.3
index 50679cab7e8c7cb88eeeeb8cc4ee9e37eae06801..cc2955dfb1ff6e57049b4f44f29b4f2edd1ee013 100755
--- a/testsuite/expect/test20.3
+++ b/testsuite/expect/test20.3
@@ -10,10 +10,11 @@
 # Copyright (C) 2007 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test20.4 b/testsuite/expect/test20.4
index 4a8acc7833f263ce2e48365ee9c8949be9b8ac84..dd862a574ebe703768ec056b2245dafbadb1b623 100755
--- a/testsuite/expect/test20.4
+++ b/testsuite/expect/test20.4
@@ -10,10 +10,11 @@
 # Copyright (C) 2007 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test21.1 b/testsuite/expect/test21.1
index 3a142cc8226ef4b439b155584c1559a6ac7a8be2..c5b22065739782bc4e5ce543abc1d74be2a4963f 100755
--- a/testsuite/expect/test21.1
+++ b/testsuite/expect/test21.1
@@ -10,10 +10,11 @@
 # Copyright (C) 2008 Lawrence Livermore National Security.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Joseph Donaghy <donaghy1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test21.10 b/testsuite/expect/test21.10
index 71d8e1ac5506874b726a065bcc3800ccbe280d22..10d4efb352ea0703a77a61d1e2412fc69777e28f 100755
--- a/testsuite/expect/test21.10
+++ b/testsuite/expect/test21.10
@@ -10,10 +10,11 @@
 # Copyright (C) 2008 Lawrence Livermore National Security.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Joseph Donaghy <donaghy1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test21.11 b/testsuite/expect/test21.11
index 68f75cd423b76af8fa541e2801158c3016607264..229890062e27d1b35563b000cf02fe87fc6ccd2b 100755
--- a/testsuite/expect/test21.11
+++ b/testsuite/expect/test21.11
@@ -10,10 +10,11 @@
 # Copyright (C) 2008 Lawrence Livermore National Security.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Joseph Donaghy <donaghy1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test21.12 b/testsuite/expect/test21.12
index 4566dcacadbf542887c328662c88510cbb436a59..457b7a6ba7330e69bc80b462be622c8105192a6d 100755
--- a/testsuite/expect/test21.12
+++ b/testsuite/expect/test21.12
@@ -10,10 +10,11 @@
 # Copyright (C) 2008 Lawrence Livermore National Security.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Joseph Donaghy <donaghy1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test21.13 b/testsuite/expect/test21.13
index 4a69b71d083458b99f91beae9a155ce9fb86ab91..60035fbe4d68c13e30ed4de28fb991aba0f2f09c 100755
--- a/testsuite/expect/test21.13
+++ b/testsuite/expect/test21.13
@@ -10,10 +10,11 @@
 # Copyright (C) 2008 Lawrence Livermore National Security.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Joseph Donaghy <donaghy1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
@@ -91,7 +92,7 @@ if {$access_err != 0} {
 remove_acct "" "$nm1,$nm2,$nm3"
 
 #add cluster
-incr exit_code [add_cluster "$tc1,$tc2" "" "" "" "" "" "" "" "" "" "" "" "" "" ""]
+incr exit_code [add_cluster "$tc1,$tc2" "" "" "" "" "" "" "" "" "" "" "" ""]
 if { $exit_code } {
 	remove_acct "" "$nm1,$nm2,$nm3"
 	remove_cluster "$tc1,$tc2"
diff --git a/testsuite/expect/test21.14 b/testsuite/expect/test21.14
index 6c8d7f4567c9ad5695a55294e5fc24cec27ac3b2..e4f97c8f01259e5b85f65cf415b84dc6295f113c 100755
--- a/testsuite/expect/test21.14
+++ b/testsuite/expect/test21.14
@@ -11,10 +11,11 @@
 # Copyright (C) 2008 Lawrence Livermore National Security.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Joseph Donaghy <donaghy1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
@@ -89,7 +90,7 @@ if {$access_err != 0} {
 remove_acct "" "$nm1,$nm2,$nm3"
 
 #add cluster
-incr exit_code [add_cluster "$tc1,$tc2" "" "" "" "" "" "" "" "" "" "" "" "" "" ""]
+incr exit_code [add_cluster "$tc1,$tc2" "" "" "" "" "" "" "" "" "" "" "" ""]
 if { $exit_code } {
 	remove_acct "" "$nm1,$nm2,$nm3"
 	remove_cluster "$tc1,$tc2"
diff --git a/testsuite/expect/test21.15 b/testsuite/expect/test21.15
index 559fdcdfe7c8315ee418f7091e0a284149081b22..b9ffde2128c4bac6c01302187164a4d29daa6558 100755
--- a/testsuite/expect/test21.15
+++ b/testsuite/expect/test21.15
@@ -11,10 +11,11 @@
 # Copyright (C) 2008 Lawrence Livermore National Security.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Joseph Donaghy <donaghy1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
@@ -117,7 +118,7 @@ if {$access_err != 0} {
 }
 
 #add cluster
-incr exit_code [add_cluster "$tc1,$tc2,$tc3" "" "" "" "" "" "" "" "" "" "" "" "" "" ""]
+incr exit_code [add_cluster "$tc1,$tc2,$tc3" "" "" "" "" "" "" "" "" "" "" "" ""]
 if { $exit_code } {
 	remove_user "" "" "$us1,$us2,$us3"
 	remove_acct "" "$nm1,$nm2,$nm3"
diff --git a/testsuite/expect/test21.16 b/testsuite/expect/test21.16
index 162aa914adef1a0f237429a052c7b5f58130df2c..7c3c26d77f21d6e38b623b2de7bf6eb6b2f22185 100755
--- a/testsuite/expect/test21.16
+++ b/testsuite/expect/test21.16
@@ -11,10 +11,11 @@
 # Copyright (C) 2008 Lawrence Livermore National Security.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Joseph Donaghy <donaghy1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
@@ -117,7 +118,7 @@ if {$access_err != 0} {
 }
 
 #add cluster
-incr exit_code [add_cluster "$tc1,$tc2,$tc3" "" "" "" "" "" "" "" "" "" "" "" "" "" ""]
+incr exit_code [add_cluster "$tc1,$tc2,$tc3" "" "" "" "" "" "" "" "" "" "" "" ""]
 if { $exit_code } {
 	remove_user "" "" "$us1,$us2,$us3"
 	remove_acct "" "$nm1,$nm2,$nm3"
diff --git a/testsuite/expect/test21.17 b/testsuite/expect/test21.17
index 400b82d2fb30ae5d41bcf03e9e1e3100bc07d55c..e3e87e9ef0939932633cc915609b603c77d689fd 100755
--- a/testsuite/expect/test21.17
+++ b/testsuite/expect/test21.17
@@ -11,10 +11,11 @@
 # Copyright (C) 2008 Lawrence Livermore National Security.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Joseph Donaghy <donaghy1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
@@ -117,7 +118,7 @@ if {$access_err != 0} {
 }
 
 #add cluster
-incr exit_code [add_cluster "$tc1,$tc2,$tc3" "" "" "" "" "" "" "" "" "" "" "" "" "" ""]
+incr exit_code [add_cluster "$tc1,$tc2,$tc3" "" "" "" "" "" "" "" "" "" "" "" ""]
 if { $exit_code } {
      	remove_user "" "" "$us1,$us2,$us3"
 	remove_acct "" "$nm1,$nm2,$nm3"
diff --git a/testsuite/expect/test21.18 b/testsuite/expect/test21.18
index 9f244444829a074a0d032d813112cb1250f36fdf..4e3bc716297acaaca40582547ef6bb911829cd1b 100755
--- a/testsuite/expect/test21.18
+++ b/testsuite/expect/test21.18
@@ -11,10 +11,11 @@
 # Copyright (C) 2008 Lawrence Livermore National Security.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Joseph Donaghy <donaghy1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
@@ -117,7 +118,7 @@ if {$access_err != 0} {
 }
 
 #add cluster
-incr exit_code [add_cluster "$tc1,$tc2,$tc3" "" "" "" "" "" "" "" "" "" "" "" "" "" ""]
+incr exit_code [add_cluster "$tc1,$tc2,$tc3" "" "" "" "" "" "" "" "" "" "" "" ""]
 if { $exit_code } {
 	remove_user "" "" "$us1,$us2,$us3"
 	remove_acct "" "$nm1,$nm2,$nm3"
diff --git a/testsuite/expect/test21.19 b/testsuite/expect/test21.19
index 42fa0bba6d82bbc2577b4d5a3c684fc5b9fd2fa9..5061c6994268f25c1d9108cdfdb5968bb9e68d70 100755
--- a/testsuite/expect/test21.19
+++ b/testsuite/expect/test21.19
@@ -11,10 +11,11 @@
 # Copyright (C) 2008 Lawrence Livermore National Security.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Joseph Donaghy <donaghy1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
@@ -117,7 +118,7 @@ if {$access_err != 0} {
 }
 
 #add cluster
-incr exit_code [add_cluster "$tc1,$tc2,$tc3" "" "" "" "" "" "" "" "" "" "" "" "" "" ""]
+incr exit_code [add_cluster "$tc1,$tc2,$tc3" "" "" "" "" "" "" "" "" "" "" "" ""]
 if { $exit_code } {
 	remove_user "" "" "$us1,$us2,$us3"
 	remove_acct "" "$nm1,$nm2,$nm3"
diff --git a/testsuite/expect/test21.2 b/testsuite/expect/test21.2
index 90fa0218ad23c5caf2b8ae4d8e94c37e08967901..8c69361faadf9c790dcced4a0ec8a635e4441efe 100755
--- a/testsuite/expect/test21.2
+++ b/testsuite/expect/test21.2
@@ -10,10 +10,11 @@
 # Copyright (C) 2008 Lawrence Livermore National Security.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Joseph Donaghy <donaghy1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test21.20 b/testsuite/expect/test21.20
index bb7f8b7ec1553550b3817cb05fa61c23113014fc..aeb2c31e440579853a63dab01454e6ea3e3f2237 100755
--- a/testsuite/expect/test21.20
+++ b/testsuite/expect/test21.20
@@ -11,10 +11,11 @@
 # Copyright (C) 2008 Lawrence Livermore National Security.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Joseph Donaghy <donaghy1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
@@ -122,7 +123,7 @@ if {$access_err != 0} {
 }
 
 #add cluster
-incr exit_code [add_cluster "$tc1,$tc2,$tc3" "" "" "" "" "" "" "" "" "" "" "" "" "" ""]
+incr exit_code [add_cluster "$tc1,$tc2,$tc3" "" "" "" "" "" "" "" "" "" "" "" ""]
 if { $exit_code } {
 	remove_user "" "" "$us1,$us2,$us3"
 	remove_acct "" "$nm1,$nm2,$nm3"
diff --git a/testsuite/expect/test21.21 b/testsuite/expect/test21.21
index 6f8c60475ad04d189bd29e4ec13eabd4ec68da1f..30abd7c048e64040875e44f4eecd086e86af8512 100755
--- a/testsuite/expect/test21.21
+++ b/testsuite/expect/test21.21
@@ -11,10 +11,11 @@
 # Copyright (C) 2008 Lawrence Livermore National Security.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Joseph Donaghy <donaghy1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test21.22 b/testsuite/expect/test21.22
index 7aebb00ea8910e0ac9b29aee17704ef64f396dd9..2c1229b8b0c0418098f1f9d03c2169d28d7785e0 100755
--- a/testsuite/expect/test21.22
+++ b/testsuite/expect/test21.22
@@ -11,10 +11,11 @@
 # Copyright (C) 2008 Lawrence Livermore National Security.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Joseph Donaghy <donaghy1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
@@ -99,6 +100,10 @@ set mj		MaxJobs
 set ms		MaxSubmitJobs
 set mn		MaxNodes
 set mw		MaxWall
+set class       Classification
+set class1     	*Capacity
+set class2     	Capability
+set class3     	Capapacity
 set fs1		1000
 set fs2		2375
 set fs3		3240
@@ -208,81 +213,6 @@ if { [string compare [check_accounting_admin_level] "Administrator"] } {
 	exit 0
 }
 
-#
-# Identify the user and his current default account
-#
-set acct_name ""
-set user_name ""
-spawn $bin_id -u -n
-expect {
-	 -re "($alpha_numeric_under)" {
-		set user_name $expect_out(1,string)
-		exp_continue
-	}
-	eof {
-		wait
-	}
-}
-set s_pid [spawn $sacctmgr show user $user_name]
-expect {
-	-re "$user_name *($alpha_numeric_under)" {
-		set acct_name $expect_out(1,string)
-		exp_continue
-	}
-	timeout {
-		send_user "FAILURE: sacctmgr add not responding\n"
-		slow_kill $s_pid
-		exit 1
-	}
-	eof {
-		wait
-	}
-}
-
-#
-# Use sacctmgr to add an account
-#
-set aamatches 0
-set sadd_pid [spawn $sacctmgr -i add account $test_acct]
-expect {
-	-re "Adding Account" {
-		incr aamatches
-		exp_continue
-	}
-	-re "Nothing new added" {
-		send_user "\nWARNING: vestigial account $test_acct found\n"
-		incr aamatches
-		exp_continue
-	}
-	timeout {
-		send_user "\nFAILURE: sacctmgr add not responding\n"
-		slow_kill $sadd_pid
-		set exit_code 1
-	}
-	eof {
-		wait
-	}
-}
-if {$aamatches != 1} {
-	send_user "\nFAILURE:  sacctmgr had a problem adding account.\n"
-	exit 1
-}
-
-#
-# Add self to this new account
-#
-set sadd_pid [spawn $sacctmgr -i create user name=$user_name account=$test_acct]
-expect {
-	 timeout {
-		send_user "\nFAILURE: sacctmgr add not responding\n"
-		slow_kill $sadd_pid
-		set exit_code 1
-	}
-	eof {
-		wait
-	}
-}
-
 #
 # Delete left-over input script files
 #
@@ -293,7 +223,7 @@ exec $bin_rm -f $file_in3
 #
 # Build input script file - to create original associations
 #
-exec echo "$clu - $cl1:$fs=$fs6:$gm=$gm6:$gc=$gc6:$gj=$gj6:$gn=$gn6:$gs=$gs6:$gw=$gw6:$mm=$mm6:$mc=$mc6:$mj=$mj6:$mn=$mn6:$ms=$ms6:$mw=$mw6:$qs=$qs1"     >>$file_in
+exec echo "$clu - $cl1:$class=$class1:$fs=$fs6:$gm=$gm6:$gc=$gc6:$gj=$gj6:$gn=$gn6:$gs=$gs6:$gw=$gw6:$mm=$mm6:$mc=$mc6:$mj=$mj6:$mn=$mn6:$ms=$ms6:$mw=$mw6:$qs=$qs1"     >>$file_in
 exec echo "$par - $roo"     >>$file_in
 exec echo "$acc - $nm1:$dsc=$ds1:$org=$or1:$fs=$fs5:$gm=$gm5:$gc=$gc5:$gj=$gj5:$gn=$gn5:$gs=$gs5:$gw=$gw5:$mm=$mm5:$mc=$mc5:$mj=$mj5:$mn=$mn5:$ms=$ms5:$mw=$mw5:$qs=$qs1"     >>$file_in
 exec echo "$acc - $nm2:$dsc=$ds2:$org=$or2:$fs=$fs4:$gm=$gm4:$gc=$gc4:$gj=$gj4:$gn=$gn4:$gs=$gs4:$gw=$gw4:$mm=$mm4:$mc=$mc4:$mj=$mj4:$mn=$mn4:$ms=$ms4:$mw=$mw4:$qs=$qs1"     >>$file_in
@@ -307,7 +237,7 @@ exec echo "$usr - $us2:$coo=$nm3:$dac=$nm2:$fs=$fs2:$mm=$mm1:$mc=$mc1:$mj=$mj1:$
 #
 # Second input file - to modify and add associations to the original 
 #
-exec echo "$clu - $cl1"     >>$file_in2
+exec echo "$clu - $cl1:$class=$class2"     >>$file_in2
 exec echo "$par - $roo"     >>$file_in2
 exec echo "$acc - $nm1"     >>$file_in2
 exec echo "$acc - $nm3:$dsc=$ds1:$org=$or1:$fs=$fs5:$gm=$gm5:$gc=$gc5:$gj=$gj5:$gn=$gn5:$gs=$gs5:$gw=$gw5:$mm=$mm5:$mc=$mc5:$mj=$mj5:$mn=$mn5:$ms=$ms5:$mw=$mw5:$qs=$qs1"     >>$file_in2
@@ -324,7 +254,7 @@ exec echo "$usr - $us3:$dac=$nm2:$fs=$fs2:$mm=$mm1:$mc=$mc1:$mj=$mj1:$mn=$mn1:$m
 #
 # Third input file - to replace all previous
 #
-exec echo "$clu - $cl1:$fs=$fs6:$gm=$gm6:$gc=$gc6:$gj=$gj6:$gn=$gn6:$gs=$gs6:$gw=$gw6:$mm=$mm6:$mc=$mc6:$mj=$mj6:$mn=$mn6:$ms=$ms6:$mw=$mw6:$qs=$qs1"    >>$file_in3
+exec echo "$clu - $cl1:$class=$class3:$fs=$fs6:$gm=$gm6:$gc=$gc6:$gj=$gj6:$gn=$gn6:$gs=$gs6:$gw=$gw6:$mm=$mm6:$mc=$mc6:$mj=$mj6:$mn=$mn6:$ms=$ms6:$mw=$mw6:$qs=$qs1"    >>$file_in3
 exec echo "$par - $roo"    >>$file_in3
 exec echo "$acc - $nm1:$dsc=$ds1:$org=$or1:$fs=$fs5:$gm=$gm5:$gc=$gc5:$gj=$gj5:$gn=$gn5:$gs=$gs5:$gw=$gw5:$mm=$mm5:$mc=$mc5:$mj=$mj5:$mn=$mn5:$ms=$ms5:$mw=$mw5:$qs=$qs1"    >>$file_in3
 exec echo "$acc - $nm3:$dsc=$ds3:$org=$or3:$fs=$fs5:$gm=$gm5:$gc=$gc5:$gj=$gj5:$gn=$gn5:$gs=$gs5:$gw=$gw5:$mm=$mm5:$mc=$mc5:$mj=$mj5:$mn=$mn5:$ms=$ms5:$mw=$mw5:$qs=$qs1"    >>$file_in3
@@ -339,20 +269,6 @@ exec echo "$par - $nm3"    >>$file_in3
 exec echo "$usr - $us2:$coo=$nm3:$dac=$nm1:$fs=$fs3:$mm=$mm3:$mc=$mc3:$mj=$mj3:$mn=$mn3:$ms=$ms3:$mw=$mw3:$qs=$qs1"    >>$file_in3
 exec echo "$usr - $us3:$dac=$nm1:$fs=$fs3:$mm=$mm3:$mc=$mc3:$mj=$mj3:$mn=$mn3:$ms=$ms3:$mw=$mw3:$qs=$qs1"    >>$file_in3
 
-#
-# Check accounting config and bail if not found.
-#
-if { [test_account_storage] == 0 } {
-	send_user "\nWARNING: This test can't be run without a usable AccountStorageType\n"
-	exit 0
-}
-
-if { [string compare [check_accounting_admin_level] "Administrator"] } {
-	send_user "\nWARNING: This test can't be run without being an Accounting administrator.\nUse sacctmgr mod user \$USER_NAME admin=admin.\n"
-	exit 0
-}
-
-
 #
 # Use sacctmgr to remove the test cluster
 #
@@ -601,7 +517,11 @@ expect {
 	        send_user "FAILURE: there was a problem with the request\n"
 	    	incr exit_code 1
 	}
-	-re "(sacctmgr: For cluster $cl1)" {
+	-re "For cluster $cl1" {
+		incr matches
+		exp_continue
+	}
+	"Classification: $class1" {
 		incr matches
 		exp_continue
 	}
@@ -615,7 +535,7 @@ expect {
 	}
 }
 
-if {$matches != 1} {
+if {$matches != 2} {
 	send_user "\nFAILURE:  File load 1 incorrect with only $matches.\n"
 	incr exit_code 1
 }
@@ -721,7 +641,12 @@ expect {
 	        send_user "FAILURE: there was a problem with the request\n"
 	    	incr exit_code 1
 	}
-	-re "(sacctmgr: For cluster $cl1)" {
+	-re "For cluster $cl1" {
+		incr matches
+		exp_continue
+	}
+
+	"$class1 -> $class2" {
 		incr matches
 		exp_continue
 	}
@@ -735,8 +660,8 @@ expect {
 	}
 }
 
-if {$matches != 1} {
-	send_user "\nFAILURE:  File load 1 incorrect with only $matches.\n"
+if {$matches != 2} {
+	send_user "\nFAILURE:  File load 2 incorrect with only $matches.\n"
 	incr exit_code 1
 }
 
@@ -857,7 +782,11 @@ expect {
 	        send_user "FAILURE: there was a problem with the request\n"
 	    	incr exit_code 1
 	}
-	-re "(sacctmgr: For cluster $cl1)" {
+	-re "For cluster $cl1" {
+		incr matches
+		exp_continue
+	}
+	"Classification: $class3" {
 		incr matches
 		exp_continue
 	}
@@ -871,8 +800,8 @@ expect {
 	}
 }
 
-if {$matches != 1} {
-	send_user "\nFAILURE:  File load 1 incorrect with only $matches.\n"
+if {$matches != 2} {
+	send_user "\nFAILURE:  File load 3 incorrect with only $matches.\n"
 	incr exit_code 1
 }
 
@@ -992,30 +921,6 @@ exec $bin_rm -f $file_in
 exec $bin_rm -f $file_in2
 exec $bin_rm -f $file_in3
 
-#
-# Use sacctmgr to delete the test account
-#
-set damatches 0
-set sadel_pid [spawn $sacctmgr -i delete account $test_acct]
-expect {
-	-re "Deleting account" {
-		incr damatches
-		exp_continue
-	}
-	timeout {
-		send_user "\nFAILURE: sacctmgr delete not responding\n"
-		slow_kill $sadel_pid
-		set exit_code 1
-	}
-	eof {
-		wait
-	}
-}
-if {$damatches != 1} {
-	send_user "\nFAILURE: sacctmgr had a problem deleting account\n"
-	set exit_code 1
-}
-
 if {$exit_code == 0} {
 	send_user "\nSUCCESS\n"
 } else {
diff --git a/testsuite/expect/test21.23 b/testsuite/expect/test21.23
index 230a678aef7f4608fe3ffd15b17d62f4f8792fca..30df0e2679cd1e7d8b4055867edd68b3d6408111 100755
--- a/testsuite/expect/test21.23
+++ b/testsuite/expect/test21.23
@@ -10,10 +10,11 @@
 # Copyright (C) 2008 Lawrence Livermore National Security.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Joseph Donaghy <donaghy1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
@@ -46,6 +47,10 @@ if { [test_account_storage] == 0 } {
 	send_user "\nWARNING: This test can't be run without a usable AccountStorageType\n"
 	exit 0
 }
+if { [test_track_wckey] == 0} {
+	send_user "\nWARNING: This test can't be run without TrackWCKey being set\n"
+	exit 0
+}
 
 if { [string compare [check_accounting_admin_level] "Administrator"] } {
 	send_user "\nWARNING: This test can't be run without being an Accounting administrator.\nUse sacctmgr mod user \$USER_NAME set admin=admin.\n"
@@ -133,7 +138,6 @@ expect {
 # Spawn a job via salloc using this wckey
 #
 set job_id     0
-set have_wckey 0
 set matches    0
 set timeout    $max_job_delay
 spawn $salloc -N1 --account=$test_acct --wckey=$wk
@@ -145,14 +149,9 @@ expect {
 		exp_continue
 	}
 	-re "WCKey=$wk" {
-		incr have_wckey
 		incr matches
 		exp_continue
 	}
-	-re "WCKey=" {
-		incr have_wckey
-		exp_continue
-	}
 	timeout {
 		send_user "\nFAILURE: salloc not responding\n"
 		set exit_code 1
@@ -161,11 +160,6 @@ expect {
 		wait
 	}
 }
-if {$have_wckey == 0} {
-	send_user "\nWARNING: TrackWCKey is disabled, can not continue test\n"
-	exec $sacctmgr -i delete account $test_acct
-	exit $exit_code
-}
 
 spawn $sacct -j $job_id --fields=wckey
 	expect {
diff --git a/testsuite/expect/test21.24 b/testsuite/expect/test21.24
index b20d73474fb079449e4e625a5ea9e7a9be76adbf..29eac34e2fdbdaf9ac067164e2ae852f33d1d409 100755
--- a/testsuite/expect/test21.24
+++ b/testsuite/expect/test21.24
@@ -11,10 +11,11 @@
 # Copyright (C) 2008 Lawrence Livermore National Security.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Joseph Donaghy <donaghy1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
@@ -248,8 +249,8 @@ if { $exit_code } {
 }
 
 #add cluster
-#name QOS Fairshare GrpCPUMins GrpCPUs GrpJobs GrpNodes GrpSubmitJobs GrpWall MaxCPUMins MaxCPUs MaxJobs MaxNodes MaxSubmitJobs MaxWallDurationPerJob
-incr exit_code [add_cluster "$tc3" "$qs2" "$fs6" "$gm6" "$gc6" "$gj6" "$gn6" "$gs6" "$gw6" "$mm6" "$mc6" "$mj6" "$mn6" "$ms6" "$mw6"]
+#name QOS Fairshare GrpCPUs GrpJobs GrpNodes GrpSubmitJobs MaxCPUMins MaxCPUs MaxJobs MaxNodes MaxSubmitJobs MaxWallDurationPerJob
+incr exit_code [add_cluster "$tc3" "$qs2" "$fs6" "$gc6" "$gj6" "$gn6" "$gs6" "$mm6" "$mc6" "$mj6" "$mn6" "$ms6" "$mw6"]
 if { $exit_code } {
 	remove_user "" "" "$us1,$us2,$us3,$us4"
 	remove_acct "" "$nm1,$nm2,$nm3,$nm4"
@@ -374,7 +375,7 @@ expect {
 	        send_user "FAILURE: there was a problem with the sacctmgr command\n"
 	    	incr exit_code 1
 	}
-	-re "($clu - $cl3:$fs=$fs6:$gm=$gm6:$gc=$gc6:$gj=$gj6:$gn=$gn6:$gs=$gs6:$gw=$gw6:$mp=$mm6:$mu=$mc6:$mj=$mj6:$mnj=$mn6:$ms=$ms6:$md=$mw6:$qs='$qs2')" {
+	-re "($clu - $cl3:$fs=$fs6:$gc=$gc6:$gj=$gj6:$gn=$gn6:$gs=$gs6:$mp=$mm6:$mu=$mc6:$mj=$mj6:$mnj=$mn6:$ms=$ms6:$md=$mw6:$qs='$qs2')" {
 		send_user "\nmatch 1\n"
 		incr matches
 		exp_continue
diff --git a/testsuite/expect/test21.25 b/testsuite/expect/test21.25
new file mode 100755
index 0000000000000000000000000000000000000000..4cfca560f7147c87691b3586dad54693032ea6c8
--- /dev/null
+++ b/testsuite/expect/test21.25
@@ -0,0 +1,94 @@
+#!/usr/bin/expect
+############################################################################
+# Purpose: Test of SLURM functionality
+#          sacctmgr show config
+#
+# Output:  "TEST: #.#" followed by "SUCCESS" if test was successful, OR
+#          "FAILURE: ..." otherwise with an explanation of the failure, OR
+#          anything else indicates a failure mode that must be investigated.
+############################################################################
+# Copyright (C) 2008 Lawrence Livermore National Security.
+# Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+# Written by Joseph Donaghy <donaghy1@llnl.gov>
+# CODE-OCEC-09-009. All rights reserved.
+# 
+# This file is part of SLURM, a resource management program.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
+#  
+# SLURM is free software; you can redistribute it and/or modify it under
+# the terms of the GNU General Public License as published by the Free
+# Software Foundation; either version 2 of the License, or (at your option)
+# any later version.
+# 
+# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+# details.
+# 
+# You should have received a copy of the GNU General Public License along
+# with SLURM; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+############################################################################
+source ./globals
+
+set test_id     "21.25"
+set exit_code   0
+
+print_header $test_id
+
+#
+# Use sacctmgr to show the current configuration
+#
+set slurm_conf_matches    0
+set slurmdbd_conf_matches 0
+spawn $sacctmgr show config
+expect {
+	-re "You are not running a supported accounting_storage plugin" {
+		set slurm_conf_matches    2
+		set slurmdbd_conf_matches 2
+		exp_continue
+	}
+	-re "AccountingStorageType *= *accounting_storage/slurmdbd" {
+		incr slurm_conf_matches
+		exp_continue
+	}
+	-re "AccountingStorageType" {
+		incr slurm_conf_matches
+		set slurmdbd_conf_matches 2
+		exp_continue
+	}
+	-re "SLURM_CONF" {
+		incr slurm_conf_matches
+		exp_continue
+	}
+	-re "DbdHost" {
+		incr slurmdbd_conf_matches
+		exp_continue
+	}
+	-re "SLURMDBD_CONF" {
+		incr slurmdbd_conf_matches
+		exp_continue
+	}
+	timeout {
+		send_user "\nFAILURE: sacctmgr add not responding\n"
+		set exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+if {$slurm_conf_matches != 2} {
+	send_user "\nFAILURE:  sacctmgr failed to show slurm_conf values\n"
+	set exit_code 1
+}
+if {$slurmdbd_conf_matches != 2} {
+	send_user "\nFAILURE:  sacctmgr failed to show slurmdbd configuration\n"
+	set exit_code 1
+}
+
+if {$exit_code == 0} {
+	send_user "\nSUCCESS\n"
+}
+
+exit $exit_code
diff --git a/testsuite/expect/test21.3 b/testsuite/expect/test21.3
index 55086164a51d695a97b48ad7fc42312020fa3dc5..ed8d3bcc4ef40eca813ab8702bb9d288924316c8 100755
--- a/testsuite/expect/test21.3
+++ b/testsuite/expect/test21.3
@@ -10,10 +10,11 @@
 # Copyright (C) 2008 Lawrence Livermore National Security.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Joseph Donaghy <donaghy1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test21.4 b/testsuite/expect/test21.4
index bd7ecca81d1d15e815628a10489d9cdcad59a88c..8df35976264173d0366e4d584c65413471a8289d 100755
--- a/testsuite/expect/test21.4
+++ b/testsuite/expect/test21.4
@@ -10,10 +10,11 @@
 # Copyright (C) 2008 Lawrence Livermore National Security.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Joseph Donaghy <donaghy1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test21.5 b/testsuite/expect/test21.5
index 4f83bef5e64d813fbe2129fe8c44461eed6f5b70..e2351232809caebbf31e86ad809cdb7cdfe00b2d 100755
--- a/testsuite/expect/test21.5
+++ b/testsuite/expect/test21.5
@@ -10,10 +10,11 @@
 # Copyright (C) 2008 Lawrence Livermore National Security.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Joseph Donaghy <donaghy1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
@@ -44,12 +45,10 @@ set mod		modify
 set nams	Names
 set nam		Name
 set fs		Fairshare
-set gm		GrpCPUMins
 set gc		GrpCPUs
 set gj		GrpJobs
 set gs		GrpSubmitJobs
 set gn		GrpNodes
-set gw		GrpWall
 set mm		MaxCPUMins
 set mc		MaxCPUs
 set mj		MaxJobs
@@ -59,12 +58,10 @@ set mw		MaxWall
 set clu		cluster
 set tc1		tcluster1
 set fs1		2500
-set gm1		1000000
 set gc1		50
 set gj1		100
 set gs1		500
 set gn1		300
-set gw1		00:45:00
 set mc1		100
 set mm1		100000
 set mj1		500
@@ -152,8 +149,8 @@ set my_pid [spawn $sacctmgr -i $del $clu $tc1]
 #
 # Use sacctmgr to create a cluster
 #
-set sadd_pid [spawn $sacctmgr -i add $clu $nams=$tc1 $fs=$fs1 $gm=$gm1 \
-$gc=$gc1 $gj=$gj1 $gn=$gn1 $gs=$gs1 $gw=$gw1 $mc=$mc1 $mm=$mm1 \
+set sadd_pid [spawn $sacctmgr -i add $clu $nams=$tc1 $fs=$fs1 \
+$gc=$gc1 $gj=$gj1 $gn=$gn1 $gs=$gs1 $mc=$mc1 $mm=$mm1 \
 $mj=$mj1 $ms=$ms1 $mn=$mn1 $mw=$mw1]
 expect {
 	-re "privilege to preform this action" {
@@ -176,10 +173,6 @@ expect {
 		incr amatches
 		exp_continue
 	}
-	-re "$gm *= $gm1" {
-		incr amatches
-		exp_continue
-	}
 	-re "$gc *= $gc1" {
 		incr amatches
 		exp_continue
@@ -196,10 +189,6 @@ expect {
 		incr amatches
 		exp_continue
 	}
-	-re "$gw *= $gw1" {
-		incr amatches
-		exp_continue
-	}
 	-re "$mm *= $mm1" {
 		incr amatches
 		exp_continue
@@ -237,7 +226,7 @@ if {$access_err != 0} {
 	send_user "\nWARNING: not authorized to perform this test\n"
 	exit $exit_code
 }
-if {$amatches != 16} {
+if {$amatches != 14} {
 	send_user "\nFAILURE:  sacctmgr had a problem adding clusters got $amatches\n"
 	set exit_code 1
 }
diff --git a/testsuite/expect/test21.6 b/testsuite/expect/test21.6
index b738004445c03a26997823c58cad35b9460a070d..a88aa7a8e1328e21760c3f410316a005bc3f3702 100755
--- a/testsuite/expect/test21.6
+++ b/testsuite/expect/test21.6
@@ -10,10 +10,11 @@
 # Copyright (C) 2008 Lawrence Livermore National Security.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Joseph Donaghy <donaghy1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
@@ -44,12 +45,10 @@ set mod		modify
 set nams	Names
 set nam		Name
 set fs		Fairshare
-set gm		GrpCPUMins
 set gc		GrpCPUs
 set gj		GrpJobs
 set gs		GrpSubmitJobs
 set gn		GrpNodes
-set gw		GrpWall
 set mm		MaxCPUMins
 set mc		MaxCPUs
 set mj		MaxJobs
@@ -61,12 +60,10 @@ set tc1		tcluster1
 set tc2		tcluster2
 set tc3		tcluster3
 set fs1		2500
-set gm1		1000000
 set gc1		50
 set gj1		100
 set gs1		500
 set gn1		300
-set gw1		00:45:00
 set mc1		100
 set mm1		100000
 set mj1		500
@@ -156,7 +153,7 @@ set my_pid [spawn $sacctmgr -i $del $clu $tc1,$tc2,$tc3]
 # Use sacctmgr to create a cluster
 #
 set sadd_pid [spawn $sacctmgr $add $clu $nams=$tc1,$tc2,$tc3 $fs=$fs1 \
-$gm=$gm1 $gc=$gc1 $gj=$gj1 $gn=$gn1 $gs=$gs1 $gw=$gw1 $mc=$mc1 $mm=$mm1 \
+$gc=$gc1 $gj=$gj1 $gn=$gn1 $gs=$gs1 $mc=$mc1 $mm=$mm1 \
 $mj=$mj1 $ms=$ms1 $mn=$mn1 $mw=$mw1]
 
 expect {
@@ -188,10 +185,6 @@ expect {
 		incr amatches
 		exp_continue
 	}
-	-re "$gm *= $gm1" {
-		incr amatches
-		exp_continue
-	}
 	-re "$gc *= $gc1" {
 		incr amatches
 		exp_continue
@@ -208,10 +201,6 @@ expect {
 		incr amatches
 		exp_continue
 	}
-	-re "$gw *= $gw1" {
-		incr amatches
-		exp_continue
-	}
 	-re "$mm *= $mm1" {
 		incr amatches
 		exp_continue
@@ -258,7 +247,7 @@ if {$access_err != 0} {
 	send_user "\nWARNING: not authorized to perform this test\n"
 	exit $exit_code
 }
-if {$amatches != 20} {
+if {$amatches != 18} {
 	send_user "\nFAILURE:  sacctmgr had a problem adding clusters got $amatches\n"
 	set exit_code 1
 }
diff --git a/testsuite/expect/test21.7 b/testsuite/expect/test21.7
index 9e61b632dfe6dbcb04039e00a6f9a5eef72653ca..09764c2ae528f7b3f9939c5ab6adc3f2ae7708ab 100755
--- a/testsuite/expect/test21.7
+++ b/testsuite/expect/test21.7
@@ -10,10 +10,11 @@
 # Copyright (C) 2008 Lawrence Livermore National Security.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Joseph Donaghy <donaghy1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
@@ -44,12 +45,10 @@ set mod		modify
 set nams	Names
 set nam		Name
 set fs		Fairshare
-set gm		GrpCPUMins
 set gc		GrpCPUs
 set gj		GrpJobs
 set gs		GrpSubmitJobs
 set gn		GrpNodes
-set gw		GrpWall
 set mm		MaxCPUMins
 set mc		MaxCPUs
 set mj		MaxJobs
@@ -61,12 +60,10 @@ set tc1		tcluster1
 set tc2		tcluster2
 set tc3		tcluster3
 set fs1		2500
-set gm1		1000000
 set gc1		50
 set gj1		100
 set gs1		500
 set gn1		300
-set gw1		00:45:00
 set mc1		100
 set mm1		100000
 set mj1		500
@@ -95,7 +92,7 @@ if { [test_account_storage] == 0 } {
 # Use sacctmgr to create a cluster
 #
 set sadd_pid [spawn $sacctmgr $add $clu $nams=$tc1,$tc2,$tc3 $fs=$fs1  \
-$gm=$gm1 $gc=$gc1 $gj=$gj1 $gn=$gn1 $gs=$gs1 $gw=$gw1 $mc=$mc1 $mm=$mm1 \
+$gc=$gc1 $gj=$gj1 $gn=$gn1 $gs=$gs1 $mc=$mc1 $mm=$mm1 \
 $mj=$mj1 $ms=$ms1 $mn=$mn1 $mw=$mw1]
 expect {
 	-re "privilege to preform this action" {
@@ -126,10 +123,6 @@ expect {
 		incr amatches
 		exp_continue
 	}
-	-re "$gm *= $gm1" {
-		incr amatches
-		exp_continue
-	}
 	-re "$gc *= $gc1" {
 		incr amatches
 		exp_continue
@@ -146,10 +139,6 @@ expect {
 		incr amatches
 		exp_continue
 	}
-	-re "$gw *= $gw1" {
-		incr amatches
-		exp_continue
-	}
 	-re "$mm *= $mm1" {
 		incr amatches
 		exp_continue
@@ -196,7 +185,7 @@ if {$access_err != 0} {
 	send_user "\nWARNING: not authorized to perform this test\n"
 	exit $exit_code
 }
-if {$amatches != 20} {
+if {$amatches != 18} {
 	send_user "\nFAILURE:  sacctmgr had a problem adding clusters\n"
 	set exit_code 1
 }
@@ -208,7 +197,7 @@ if { ![check_acct_associations] } {
 #
 # Use sacctmgr to list the addition of cluster
 #
-set slist_pid [spawn $sacctmgr $lis $clu format=$fs,$gm,$gc,$gj,$gs,$gn,$gw,$mm,$mc,$mj,$ms,$mn,$mw ]
+set slist_pid [spawn $sacctmgr $lis $clu format=$fs,$gc,$gj,$gs,$gn,$mm,$mc,$mj,$ms,$mn,$mw ]
 expect {
 	-re "Cluster" {
 		incr lmatches
@@ -218,7 +207,7 @@ expect {
 		incr lmatches
 		exp_continue
 	}
-	-re "$fs1 *$gm1 *$gc1 *$gj1 *$gs1 *$gn1 *$gw1 *$mm1 *$mc1 *$mj1 *$ms1 *$mn1 *$mw1" {
+	-re "$fs1 *$gc1 *$gj1 *$gs1 *$gn1 *$mm1 *$mc1 *$mj1 *$ms1 *$mn1 *$mw1" {
 		incr lmatches
 		exp_continue
 	}
@@ -226,7 +215,7 @@ expect {
 		incr lmatches
 		exp_continue
 	}
-	-re "$fs1 *$gm1 *$gc1 *$gj1 *$gs1 *$gn1 *$gw1 *$mm1 *$mc1 *$mj1 *$ms1 *$mn1 *$mw1" {
+	-re "$fs1 *$gc1 *$gj1 *$gs1 *$gn1 *$mm1 *$mc1 *$mj1 *$ms1 *$mn1 *$mw1" {
 		incr lmatches
 		exp_continue
 	}
@@ -234,7 +223,7 @@ expect {
 		incr lmatches
 		exp_continue
 	}
-	-re "$fs1 *$gm1 *$gc1 *$gj1 *$gs1 *$gn1 *$gw1 *$mm1 *$mc1 *$mj1 *$ms1 *$mn1 *$mw1" {
+	-re "$fs1 *$gc1 *$gj1 *$gs1 *$gn1 *$mm1 *$mc1 *$mj1 *$ms1 *$mn1 *$mw1" {
 		incr lmatches
 		exp_continue
 	}
diff --git a/testsuite/expect/test21.8 b/testsuite/expect/test21.8
index 05410591e8c5f37af0f8d89e0adb95e6fbbd238b..599bcb23b18ab3bf0108b76f19a72a723ad9ae22 100755
--- a/testsuite/expect/test21.8
+++ b/testsuite/expect/test21.8
@@ -10,10 +10,11 @@
 # Copyright (C) 2008 Lawrence Livermore National Security.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Joseph Donaghy <donaghy1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
@@ -45,12 +46,10 @@ set mod		modify
 set nams	Names
 set nam		Name
 set fs		Fairshare
-set gm		GrpCPUMins
 set gc		GrpCPUs
 set gj		GrpJobs
 set gs		GrpSubmitJobs
 set gn		GrpNodes
-set gw		GrpWall
 set mm		MaxCPUMins
 set mc		MaxCPUs
 set mj		MaxJobs
@@ -63,24 +62,20 @@ set tc2		tcluster2
 set tc3		tcluster3
 set fs1		2500
 set fs2		1375
-set gm1		1000
 set gc1		20
 set gj1		100
 set gs1		300
 set gn1		100
-set gw1		00:45:00
 set mc1		200
 set mm1		100000
 set mj1		500
 set ms1		400
 set mn1		200
 set mw1		01:00:00
-set gm2		2000
 set gc2		50
 set gj2		200
 set gs2		400
 set gn2		150
-set gw2		00:45:00
 set mc2		100
 set mm2		20000
 set mj2		600
@@ -109,7 +104,7 @@ if { [test_account_storage] == 0 } {
 # Use sacctmgr to create a cluster
 #
 set sadd_pid [spawn $sacctmgr $add $clu $nams=$tc1,$tc2,$tc3 $fs=$fs1  \
-$gm=$gm1 $gc=$gc1 $gj=$gj1 $gn=$gn1 $gs=$gs1 $gw=$gw1 $mc=$mc1 $mm=$mm1 \
+$gc=$gc1 $gj=$gj1 $gn=$gn1 $gs=$gs1 $mc=$mc1 $mm=$mm1 \
 $mj=$mj1 $ms=$ms1 $mn=$mn1 $mw=$mw1]
 expect {
 	-re "privilege to preform this action" {
@@ -140,10 +135,6 @@ expect {
 		incr amatches
 		exp_continue
 	}
-	-re "$gm *= $gm1" {
-		incr amatches
-		exp_continue
-	}
 	-re "$gc *= $gc1" {
 		incr amatches
 		exp_continue
@@ -160,10 +151,6 @@ expect {
 		incr amatches
 		exp_continue
 	}
-	-re "$gw *= $gw1" {
-		incr amatches
-		exp_continue
-	}
 	-re "$mm *= $mm1" {
 		incr amatches
 		exp_continue
@@ -210,7 +197,7 @@ if {$access_err != 0} {
 	send_user "\nWARNING: not authorized to perform this test\n"
 	exit $exit_code
 }
-if {$amatches != 20} {
+if {$amatches != 18} {
 	send_user "\nFAILURE:  sacctmgr had a problem adding clusters\n"
 	set exit_code 1
 }
@@ -223,7 +210,7 @@ if { ![check_acct_associations] } {
 # Use sacctmgr to modify one cluster
 #
 set smod_pid [spawn $sacctmgr $mod $clu set $fs=$fs2   \
-$gm=$gm2 $gc=$gc2 $gj=$gj2 $gn=$gn2 $gs=$gs2 $gw=$gw2 $mc=$mc2 $mm=$mm2 \
+$gc=$gc2 $gj=$gj2 $gn=$gn2 $gs=$gs2 $mc=$mc2 $mm=$mm2 \
 $mj=$mj2 $ms=$ms2 $mn=$mn2 $mw=$mw2 where $nams=$tc2]
 expect {
 	-re "Setting" {
@@ -238,10 +225,6 @@ expect {
 		incr mmatches
 		exp_continue
 	}
-	-re "$gm *= $gm2" {
-		incr mmatches
-		exp_continue
-	}
 	-re "$gc *= $gc2" {
 		incr mmatches
 		exp_continue
@@ -258,10 +241,6 @@ expect {
 		incr mmatches
 		exp_continue
 	}
-	-re "$gw *= $gw2" {
-		incr mmatches
-		exp_continue
-	}
 	-re "$mm *= $mm2" {
 		incr mmatches
 		exp_continue
@@ -305,7 +284,7 @@ expect {
 	}
 }
 
-if {$mmatches != 17} {
+if {$mmatches != 15} {
 	send_user "\nFAILURE:  sacctmgr had a problem modifying clusters\n"
 	set exit_code 1
 }
@@ -317,7 +296,7 @@ if { ![check_acct_associations] } {
 #
 # Use sacctmgr to list the addition of cluster
 #
-set slist_pid [spawn $sacctmgr $lis $clu format=$fs,$gm,$gc,$gj,$gs,$gn,$gw,$mm,$mc,$mj,$ms,$mn,$mw]
+set slist_pid [spawn $sacctmgr $lis $clu format=$fs,$gc,$gj,$gs,$gn,$mm,$mc,$mj,$ms,$mn,$mw]
 expect {
 	-re "Cluster" {
 		incr lmatches
@@ -327,7 +306,7 @@ expect {
 		incr lmatches
 		exp_continue
 	}
-	-re "$fs1 *$gm1 *$gc1 *$gj1 *$gs1 *$gn1 *$gw1 *$mm1 *$mc1 *$mj1 *$ms1 *$mn1 *$mw1" {
+	-re "$fs1 *$gc1 *$gj1 *$gs1 *$gn1 *$mm1 *$mc1 *$mj1 *$ms1 *$mn1 *$mw1" {
 		incr lmatches
 		exp_continue
 	}
@@ -335,7 +314,7 @@ expect {
 		incr lmatches
 		exp_continue
 	}
-	-re "$fs2 *$gm2 *$gc2 *$gj2 *$gs2 *$gn2 *$gw2 *$mm2 *$mc2 *$mj2 *$ms2 *$mn2 *$mw2" {
+	-re "$fs2 *$gc2 *$gj2 *$gs2 *$gn2 *$mm2 *$mc2 *$mj2 *$ms2 *$mn2 *$mw2" {
 		incr lmatches
 		exp_continue
 	}
@@ -343,7 +322,7 @@ expect {
 		incr lmatches
 		exp_continue
 	}
-	-re "$fs1 *$gm1 *$gc1 *$gj1 *$gs1 *$gn1 *$gw1 *$mm1 *$mc1 *$mj1 *$ms1 *$mn1 *$mw1" {
+	-re "$fs1 *$gc1 *$gj1 *$gs1 *$gn1 *$mm1 *$mc1 *$mj1 *$ms1 *$mn1 *$mw1" {
 		incr lmatches
 		exp_continue
 	}
diff --git a/testsuite/expect/test21.9 b/testsuite/expect/test21.9
index 372ba5c44b40917eaba1ab79611a18a50472c1ec..d4d031ccf776868a08fceb69e66b8684f331f081 100755
--- a/testsuite/expect/test21.9
+++ b/testsuite/expect/test21.9
@@ -10,10 +10,11 @@
 # Copyright (C) 2008 Lawrence Livermore National Security.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Joseph Donaghy <donaghy1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
@@ -45,12 +46,10 @@ set mod		modify
 set nams	Names
 set nam		Name
 set fs		Fairshare
-set gm		GrpCPUMins
 set gc		GrpCPUs
 set gj		GrpJobs
 set gs		GrpSubmitJobs
 set gn		GrpNodes
-set gw		GrpWall
 set mm		MaxCPUMins
 set mc		MaxCPUs
 set mj		MaxJobs
@@ -63,24 +62,20 @@ set tc2		tcluster2
 set tc3		tcluster3
 set fs1		2500
 set fs2		1375
-set gm1		1000
 set gc1		20
 set gj1		100
 set gs1		300
 set gn1		100
-set gw1		00:45:00
 set mc1		200
 set mm1		100000
 set mj1		500
 set ms1		400
 set mn1		200
 set mw1		01:00:00
-set gm2		2000
 set gc2		50
 set gj2		200
 set gs2		400
 set gn2		150
-set gw2		00:45:00
 set mc2		100
 set mm2		20000
 set mj2		600
@@ -109,7 +104,7 @@ if { [test_account_storage] == 0 } {
 # Use sacctmgr to create a cluster
 #
 set sadd_pid [spawn $sacctmgr $add $clu $nams=$tc1,$tc2,$tc3 $fs=$fs1   \
-$gm=$gm1 $gc=$gc1 $gj=$gj1 $gn=$gn1 $gs=$gs1 $gw=$gw1 $mc=$mc1 $mm=$mm1 \
+$gc=$gc1 $gj=$gj1 $gn=$gn1 $gs=$gs1 $mc=$mc1 $mm=$mm1 \
 $mj=$mj1 $ms=$ms1 $mn=$mn1 $mw=$mw1]
 expect {
 	-re "privilege to preform this action" {
@@ -140,10 +135,6 @@ expect {
 		incr amatches
 		exp_continue
 	}
-	-re "$gm *= $gm1" {
-		incr amatches
-		exp_continue
-	}
 	-re "$gc *= $gc1" {
 		incr amatches
 		exp_continue
@@ -160,10 +151,6 @@ expect {
 		incr amatches
 		exp_continue
 	}
-	-re "$gw *= $gw1" {
-		incr amatches
-		exp_continue
-	}
 	-re "$mm *= $mm1" {
 		incr amatches
 		exp_continue
@@ -210,7 +197,7 @@ if {$access_err != 0} {
 	send_user "\nWARNING: not authorized to perform this test\n"
 	exit $exit_code
 }
-if {$amatches != 20} {
+if {$amatches != 18} {
 	send_user "\nFAILURE:  sacctmgr had a problem adding clusters\n"
 	set exit_code 1
 }
@@ -223,7 +210,7 @@ if { ![check_acct_associations] } {
 # Use sacctmgr to modify one cluster
 #
 set smod_pid [spawn $sacctmgr $mod $clu set $fs=$fs2    \
-$gm=$gm2 $gc=$gc2 $gj=$gj2 $gn=$gn2 $gs=$gs2 $gw=$gw2 $mc=$mc2 $mm=$mm2 \
+$gc=$gc2 $gj=$gj2 $gn=$gn2 $gs=$gs2 $mc=$mc2 $mm=$mm2 \
 $mj=$mj2 $ms=$ms2 $mn=$mn2 $mw=$mw2 where $nams=$tc1,$tc2,$tc3]
 expect {
 	-re "Setting" {
@@ -238,10 +225,6 @@ expect {
 		incr mmatches
 		exp_continue
 	}
-	-re "$gm *= $gm2" {
-		incr mmatches
-		exp_continue
-	}
 	-re "$gc *= $gc2" {
 		incr mmatches
 		exp_continue
@@ -258,10 +241,6 @@ expect {
 		incr mmatches
 		exp_continue
 	}
-	-re "$gw *= $gw2" {
-		incr mmatches
-		exp_continue
-	}
 	-re "$mm *= $mm2" {
 		incr mmatches
 		exp_continue
@@ -305,7 +284,7 @@ expect {
 	}
 }
 
-if {$mmatches != 17} {
+if {$mmatches != 15} {
 	send_user "\nFAILURE:  sacctmgr had a problem modifying clusters\n"
 	set exit_code 1
 }
@@ -317,7 +296,7 @@ if { ![check_acct_associations] } {
 #
 # Use sacctmgr to list the addition of cluster
 #
-set slist_pid [spawn $sacctmgr $lis $clu format=$fs,$gm,$gc,$gj,$gs,$gn,$gw,$mm,$mc,$mj,$ms,$mn,$mw]
+set slist_pid [spawn $sacctmgr $lis $clu format=$fs,$gc,$gj,$gs,$gn,$mm,$mc,$mj,$ms,$mn,$mw]
 expect {
 	-re "Cluster" {
 		incr lmatches
@@ -327,7 +306,7 @@ expect {
 		incr lmatches
 		exp_continue
 	}
-	-re "$fs2 *$gm2 *$gc2 *$gj2 *$gs2 *$gn2 *$gw2 *$mm2 *$mc2 *$mj2 *$ms2 *$mn2 *$mw2" {
+	-re "$fs2 *$gc2 *$gj2 *$gs2 *$gn2 *$mm2 *$mc2 *$mj2 *$ms2 *$mn2 *$mw2" {
 		incr lmatches
 		exp_continue
 	}
@@ -335,7 +314,7 @@ expect {
 		incr lmatches
 		exp_continue
 	}
-	-re "$fs2 *$gm2 *$gc2 *$gj2 *$gs2 *$gn2 *$gw2 *$mm2 *$mc2 *$mj2 *$ms2 *$mn2 *$mw2" {
+	-re "$fs2 *$gc2 *$gj2 *$gs2 *$gn2 *$mm2 *$mc2 *$mj2 *$ms2 *$mn2 *$mw2" {
 		incr lmatches
 		exp_continue
 	}
@@ -343,7 +322,7 @@ expect {
 		incr lmatches
 		exp_continue
 	}
-	-re "$fs2 *$gm2 *$gc2 *$gj2 *$gs2 *$gn2 *$gw2 *$mm2 *$mc2 *$mj2 *$ms2 *$mn2 *$mw2" {
+	-re "$fs2 *$gc2 *$gj2 *$gs2 *$gn2 *$mm2 *$mc2 *$mj2 *$ms2 *$mn2 *$mw2" {
 		incr lmatches
 		exp_continue
 	}
diff --git a/testsuite/expect/test22.1 b/testsuite/expect/test22.1
new file mode 100755
index 0000000000000000000000000000000000000000..1811e8495301292eecac70eec9770d453a80eb0a
--- /dev/null
+++ b/testsuite/expect/test22.1
@@ -0,0 +1,1128 @@
+#!/usr/bin/expect
+############################################################################
+# Purpose: Test of SLURM functionality
+#          Test sreport cluster utilization.
+#
+# Output:  "TEST: #.#" followed by "SUCCESS" if test was successful, OR
+#          "FAILURE: ..." otherwise with an explanation of the failure, OR
+#          anything else indicates a failure mode that must be investigated.
+############################################################################
+# Copyright (C) 2008 Lawrence Livermore National Security.
+# Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+# Written by Danny Auble <da@llnl.gov>
+# CODE-OCEC-09-009. All rights reserved.
+# 
+# This file is part of SLURM, a resource management program.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
+#  
+# SLURM is free software; you can redistribute it and/or modify it under
+# the terms of the GNU General Public License as published by the Free
+# Software Foundation; either version 2 of the License, or (at your option)
+# any later version.
+# 
+# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+# details.
+# 
+# You should have received a copy of the GNU General Public License along
+# with SLURM; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+############################################################################
+source ./globals_accounting
+
+set test_id     "test22.1"
+set exit_code   0
+set matches     0
+set not_support 0
+set sql_in     "$test_id-in.sql"
+set sql_rem    "$test_id-rem.sql"
+set sql_out    "$test_id-out.sql"
+set cluster    [format "%s%s" $test_id "clus"]
+set account1   [format "%s%s" $test_id "acct1"]
+set account2   [format "%s%s" $test_id "acct2"]
+set account3   [format "%s%s" $test_id "acct3"]
+set accounts   [format "%s,%s,%s" $account1 $account2 $account3]
+set user1      [format "%s%s" $test_id "user1"]
+set user2      [format "%s%s" $test_id "user2"]
+set users      [format "%s,%s" $user1 $user2]
+set node0      [format "%s%d" $cluster 0]
+set node1      [format "%s%d" $cluster 1]
+set node0_cpus 2
+set node1_cpus 2
+set cluster_cpus [expr $node0_cpus + $node1_cpus]
+set access_err  0
+set uid [get_my_uid]
+set gid [get_my_gid]
+set timeout 120
+
+print_header $test_id
+
+# Thu Jan 31 00:00:00 2008 - needed only for the 00:00:00 midnight reference used when formatting elapsed times
+set midnight 1201766400
+
+# Mon Dec 31 23:00:00 2007
+set period_start 1199170800
+# Thu Jan 31 23:59:59 2008
+set period_end 1201852799 
+
+set start_str [timestamp -format %Y-%m-%dT%X -seconds $period_start]
+set end_str [timestamp -format %Y-%m-%dT%X -seconds $period_end]
+
+#job1
+set job1_start $period_start
+set job1_run 3900
+set job1_end [expr $job1_start+$job1_run]
+# This will give us the correct time we ran for
+set job1_diff_str [timestamp -format %X -seconds [expr $midnight+$job1_run]]
+set job1_start_str [timestamp -format %Y-%m-%dT%X -seconds $job1_start]
+set job1_end_str [timestamp -format %Y-%m-%dT%X -seconds $job1_end]
+set job1_nodes $node1
+set job1_cpus $node1_cpus
+set job1_alloc [expr $job1_run * $job1_cpus]
+set job1_acct $account1
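+# job1 therefore consumes 3900 s * 2 cpus = 7800 cpu-seconds of allocation.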
+
+#job2
+#make job eligible an hour into the allocation
+set job2_elig [expr $period_start+3600]
+#start the job 65 minutes later so we can check reserved time
+set job2_start [expr $job2_elig+3900]
+#run for a day
+set job2_run 86400
+set job2_end [expr $job2_start+$job2_run]
+# This will give us the correct time we ran for
+set job2_diff_str "1-00:00:00"
+set job2_start_str [timestamp -format %Y-%m-%dT%X -seconds $job2_start]
+set job2_end_str [timestamp -format %Y-%m-%dT%X -seconds $job2_end]
+set job2_nodes [format "%s\[%s\]" $cluster "0-1"]
+set job2_cpus [expr $node0_cpus + $node1_cpus]
+set job2_alloc [expr $job2_run * $job2_cpus]
+set job2_acct $account3
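+# job2 is eligible 65 minutes before it starts (counted as reserved time),
+# then runs a full day on both nodes: 86400 s * 4 cpus = 345600 cpu-seconds.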
+
+#job3
+#make job eligible an hour before the end of job2
+set job3_elig [expr $job2_end-3600]
+#start the job at the end of job2
+set job3_start $job2_end
+#run for 65 minutes
+set job3_run 3900
+set job3_end [expr $job3_start+$job3_run]
+# This will give us the correct time we ran for
+set job3_diff_str [timestamp -format %X -seconds [expr $midnight+$job3_run]]
+set job3_start_str [timestamp -format %Y-%m-%dT%X -seconds $job3_start]
+set job3_end_str [timestamp -format %Y-%m-%dT%X -seconds $job3_end]
+#run on just node0
+set job3_nodes $node0
+set job3_cpus $node0_cpus
+set job3_alloc [expr $job3_run * $job3_cpus]
+set job3_acct $account2
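+# job3 becomes eligible an hour before job2 ends, starts as job2 finishes,
+# and runs 65 minutes on node0: 3900 s * 2 cpus = 7800 cpu-seconds.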
+
+set acct1_alloc $job1_alloc
+set acct3_alloc $job2_alloc
+set acct2_alloc [expr $acct3_alloc + $job3_alloc]
+set total_alloc [expr $job1_alloc + $job2_alloc + $job3_alloc]
+
+set size2_alloc [expr $job1_alloc + $job3_alloc]
+set size4_alloc $job3_alloc
+
+#node0 down
+set node0_down_start [expr $period_start+(60*45)]
+set node0_down_end [expr $period_start+(60*75)]
+
+#
+# Check accounting config and bail if not found.
+#
+if { [test_account_storage] == 0 } {
+	send_user "\nWARNING: This test can't be run without a usable AccountStorageType\n"
+	exit 0
+}
+
+if { [string compare [check_accounting_admin_level] "Administrator"] } {
+	send_user "\nWARNING: This test can't be run without being an Accounting administrator.\nUse sacctmgr mod user \$USER_NAME admin=admin.\n"
+	exit 0
+}
+
+remove_user "" "" $users
+remove_acct "" $accounts
+remove_cluster "$cluster"
+#add cluster
+incr exit_code [add_cluster "$cluster" "" "" "" "" "" "" "" "" "" "" "" ""]
+if { $exit_code } {
+ 	remove_cluster "$cluster"
+	exit $exit_code
+}
+
+#add accounts
+incr exit_code [add_acct "$cluster" "" "$account1,$account2" "" "" "" "" "" "" "" "" "" "" "" "" "" "" "" "" ]
+if { $exit_code } {
+	remove_acct "" "$account1,$account2"
+	remove_cluster "$cluster"
+	exit $exit_code
+}
+
+#add accounts
+incr exit_code [add_acct "$cluster" "$account2" "$account3" "" "" "" "" "" "" "" "" "" "" "" "" "" "" "" "" ]
+if { $exit_code } {
+	remove_acct "" $accounts
+	remove_cluster "$cluster"
+	exit $exit_code
+}
+
+#add users
+incr exit_code [add_user "$cluster" $accounts $users "" "" "" "" "" "" "" "" "" "" "" "" "" "" "" "" "" "" "" ]
+if { $exit_code } {
+	remove_user "" "" $users
+	remove_acct "" $accounts
+	remove_cluster "$cluster"
+	exit $exit_code
+}
+
+#get the user association ids for the jobs we plan to add
+set user1acct1 0
+set user1acct2 0
+set user1acct3 0
+set user2acct1 0
+set user2acct2 0
+set user2acct3 0
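+# sacctmgr -p output is pipe-delimited, so the '.' in the regexps below
+# simply matches the field delimiter.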
+set my_pid [eval spawn $sacctmgr -n -p list assoc users=$users account=$accounts cluster=$cluster format="User,account,id"]
+expect {
+	-re "There was a problem" {
+	        send_user "FAILURE: there was a problem with the sacctmgr command\n"
+	    	incr exit_code 1
+	}
+	-re "$user1.$account1.($number)." {
+		set user1acct1 $expect_out(1,string)
+		exp_continue
+	}
+	-re "$user1.$account2.($number)." {
+		set user1acct2 $expect_out(1,string)
+		exp_continue
+	}
+	-re "$user1.$account3.($number)." {
+		set user1acct3 $expect_out(1,string)
+		exp_continue
+	}
+	-re "$user2.$account1.($number)." {
+		set user2acct1 $expect_out(1,string)
+		exp_continue
+	}
+	-re "$user2.$account2.($number)." {
+		set user2acct2 $expect_out(1,string)
+		exp_continue
+	}
+	-re "$user2.$account3.($number)." {
+		set user2acct3 $expect_out(1,string)
+		exp_continue
+	}
+	timeout {
+		send_user "\nFAILURE: sacctmgr list associations not responding\n"
+		slow_kill $my_pid
+		incr exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+
+if {!$user1acct1 || !$user1acct2 || !$user1acct3 || !$user2acct1 || !$user2acct2 || !$user2acct3} {
+	send_user "\nFAILURE:  Didn't get one of the user associations $user1acct1 $user1acct2 $user1acct3 $user2acct1 $user2acct2 $user2acct3.\n"
+	incr exit_code 1
+	remove_user "" "" $users
+	remove_acct "" $accounts
+	remove_cluster "$cluster"
+	exit $exit_code
+}
+
+exec $bin_rm -f $sql_in
+set file [open $sql_in "w"]
+
+# DON'T MESS WITH THIS UNLESS YOU REALLY UNDERSTAND WHAT YOU ARE DOING!!!!!
+# THIS COULD SERIOUSLY MESS UP YOUR DATABASE IF YOU ALTER THIS INCORRECTLY
+# JUST A FRIENDLY REMINDER ;)
+
+# Put in the cluster record from back before accounting existed here; we are using 'Tue Jan  1 00:00:00 2008' = 1199174400 as the start
+puts $file "insert into cluster_event_table (node_name, cluster, cpu_count, period_start, period_end, reason) values"
+puts $file "('', '$cluster', $cluster_cpus, $period_start, $period_end, 'Cluster processor count')"
+
+# Put a node down for 30 minutes, starting 45 minutes after the start, to make sure the rollups work: we should get 15 minutes in one hour and 15 in the other
+puts $file ", ('$node0', '$cluster', $node0_cpus, $node0_down_start, $node0_down_end, 'down')"
+#puts $file ", ('$node1', '$cluster', $node1_cpus, $period_start, $period_end, 'down')"
+puts $file "on duplicate key update period_start=VALUES(period_start), period_end=VALUES(period_end);"
+
+# Now put in the jobs; the first runs for an hour and 5 minutes
+puts $file "insert into job_table (jobid, associd, wckey, wckeyid, uid, gid, partition, blockid, cluster, account, eligible, submit, start, end, suspended, name, track_steps, state, comp_code, priority, req_cpus, alloc_cpus, nodelist, kill_requid, qos, deleted) values"
+puts $file "('65537', '$user1acct1', '', '0', '$uid', '$gid', 'debug', '', '$cluster', '$job1_acct', $job1_start, $job1_start, $job1_start, $job1_end, '0', 'test_job1', '0', '3', '0', '$job1_cpus', '$job1_cpus', '$job1_cpus', '$job1_nodes', '0', '0', '0')"
+puts $file ", ('65538', '$user2acct3', '', '0', '$uid', '$gid', 'debug', '', '$cluster', '$job2_acct', $job2_elig, $job2_elig, $job2_start, $job2_end, '0', 'test_job2', '0', '3', '0', '$job2_cpus', '$job2_cpus', '$job2_cpus', '$job2_nodes', '0', '0', '0')"
+puts $file ", ('65539', '$user1acct2', '', '0', '$uid', '$gid', 'debug', '', '$cluster', '$job3_acct', $job3_elig, $job3_elig, $job3_start, $job3_end, '0', 'test_job3', '0', '3', '0', '$job3_cpus', '$job3_cpus', '$job3_cpus', '$job3_nodes', '0', '0', '0')"
+puts $file "on duplicate key update id=LAST_INSERT_ID(id), eligible=VALUES(eligible), submit=VALUES(submit), start=VALUES(start), end=VALUES(end), associd=VALUES(associd), alloc_cpus=VALUES(alloc_cpus);";
+close $file
+
+exec $bin_rm -f $sql_rem
+set file [open $sql_rem "w"]
+
+puts $file "delete from cluster_event_table where cluster='$cluster';"
+puts $file "delete from job_table where cluster='$cluster';"
+puts $file "delete from cluster_day_usage_table where cluster='$cluster';"
+puts $file "delete from cluster_hour_usage_table where cluster='$cluster';"
+puts $file "delete from cluster_month_usage_table where cluster='$cluster';"
+puts $file "delete from assoc_day_usage_table where id=$user1acct1 || id=$user1acct2 || id=$user1acct3 || id=$user2acct1 || id=$user2acct2 || id=$user2acct3;"
+puts $file "delete from assoc_hour_usage_table where id=$user1acct1 || id=$user1acct2 || id=$user1acct3 || id=$user2acct1 || id=$user2acct2 || id=$user2acct3;"
+puts $file "delete from assoc_month_usage_table where id=$user1acct1 || id=$user1acct2 || id=$user1acct3 || id=$user2acct1 || id=$user2acct2 || id=$user2acct3;"
+close $file
+
+#
+# Use sacctmgr to load info
+#
+incr exit_code [archive_load $sql_in]
+
+if { $exit_code } {
+	incr exit_code [archive_load $sql_rem]
+	remove_user "" "" $users
+	remove_acct "" $accounts
+	remove_cluster "$cluster"
+	exit $exit_code
+}
+
+#
+# Use sacct to see if the job loaded
+#
+set matches 0
+set my_pid [eval spawn $sacct -p -C $cluster --fields=cluster,account,associd,start,end,elapsed --noheader --start=$start_str --end=$end_str]
+expect {
+	-re "There was a problem" {
+		send_user "FAILURE: there was a problem with the sacct command\n"
+	    	incr exit_code 1
+	}
+	-re "$cluster.$account1.$user1acct1.$job1_start_str.$job1_end_str.$job1_diff_str." {
+		send_user "got 1\n"
+		incr matches
+		exp_continue
+	}
+	-re "$cluster.$account3.$user2acct3.$job2_start_str.$job2_end_str.$job2_diff_str." {
+		send_user "got 2\n"
+		incr matches
+		exp_continue
+	}
+	-re "$cluster.$account2.$user1acct2.$job3_start_str.$job3_end_str.$job3_diff_str." {
+		send_user "got 3\n"
+		incr matches
+		exp_continue
+	}
+
+	timeout {
+		send_user "\nFAILURE: sacct not responding\n"
+		slow_kill $my_pid
+		incr exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+
+if {$matches != 3} {
+	send_user "\nFAILURE: only $matches of 3 jobs were loaded correctly.\n"
+	incr exit_code 1
+}
+
+if { $exit_code } {
+	#incr exit_code [archive_load $sql_rem]
+	remove_user "" "" $users
+	remove_acct "" $accounts
+	remove_cluster "$cluster"
+	exit $exit_code
+}
+
+
+#
+# Use sacctmgr to roll up that time period
+#
+set matches 0
+set my_pid [eval spawn $sacctmgr -i roll $start_str $end_str]
+expect {
+	-re "There was a problem" {
+	        send_user "FAILURE: there was a problem with the sacctmgr command\n"
+	    	incr exit_code 1
+	}
+	-re "$cluster" {
+		incr matches
+		exp_continue
+	}
+	-re "SUCCESS" {
+		incr matches
+		exp_continue
+	}
+	timeout {
+		send_user "\nFAILURE: sacctmgr roll not responding\n"
+		slow_kill $my_pid
+		incr exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+
+if {$matches != 1} {
+	send_user "\nFAILURE:  sacctmgr wasn't able to roll data.\n"
+	incr exit_code 1
+}
+
+if { $exit_code } {
+	incr exit_code [archive_load $sql_rem]
+	remove_user "" "" $users
+	remove_acct "" $accounts
+	remove_cluster "$cluster"
+	exit $exit_code
+}
+
+#
+# Use sreport to get cluster usage for the first hour
+#
+
+# Mon Dec 31 23:00:00 2007
+set period_start 1199170800
+
+# Tue Jan 1 00:00:00 2008
+set period_end  1199174400
+set start_str [timestamp -format %Y-%m-%dT%X -seconds $period_start]
+set end_str [timestamp -format %Y-%m-%dT%X -seconds $period_end]
+
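+# Expected figures for this hour: reported = 3600 s * 4 cpus = 14400
+# cpu-seconds; down = the first 15 minutes of the node0 outage on 2 cpus =
+# 1800; alloc = job1 on node1 for the whole hour = 7200; resv = 0; idle is
+# the remainder, 5400.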
+set reported [expr ($period_end - $period_start) * $cluster_cpus]
+set down [expr ($period_end-$node0_down_start) * $node0_cpus] 
+set alloc_sec [expr ($period_end-$job1_start) * $node1_cpus]
+set resv 0
+set idle [expr $reported - ($down + $alloc_sec + $resv)]
+
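+# Each figure is rendered as "seconds(percent%)" with the parentheses
+# escaped, since these strings are used as expect regexps below.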
+set down [format "%d\\\(%.2f%%\\\)" $down [expr double($down * 100)/$reported]] 
+set alloc [format "%d\\\(%.2f%%\\\)" $alloc_sec [expr double($alloc_sec * 100)/$reported]] 
+set resv [format "%d\\\(%.2f%%\\\)" $resv  [expr double($resv * 100)/$reported]]
+set idle [format "%d\\\(%.2f%%\\\)" $idle [expr double($idle * 100)/$reported]] 
+set reported [format "%d\\\(%.2f%%\\\)" $reported 100]
+
+send_user "\nTesting sreport for first hour\n"
+set matches 0
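+# -t secper prints times as seconds with percentages, -p selects parsable
+# (pipe-delimited) output, and -n suppresses the header line.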
+set my_pid [eval spawn $sreport cluster utilization cluster='$cluster' start=$start_str end=$end_str -tsecper -p -n format=cluster,idle,down,alloc,res,reported]
+expect {
+	-re "There was a problem" {
+		send_user "FAILURE: there was a problem with the sreport command\n"
+	    	incr exit_code 1
+	}
+	-re "$cluster.$idle.$down.$alloc.$resv.$reported." {
+		incr matches
+		exp_continue
+	}
+	timeout {
+		send_user "\nFAILURE: sreport not responding\n"
+		slow_kill $my_pid
+		incr exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+
+if {$matches != 1} {
+	send_user "\nFAILURE:  sreport didn't give good info.\n"
+	incr exit_code 1
+}
+
+if { $exit_code } {
+	incr exit_code [archive_load $sql_rem]
+	remove_user "" "" $users
+	remove_acct "" $accounts
+	remove_cluster "$cluster"
+	exit $exit_code
+}
+
+# test the UserUtilizationByAccount report
+set matches 0
+set my_pid [eval spawn $sreport cluster UserUtilizationByAccount cluster='$cluster' start=$start_str end=$end_str -tsecper -p -n format=cluster,login,account,used]
+expect {
+	-re "There was a problem" {
+		send_user "FAILURE: there was a problem with the sreport command\n"
+	    	incr exit_code 1
+	}
+	-re "$cluster.$user1.$account1.$alloc." {
+		incr matches
+		exp_continue
+	}
+	timeout {
+		send_user "\nFAILURE: sreport not responding\n"
+		slow_kill $my_pid
+		incr exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+
+if {$matches != 1} {
+	send_user "\nFAILURE:  sreport didn't give good info 2.\n"
+	incr exit_code 1
+}
+
+if { $exit_code } {
+	incr exit_code [archive_load $sql_rem]
+	remove_user "" "" $users
+	remove_acct "" $accounts
+	remove_cluster "$cluster"
+	exit $exit_code
+}
+
+# test the AccountUtilizationByUser report
+set matches 0
+set my_pid [eval spawn $sreport cluster AccountUtilizationByUser cluster='$cluster' start=$start_str end=$end_str -tsecper -p -n format=cluster,account,login,used]
+expect {
+	-re "There was a problem" {
+		send_user "FAILURE: there was a problem with the sreport command\n"
+	    	incr exit_code 1
+	}
+	-re "$cluster.root..$alloc." {
+		incr matches
+		exp_continue
+	}
+	-re "$cluster.$account1..$alloc." {
+		incr matches
+		exp_continue
+	}
+	-re "$cluster.$account1.$user1.$alloc." {
+		incr matches
+		exp_continue
+	}
+	timeout {
+		send_user "\nFAILURE: sreport not responding\n"
+		slow_kill $my_pid
+		incr exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+
+if {$matches != 3} {
+	send_user "\nFAILURE:  sreport didn't give good info 3.\n"
+	incr exit_code 1
+}
+
+if { $exit_code } {
+	incr exit_code [archive_load $sql_rem]
+	remove_user "" "" $users
+	remove_acct "" $accounts
+	remove_cluster "$cluster"
+	exit $exit_code
+}
+
+# test the User Top report
+set matches 0
+set my_pid [eval spawn $sreport user top cluster='$cluster' start=$start_str end=$end_str -tsecper -p -n format=cluster,account,login,used]
+expect {
+	-re "There was a problem" {
+		send_user "FAILURE: there was a problem with the sreport command\n"
+	    	incr exit_code 1
+	}
+	-re "$cluster.$account1.$user1.$alloc." {
+		incr matches
+		exp_continue
+	}
+	timeout {
+		send_user "\nFAILURE: sreport not responding\n"
+		slow_kill $my_pid
+		incr exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+
+if {$matches != 1} {
+	send_user "\nFAILURE: sreport user top didn't give good info.\n"
+	incr exit_code 1
+}
+
+if { $exit_code } {
+	incr exit_code [archive_load $sql_rem]
+	remove_user "" "" $users
+	remove_acct "" $accounts
+	remove_cluster "$cluster"
+	exit $exit_code
+}
+
+# test the Job Size report
+set matches 0
+set my_pid [eval spawn $sreport job size grouping=2,4 cluster='$cluster' start=$start_str end=$end_str -tsec -p -n]
+expect {
+	-re "There was a problem" {
+		send_user "FAILURE: there was a problem with the sreport command\n"
+	    	incr exit_code 1
+	}
+	-re "$cluster.$account1.0.$alloc_sec.0." {
+		incr matches
+		exp_continue
+	}
+	-re "$cluster.$account2.0.0.0." {
+		incr matches
+		exp_continue
+	}
+	timeout {
+		send_user "\nFAILURE: sreport not responding\n"
+		slow_kill $my_pid
+		incr exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+
+if {$matches != 2} {
+	send_user "\nFAILURE:  sreport didn't give good info 4 $matches.\n"
+	incr exit_code 1
+}
+
+if { $exit_code } {
+	incr exit_code [archive_load $sql_rem]
+	remove_user "" "" $users
+	remove_acct "" $accounts
+	remove_cluster "$cluster"
+	exit $exit_code
+}
+
+
+#
+# Use sreport to get cluster usage for the second hour
+#
+
+# Tue Jan 1 00:00:00 2008
+set period_start 1199174400
+
+# Tue Jan 1 01:00:00 2008
+set period_end  1199178000
+set start_str [timestamp -format %Y-%m-%dT%X -seconds $period_start]
+set end_str [timestamp -format %Y-%m-%dT%X -seconds $period_end]
+
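+# Second hour: down is the remaining 15 minutes of the node0 outage, alloc
+# the 5-minute tail of job1, and resv the time job2 was eligible but still
+# pending (the entire hour, so the idle estimate below goes negative and is
+# clamped).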
+set reported [expr ($period_end - $period_start) * $cluster_cpus]
+set down [expr ($node0_down_end-$period_start) * $node0_cpus]
+set alloc_sec [expr ($job1_end-$period_start) * $job1_cpus]
+set resv [expr ($period_end - $job2_elig) * $job2_cpus]
+set idle [expr $reported - ($down + $alloc_sec + $resv)]
+# Apply the same logic the plugin uses to figure out the correct
+# idle time and resv time
+if {$idle < 0 } {
+	set resv [expr $resv + $idle]
+	set idle 0
+	if {$resv < 0} {
+		set resv 0
+	}
+}
+set down [format "%d\\\(%.2f%%\\\)" $down [expr double($down * 100)/$reported]] 
+set alloc [format "%d\\\(%.2f%%\\\)" $alloc_sec [expr double($alloc_sec * 100)/$reported]] 
+set resv [format "%d\\\(%.2f%%\\\)" $resv  [expr double($resv * 100)/$reported]]
+set idle [format "%d\\\(%.2f%%\\\)" $idle [expr double($idle * 100)/$reported]] 
+set reported [format "%d\\\(%.2f%%\\\)" $reported 100]
+
+send_user "\nTesting sreport for second hour\n"
+set matches 0
+set my_pid [eval spawn $sreport cluster utilization cluster='$cluster' start=$start_str end=$end_str -tsecper -p -n format=cluster,idle,down,alloc,res,reported]
+expect {
+	-re "There was a problem" {
+		send_user "FAILURE: there was a problem with the sreport command\n"
+	    	incr exit_code 1
+	}
+	-re "$cluster.$idle.$down.$alloc.$resv.$reported." {
+		incr matches
+		exp_continue
+	}
+	timeout {
+		send_user "\nFAILURE: sreport not responding\n"
+		slow_kill $my_pid
+		incr exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+
+if {$matches != 1} {
+	send_user "\nFAILURE:  sreport didn't give good info 5 $matches.\n"
+	send_user "we are looking for $cluster.$idle.$down.$alloc.$resv.$reported.\n"
+	incr exit_code 1
+}
+
+if { $exit_code } {
+	incr exit_code [archive_load $sql_rem]
+	remove_user "" "" $users
+	remove_acct "" $accounts
+	remove_cluster "$cluster"
+	exit $exit_code
+}
+
+# test the UserUtilizationByAccount report
+set matches 0
+set my_pid [eval spawn $sreport cluster UserUtilizationByAccount cluster='$cluster' start=$start_str end=$end_str -tsecper -p -n format=cluster,login,account,used]
+expect {
+	-re "There was a problem" {
+		send_user "FAILURE: there was a problem with the sreport command\n"
+	    	incr exit_code 1
+	}
+	-re "$cluster.$user1.$account1.$alloc." {
+		incr matches
+		exp_continue
+	}
+	timeout {
+		send_user "\nFAILURE: sreport not responding\n"
+		slow_kill $my_pid
+		incr exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+
+if {$matches != 1} {
+	send_user "\nFAILURE:  sreport didn't give good info 6.\n"
+	incr exit_code 1
+}
+
+if { $exit_code } {
+	incr exit_code [archive_load $sql_rem]
+	remove_user "" "" $users
+	remove_acct "" $accounts
+	remove_cluster "$cluster"
+	exit $exit_code
+}
+
+# test the AccountUtilizationByUser report
+set matches 0
+set my_pid [eval spawn $sreport cluster AccountUtilizationByUser cluster='$cluster' start=$start_str end=$end_str -tsecper -p -n format=cluster,account,login,used]
+expect {
+	-re "There was a problem" {
+		send_user "FAILURE: there was a problem with the sreport command\n"
+	    	incr exit_code 1
+	}
+	-re "$cluster.root..$alloc." {
+		incr matches
+		exp_continue
+	}
+	-re "$cluster.$account1..$alloc." {
+		incr matches
+		exp_continue
+	}
+	-re "$cluster.$account1.$user1.$alloc." {
+		incr matches
+		exp_continue
+	}
+	timeout {
+		send_user "\nFAILURE: sreport not responding\n"
+		slow_kill $my_pid
+		incr exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+
+if {$matches != 3} {
+	send_user "\nFAILURE:  sreport didn't give good info 7.\n"
+	incr exit_code 1
+}
+
+if { $exit_code } {
+	incr exit_code [archive_load $sql_rem]
+	remove_user "" "" $users
+	remove_acct "" $accounts
+	remove_cluster "$cluster"
+	exit $exit_code
+}
+
+# test the User Top report
+set matches 0
+set my_pid [eval spawn $sreport user top cluster='$cluster' start=$start_str end=$end_str -tsecper -p -n format=cluster,account,login,used]
+expect {
+	-re "There was a problem" {
+		send_user "FAILURE: there was a problem with the sreport command\n"
+	    	incr exit_code 1
+	}
+	-re "$cluster.$account1.$user1.$alloc." {
+		incr matches
+		exp_continue
+	}
+	timeout {
+		send_user "\nFAILURE: sreport not responding\n"
+		slow_kill $my_pid
+		incr exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+
+if {$matches != 1} {
+	send_user "\nFAILURE:  sreport didn't give good info 8.\n"
+	incr exit_code 1
+}
+
+if { $exit_code } {
+	incr exit_code [archive_load $sql_rem]
+	remove_user "" "" $users
+	remove_acct "" $accounts
+	remove_cluster "$cluster"
+	exit $exit_code
+}
+
+# test the Job Size report
+set matches 0
+set my_pid [eval spawn $sreport job size grouping=2,4 cluster='$cluster' start=$start_str end=$end_str -tsec -p -n]
+expect {
+	-re "There was a problem" {
+		send_user "FAILURE: there was a problem with the sreport command\n"
+	    	incr exit_code 1
+	}
+	-re "$cluster.$account1.0.$alloc_sec.0." {
+		incr matches
+		exp_continue
+	}
+	-re "$cluster.$account2.0.0.0." {
+		incr matches
+		exp_continue
+	}
+	timeout {
+		send_user "\nFAILURE: sreport not responding\n"
+		slow_kill $my_pid
+		incr exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+
+if {$matches != 2} {
+	send_user "\nFAILURE:  sreport didn't give good info 9 $matches.\n"
+	incr exit_code 1
+}
+
+if { $exit_code } {
+	incr exit_code [archive_load $sql_rem]
+	remove_user "" "" $users
+	remove_acct "" $accounts
+	remove_cluster "$cluster"
+	exit $exit_code
+}
+
+#
+# Use sreport to get cluster usage for the first 3 days
+#
+
+# Mon Dec 31 23:00:00 2007
+set period_start 1199170800
+
+# Thu Jan 3 00:00:00 2008
+set period_end 1199347200
+set start_str [timestamp -format %Y-%m-%dT%X -seconds $period_start]
+set end_str [timestamp -format %Y-%m-%dT%X -seconds $period_end]
+
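+# Three-day window: alloc sums all three jobs, and resv sums each job's
+# eligible-to-start wait minus the overcommitted portion (reserved time
+# that overlapped down or allocated cpus), hard-coded as 9000 below.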
+set reported [expr ($period_end - $period_start) * $cluster_cpus]
+set down [expr ($node0_down_end - $node0_down_start) * $node0_cpus]
+set alloc_sec [expr ($job1_end-$job1_start) * $job1_cpus]
+set alloc_sec [expr $alloc_sec + (($job2_end-$job2_start) * $job2_cpus)]
+set alloc_sec [expr $alloc_sec + (($job3_end-$job3_start) * $job3_cpus)]
+set resv [expr ($job2_start - $job2_elig) * $job2_cpus]
+set resv [expr $resv + (($job3_start - $job3_elig) * $job3_cpus)]
+# The exact math was not worked out here; 9000 should be the correct overcommit value
+set over 9000
+set resv [expr $resv - $over]
+set idle [expr $reported - ($down + $alloc_sec + $resv)]
+# Apply the same logic the plugin uses to figure out the correct
+# idle time and resv time
+if {$idle < 0 } {
+	set resv [expr $resv + $idle]
+	set idle 0
+	if {$resv < 0} {
+		set resv 0
+	}
+}
+set down [format "%d\\\(%.2f%%\\\)" $down [expr double($down * 100)/$reported]] 
+set alloc [format "%d\\\(%.2f%%\\\)" $alloc_sec [expr double($alloc_sec * 100)/$reported]] 
+set resv [format "%d\\\(%.2f%%\\\)" $resv  [expr double($resv * 100)/$reported]]
+set idle [format "%d\\\(%.2f%%\\\)" $idle [expr double($idle * 100)/$reported]] 
+set over [format "%d\\\(%.2f%%\\\)" $over [expr double($over * 100)/$reported]] 
+
+set job1_alloc_str [format "%d\\\(%.2f%%\\\)" $job1_alloc [expr double($job1_alloc * 100)/$reported]] 
+set job2_alloc_str [format "%d\\\(%.2f%%\\\)" $job2_alloc [expr double($job2_alloc * 100)/$reported]] 
+set job3_alloc_str [format "%d\\\(%.2f%%\\\)" $job3_alloc [expr double($job3_alloc * 100)/$reported]] 
+set total_alloc_str [format "%d\\\(%.2f%%\\\)" $total_alloc [expr double($total_alloc * 100)/$reported]] 
+set acct1_alloc_str [format "%d\\\(%.2f%%\\\)" $acct1_alloc [expr double($acct1_alloc * 100)/$reported]] 
+set acct2_alloc_str [format "%d\\\(%.2f%%\\\)" $acct2_alloc [expr double($acct2_alloc * 100)/$reported]] 
+set acct3_alloc_str [format "%d\\\(%.2f%%\\\)" $acct3_alloc [expr double($acct3_alloc * 100)/$reported]] 
+set reported [format "%d\\\(%.2f%%\\\)" $reported 100]
+
+send_user "\nTesting sreport for 3 days\n"
+set matches 0
+set my_pid [eval spawn $sreport cluster utilization cluster='$cluster' start=$start_str end=$end_str -tsecper -p -n format=cluster,idle,down,alloc,res,over,reported]
+expect {
+	-re "There was a problem" {
+		send_user "FAILURE: there was a problem with the sreport command\n"
+	    	incr exit_code 1
+	}
+	-re "$cluster.$idle.$down.$alloc.$resv.$over.$reported." {
+		incr matches
+		exp_continue
+	}
+	timeout {
+		send_user "\nFAILURE: sreport not responding\n"
+		slow_kill $my_pid
+		incr exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+
+if {$matches != 1} {
+	send_user "\nFAILURE:  sreport didn't give good info 10 $matches.\n"
+	send_user "we are looking for $cluster.$idle.$down.$alloc.$resv.$over.$reported.\n"
+	incr exit_code 1
+}
+
+if { $exit_code } {
+	incr exit_code [archive_load $sql_rem]
+	remove_user "" "" $users
+	remove_acct "" $accounts
+	remove_cluster "$cluster"
+	exit $exit_code
+}
+
+# test the UserUtilizationByAccount report
+set matches 0
+set my_pid [eval spawn $sreport cluster UserUtilizationByAccount cluster='$cluster' start=$start_str end=$end_str -tsecper -p -n format=cluster,login,account,used]
+expect {
+	-re "There was a problem" {
+		send_user "FAILURE: there was a problem with the sreport command\n"
+	    	incr exit_code 1
+	}
+	-re "$cluster.$user2.$account3.$job2_alloc_str." {
+		send_user "got 2\n"
+		incr matches
+		exp_continue
+	}
+	-re "$cluster.$user1.$account1.$job1_alloc_str." {
+		send_user "got 1\n"
+		incr matches
+		exp_continue
+	}
+	-re "$cluster.$user1.$account2.$job3_alloc_str." {
+		send_user "got 3\n"
+		incr matches
+		exp_continue
+	}
+	timeout {
+		send_user "\nFAILURE: sreport not responding\n"
+		slow_kill $my_pid
+		incr exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+
+if {$matches != 3} {
+	send_user "\nFAILURE:  sreport didn't give good info 11 $matches.\n"
+	incr exit_code 1
+}
+
+if { $exit_code } {
+	incr exit_code [archive_load $sql_rem]
+	remove_user "" "" $users
+	remove_acct "" $accounts
+	remove_cluster "$cluster"
+	exit $exit_code
+}
+
+# test the AccountUtilizationByUser report
+set matches 0
+set my_pid [eval spawn $sreport cluster AccountUtilizationByUser cluster='$cluster' start=$start_str end=$end_str -tsecper -p -n format=cluster,account,login,used]
+expect {
+	-re "There was a problem" {
+		send_user "FAILURE: there was a problem with the sreport command\n"
+	    	incr exit_code 1
+	}
+	-re "$cluster.root..$total_alloc_str." {
+		incr matches
+		exp_continue
+	}
+	-re "$cluster.$account1..$acct1_alloc_str." {
+		incr matches
+		exp_continue
+	}
+	-re "$cluster.$account1.$user1.$job1_alloc_str." {
+		incr matches
+		exp_continue
+	}
+	-re "$cluster.$account2..$acct2_alloc_str." {
+		incr matches
+		exp_continue
+	}
+	-re "$cluster.$account2.$user1.$job3_alloc_str." {
+		incr matches
+		exp_continue
+	}
+	-re "$cluster.$account3..$acct3_alloc_str." {
+		incr matches
+		exp_continue
+	}
+	-re "$cluster.$account3.$user2.$job2_alloc_str." {
+		incr matches
+		exp_continue
+	}
+	timeout {
+		send_user "\nFAILURE: sreport not responding\n"
+		slow_kill $my_pid
+		incr exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+
+if {$matches != 7} {
+	send_user "\nFAILURE: sreport account utilization by user didn't give good info ($matches).\n"
+	incr exit_code 1
+}
+
+if { $exit_code } {
+	incr exit_code [archive_load $sql_rem]
+	remove_user "" "" $users
+	remove_acct "" $accounts
+	remove_cluster "$cluster"
+	exit $exit_code
+}
+
+# test the User Top report
+set matches 0
+set my_pid [eval spawn $sreport user top cluster='$cluster' start=$start_str end=$end_str -tsecper -p -n format=cluster,account,login,used]
+expect {
+	-re "There was a problem" {
+		send_user "FAILURE: there was a problem with the sreport command\n"
+	    	incr exit_code 1
+	}
+	-re "$cluster.$account3.$user2.$job2_alloc_str." {
+		incr matches
+		exp_continue
+	}
+	-re "$cluster.$account1.$user1.$job1_alloc_str." {
+		incr matches
+		exp_continue
+	}
+	-re "$cluster.$account2.$user1.$job3_alloc_str." {
+		incr matches
+		exp_continue
+	}
+	timeout {
+		send_user "\nFAILURE: sreport not responding\n"
+		slow_kill $my_pid
+		incr exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+
+if {$matches != 3} {
+	send_user "\nFAILURE:  sreport didn't give good info 12 $matches.\n"
+	incr exit_code 1
+}
+
+if { $exit_code } {
+	incr exit_code [archive_load $sql_rem]
+	remove_user "" "" $users
+	remove_acct "" $accounts
+	remove_cluster "$cluster"
+	exit $exit_code
+}
+
+# test the Job Size report
+set matches 0
+set my_pid [eval spawn $sreport job size grouping=2,4 cluster='$cluster' start=$start_str end=$end_str -tsec -p -n]
+expect {
+	-re "There was a problem" {
+		send_user "FAILURE: there was a problem with the sreport command\n"
+	    	incr exit_code 1
+	}
+	-re "$cluster.$account1.0.$job1_alloc.0." {
+		incr matches
+		exp_continue
+	}
+	-re "$cluster.$account2.0.$job3_alloc.$job2_alloc." {
+		incr matches
+		exp_continue
+	}
+	timeout {
+		send_user "\nFAILURE: sreport not responding\n"
+		slow_kill $my_pid
+		incr exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+
+if {$matches != 2} {
+	send_user "\nFAILURE:  sreport didn't give good info 13 $matches.\n"
+	incr exit_code 1
+}
+
+if { $exit_code } {
+	incr exit_code [archive_load $sql_rem]
+	remove_user "" "" $users
+	remove_acct "" $accounts
+	remove_cluster "$cluster"
+	exit $exit_code
+}
+
+# test the Job Size report down a level
+set matches 0
+set my_pid [eval spawn $sreport job size grouping=2,4 cluster='$cluster' account='$account2' start=$start_str end=$end_str -tsec -p -n]
+expect {
+	-re "There was a problem" {
+		send_user "FAILURE: there was a problem with the sreport command\n"
+	    	incr exit_code 1
+	}
+	-re "$cluster.$account3.0.0.$job2_alloc." {
+		incr matches
+		exp_continue
+	}
+	timeout {
+		send_user "\nFAILURE: sreport not responding\n"
+		slow_kill $my_pid
+		incr exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+
+if {$matches != 1} {
+	send_user "\nFAILURE:  sreport didn't give good info 14 $matches.\n"
+	incr exit_code 1
+}
+
+if { $exit_code } {
+	incr exit_code [archive_load $sql_rem]
+	remove_user "" "" $users
+	remove_acct "" $accounts
+	remove_cluster "$cluster"
+	exit $exit_code
+}
+
+if {$exit_code == 0} {
+	incr exit_code [archive_load $sql_rem]
+	incr exit_code [remove_user "" "" $users]
+	incr exit_code [remove_acct "" $accounts]
+	incr exit_code [remove_cluster "$cluster"]
+
+	exec $bin_rm -f $sql_in
+	exec $bin_rm -f $sql_rem
+	if {$exit_code == 0} {
+		send_user "\nSUCCESS\n"
+	} else {
+		send_user "\nFAILURE\n"
+	}
+} else {
+	send_user "\nFAILURE\n"
+}
+exit $exit_code
diff --git a/testsuite/expect/test22.2 b/testsuite/expect/test22.2
new file mode 100755
index 0000000000000000000000000000000000000000..4abcb74b11a9e1deb488da19e26b6041588d4e5c
--- /dev/null
+++ b/testsuite/expect/test22.2
@@ -0,0 +1,324 @@
+#!/usr/bin/expect
+############################################################################
+# Purpose: Test of SLURM sreport functionality
+#          sreport h, n, p, P, t, V options.
+#
+# Output:  "TEST: #.#" followed by "SUCCESS" if test was successful, OR
+#          "FAILURE: ..." otherwise with an explanation of the failure, OR
+#          anything else indicates a failure mode that must be investigated.
+############################################################################
+# Copyright (C) 2009 Lawrence Livermore National Security.
+# Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+# Written by Joseph Donaghy <donaghy1@llnl.gov>
+# CODE-OCEC-09-009. All rights reserved.
+# 
+# This file is part of SLURM, a resource management program.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
+#  
+# SLURM is free software; you can redistribute it and/or modify it under
+# the terms of the GNU General Public License as published by the Free
+# Software Foundation; either version 2 of the License, or (at your option)
+# any later version.
+# 
+# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+# details.
+# 
+# You should have received a copy of the GNU General Public License along
+# with SLURM; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+############################################################################
+source ./globals
+
+set test_id     "22.2"
+set exit_code   0
+set timeout 60
+print_header $test_id
+
+#
+# Check accounting config and bail if not found.
+#
+if { [test_account_storage] == 0 } {
+	send_user "\nWARNING: This test can't be run without a usable AccountStorageType\n"
+	exit 0
+}
+
+################################################################
+#
+# Proc: sreport_opt
+#
+# Purpose:  Pass sreport options and test
+#
+# Returns: Number of matches.
+#
+# Input: Switch options not requiring arguments
+#
+################################################################
+
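+# Each branch below spawns sreport with one option, counts pattern matches,
+# and returns the count; the callers at the bottom of this script compare
+# that count against the expected value for the option.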
+proc sreport_opt { soption } {
+	global number sreport
+	set debug       0
+	set exit_code   0
+	set matches     0
+	set not_support 0
+	send_user "$sreport -$soption \n"
+
+	if { $soption == "-help" || $soption == "-usage" || $soption == "h" } {
+
+	spawn $sreport -$soption
+	expect {
+		-re "SLURM accounting storage is disabled" {
+			set not_support 1
+			exp_continue
+		}
+		-re "sreport...OPTION.....COMMAND" {
+			if {$debug} {send_user "\nmatch1\n"}
+			incr matches
+			exp_continue
+		}
+		-re "Valid .OPTION. values are" {
+			if {$debug} {send_user "\nmatch2\n"}
+			incr matches
+			exp_continue
+		}
+		-re "Valid .COMMAND. values are" {
+			if {$debug} {send_user "\nmatch3\n"}
+			incr matches
+			exp_continue
+		}
+		timeout {
+			send_user "\nFAILURE: sreport not responding\n"
+			set exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+
+	if {$not_support == 1} {
+		send_user "\nWARNING: can not test without accounting enabled\n"
+		exit 0
+	}
+
+	if {$matches != 3} {
+		send_user "\nFAILURE: sreport -$soption failed ($matches)\n"
+		set exit_code 1
+	}
+	return $matches
+}
+
+	if { $soption == "-noheader" || $soption == "n" } {
+
+	spawn $sreport -$soption job sizes
+	expect {
+		-re "SLURM accounting storage is disabled" {
+			set not_support 1
+			exp_continue
+		}
+		-re "Cluster|Account|0-49 cpus|50-249 cpus|250-499 cpus|500-999 cpus|1000 cpus|of cluster" {	
+			if {$debug} {send_user "\nmatch4\n"}
+			incr matches
+			exp_continue
+		}
+		timeout {
+			send_user "\nFAILURE: sreport not responding\n"
+			set exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+
+	if {$not_support == 1} {
+		send_user "\nWARNING: can not test without accounting enabled\n"
+		exit 0
+	}
+	if {$matches != 0} {
+		send_user "\nFAILURE: sreport -$soption failed ($matches)\n"
+		set exit_code 1
+	}
+	return $matches
+}
+
+	if { $soption == "-parsable" || $soption == "p" } {
+
+	spawn $sreport -$soption job sizes
+	expect {
+		-re "SLURM accounting storage is disabled" {
+			set not_support 1
+			exp_continue
+		}
+		-re "Cluster\\|Account\\|0-49 cpus\\|50-249 cpus\\|250-499 cpus\\|" {
+			if {$debug} {send_user "\nmatch5\n"}
+			incr matches
+			exp_continue
+		}
+		-re "500-999 cpus\\|>= 1000 cpus\\|% of cluster\\|" {
+			if {$debug} {send_user "\nmatch6\n"}
+			incr matches
+			exp_continue
+		}
+		timeout {
+			send_user "\nFAILURE: sreport not responding\n"
+			set exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+
+	if {$not_support != 0} {
+		send_user "\nWARNING: can not test without accounting enabled\n"
+		exit 0
+	}
+	return $matches
+}
+
+	if { $soption == "-parsable2" || $soption == "P" } {
+
+	spawn $sreport -$soption job sizes
+	expect {
+		-re "SLURM accounting storage is disabled" {
+			set not_support 1
+			exp_continue
+		}
+		-re "Cluster\\|Account\\|0-49 cpus\\|50-249 cpus\\|250-499 cpus\\|" {
+			if {$debug} {send_user "\nmatch7\n"}
+			incr matches
+			exp_continue
+		}
+		-re "500-999 cpus\\|>= 1000 cpus\\|% of cluster" {
+			if {$debug} {send_user "\nmatch8\n"}
+			incr matches
+			exp_continue
+		}
+		timeout {
+			send_user "\nFAILURE: sreport not responding\n"
+			set exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+
+	if {$not_support == 1} {
+		send_user "\nWARNING: can not test without accounting enabled\n"
+		exit 0
+	}
+
+	if {$matches != 2} {
+		send_user "\nFAILURE: sreport -$soption failed ($matches)\n"
+		set exit_code 1
+	}
+		return $matches
+	}
+
+	if { $soption == "-version" || $soption == "V" } {
+
+	spawn $sreport -$soption
+	expect {
+		-re "SLURM accounting storage is disabled" {
+			set not_support 1
+			exp_continue
+		}
+		-re "slurm $number.$number.$number" {
+			if {$debug} {send_user "\nmatch9\n"}
+			incr matches
+			exp_continue
+		}
+		timeout {
+			send_user "\nFAILURE: sreport not responding\n"
+			set exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+
+	if {$not_support == 1} {
+		send_user "\nWARNING: can not test without accounting enabled\n"
+		exit 0
+	}
+
+	if {$matches != 1} {
+		send_user "\nFAILURE: sreport -$soption failed ($matches)\n"
+		set exit_code 1
+	}
+		return $matches
+	}
+}
+
+################################################################
+
+set matches [sreport_opt h ]
+if {$matches != 3} {
+	send_user "\nFAILURE: sreport -h failed ($matches)\n"
+	set exit_code 1
+}
+
+set matches [sreport_opt -help ]
+if {$matches != 3} {
+	send_user "\nFAILURE: sreport --help failed ($matches)\n"
+	set exit_code 1
+}
+
+set matches [sreport_opt -usage ]
+if {$matches != 3} {
+	send_user "\nFAILURE: sreport --usage failed ($matches)\n"
+	set exit_code 1
+}
+
+set matches [sreport_opt n ]
+if {$matches != 0} {
+	send_user "\nFAILURE: sreport -n failed ($matches)\n"
+	set exit_code 1
+}
+
+set matches [sreport_opt -noheader ]
+if {$matches != 0} {
+	send_user "\nFAILURE: sreport --noheader failed ($matches)\n"
+	set exit_code 1
+}
+
+set matches [sreport_opt p ]
+if {$matches != 2} {
+	send_user "\nFAILURE: sreport -p failed ($matches)\n"
+	set exit_code 1
+}
+
+set matches [sreport_opt -parsable ]
+if {$matches != 2} {
+	send_user "\nFAILURE: sreport --parsable failed ($matches)\n"
+	set exit_code 1
+}
+
+set matches [sreport_opt P ]
+if {$matches != 2} {
+	send_user "\nFAILURE: sreport -P failed ($matches)\n"
+	set exit_code 1
+}
+
+set matches [sreport_opt -parsable2 ]
+if {$matches != 2} {
+	send_user "\nFAILURE: sreport --parsable2 failed ($matches)\n"
+	set exit_code 1
+}
+
+set matches [sreport_opt V ]
+if {$matches != 1} {
+	send_user "\nFAILURE: sreport -V failed ($matches)\n"
+	set exit_code 1
+}
+
+set matches [sreport_opt -version ]
+if {$matches != 1} {
+	send_user "\nFAILURE: sreport --version failed ($matches)\n"
+	set exit_code 1
+}
+
+if {$exit_code == 0} {
+	send_user "\nSUCCESS\n"
+}
+exit $exit_code
diff --git a/testsuite/expect/test23.1 b/testsuite/expect/test23.1
new file mode 100755
index 0000000000000000000000000000000000000000..4d1634eb73d169c968aa40e054936f444a9e62c2
--- /dev/null
+++ b/testsuite/expect/test23.1
@@ -0,0 +1,260 @@
+#!/usr/bin/expect
+############################################################################
+# Purpose: Test of SLURM functionality
+#          Test sstat h, e, usage and V options.
+#
+# Output:  "TEST: #.#" followed by "SUCCESS" if test was successful, OR
+#          "FAILURE: ..." otherwise with an explanation of the failure, OR
+#          anything else indicates a failure mode that must be investigated.
+############################################################################
+# Copyright (C) 2008 - 2009 Lawrence Livermore National Security.
+# Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+# Written by Joseph Donaghy <donaghy1@llnl.gov>
+# CODE-OCEC-09-009. All rights reserved.
+# 
+# This file is part of SLURM, a resource management program.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
+#  
+# SLURM is free software; you can redistribute it and/or modify it under
+# the terms of the GNU General Public License as published by the Free
+# Software Foundation; either version 2 of the License, or (at your option)
+# any later version.
+# 
+# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+# details.
+# 
+# You should have received a copy of the GNU General Public License along
+# with SLURM; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+############################################################################
+source ./globals
+
+set test_id     "23.1"
+set exit_code   0
+set matches     0
+set not_support 0
+
+print_header $test_id
+
+################################################################
+#
+# Proc: sstat_job
+#
+# Purpose:  Pass sstat option and test
+#
+# Returns: Number of matches.
+#
+# Input: sstat options not requiring arguments
+#
+################################################################
+
+proc sstat_job { soption } {
+	global number sstat
+	set debug	0
+	set exit_code   0
+	set matches     0
+	set not_support 0
+	send_user "sstat -$soption \n"
+
+	if { $soption == "e" || $soption == "-helpformat" } {
+
+	spawn $sstat -$soption 
+	expect {
+		-re "AveCPU *AvePages *AveRSS *AveVMSize" {
+			if {$debug} {send_user "\nmatch1\n"}
+			incr matches
+			exp_continue
+		}
+		-re "JobID *MaxPages *MaxPagesNode *MaxPagesTask" {
+			if {$debug} {send_user "\nmatch2\n"}
+			incr matches
+			exp_continue
+		}
+		-re "MaxRSS *MaxRSSNode *MaxRSSTask *MaxVMSize" {
+			if {$debug} {send_user "\nmatch3\n"}
+			incr matches
+			exp_continue
+		}
+		-re "MaxVMSizeNode *MaxVMSizeTask *MinCPU *MinCPUNode" {
+			if {$debug} {send_user "\nmatch4\n"}
+			incr matches
+			exp_continue
+		}
+		-re "MinCPUTask *NTasks *SystemCPU *TotalCPU" {
+			if {$debug} {send_user "\nmatch5\n"}
+			incr matches
+			exp_continue
+		}
+		timeout {
+			send_user "\nFAILURE: sstat not responding\n"
+			set exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+
+	if {$matches != 5} {
+		send_user "\nFAILURE: sstat -$soption failed ($matches)\n"
+		set exit_code 1
+	}
+		return $matches
+	}
+
+	if { $soption == "h" || $soption == "-help" } {
+
+	spawn $sstat -$soption 
+	expect {
+		-re "sstat...OPTION" {
+			if {$debug} {send_user "\nmatch6\n"}
+			incr matches
+			exp_continue
+		}
+		-re "Valid..OPTION" {
+			if {$debug} {send_user "\nmatch7\n"}
+			incr matches
+			exp_continue
+		}
+		-re "-e, --helpformat" {
+			if {$debug} {send_user "\nmatch8\n"}
+			incr matches
+			exp_continue
+		}
+		timeout {
+			send_user "\nFAILURE: sstat not responding\n"
+			set exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+
+	if {$matches != 3} {
+		send_user "\nFAILURE: sstat -$soption failed ($matches)\n"
+		set exit_code 1
+	}
+		return $matches
+	}
+
+	if { $soption == "-usage" } {
+
+	spawn $sstat -$soption 
+	expect {
+		-re "Usage: sstat .options. -j .job..stepid." {
+			if {$debug} {send_user "\nmatch9\n"}
+			incr matches
+			exp_continue
+		}
+		-re "Use --help for help" {
+			if {$debug} {send_user "\nmatch10\n"}
+			incr matches
+			exp_continue
+		}
+		timeout {
+			send_user "\nFAILURE: sstat not responding\n"
+			set exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+
+	if {$matches != 2} {
+		send_user "\nFAILURE: sstat -$soption failed ($matches)\n"
+		set exit_code 1
+	}
+		return $matches
+	}
+
+	if { $soption == "V" || $soption == "-version" } {
+
+	spawn $sstat -$soption 
+	expect {
+		-re "slurm ($number).($number)." {
+			if {$debug} {send_user "\nmatch11\n"}
+			incr matches
+			exp_continue
+		}
+		timeout {
+			send_user "\nFAILURE: sstat not responding\n"
+			set exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+
+	if {$matches != 1} {
+		send_user "\nFAILURE: sstat -$soption failed ($matches)\n"
+		set exit_code 1
+	}
+		return $matches
+	}
+}
+################################################################
+
+set matches [sstat_job e ]
+if {$matches != 5} {
+	send_user "\nFAILURE: sstat -e failed ($matches)\n"
+	set exit_code 1
+} else {
+	send_user "\nsstat -e test GOOD!\n"
+}
+
+set matches [sstat_job -helpformat ]
+if {$matches != 5} {
+	send_user "\nFAILURE: sstat --helpformat failed ($matches)\n"
+	set exit_code 1
+} else {
+	send_user "\nsstat --helpformat test GOOD!\n"
+}
+
+set matches [sstat_job h ]
+if {$matches != 3} {
+	send_user "\nFAILURE: sstat -h failed ($matches)\n"
+	set exit_code 1
+} else {
+	send_user "\nsstat -h test GOOD!\n"
+}
+
+set matches [sstat_job -help ]
+if {$matches != 3} {
+	send_user "\nFAILURE: sstat --help failed ($matches)\n"
+	set exit_code 1
+} else {
+	send_user "\nsstat --help test GOOD!\n"
+}
+
+set matches [sstat_job -usage ]
+if {$matches != 2} {
+	send_user "\nFAILURE: sstat --usage failed ($matches)\n"
+	set exit_code 1
+} else {
+	send_user "\nsstat --usage test GOOD!\n"
+}
+
+set matches [sstat_job V ]
+if {$matches != 1} {
+	send_user "\nFAILURE: sstat -V failed ($matches)\n"
+	set exit_code 1
+} else {
+	send_user "\nsstat -V test GOOD!\n"
+}
+
+set matches [sstat_job -version ]
+if {$matches != 1} {
+	send_user "\nFAILURE: sstat --version failed ($matches)\n"
+	set exit_code 1
+} else {
+	send_user "\nsstat --version test GOOD!\n"
+}
+
+if {$exit_code == 0} {
+	send_user "\nSUCCESS\n"
+}
+exit $exit_code
diff --git a/testsuite/expect/test23.2 b/testsuite/expect/test23.2
new file mode 100755
index 0000000000000000000000000000000000000000..3618ffb84415dfdfc9016fe49bd32fedf4dcb8af
--- /dev/null
+++ b/testsuite/expect/test23.2
@@ -0,0 +1,126 @@
+#!/usr/bin/expect
+############################################################################
+# Purpose: Test of SLURM functionality
+#          Test sstat --helpformat and -e options.
+#
+# Output:  "TEST: #.#" followed by "SUCCESS" if test was successful, OR
+#          "FAILURE: ..." otherwise with an explanation of the failure, OR
+#          anything else indicates a failure mode that must be investigated.
+############################################################################
+# Copyright (C) 2008 Lawrence Livermore National Security.
+# Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+# Written by Joseph Donaghy <donaghy1@llnl.gov>
+# CODE-OCEC-09-009. All rights reserved.
+# 
+# This file is part of SLURM, a resource management program.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
+#  
+# SLURM is free software; you can redistribute it and/or modify it under
+# the terms of the GNU General Public License as published by the Free
+# Software Foundation; either version 2 of the License, or (at your option)
+# any later version.
+# 
+# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+# details.
+# 
+# You should have received a copy of the GNU General Public License along
+# with SLURM; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+############################################################################
+source ./globals
+
+set test_id     "23.2"
+set exit_code   0
+set matches     0
+set not_support 0
+
+print_header $test_id
+
+#
+# Report the sstat --helpformat option
+#
+
+spawn $sstat --helpformat
+expect {
+	-re "AveCPU *AvePages *AveRSS *AveVMSize" {
+		incr matches
+		exp_continue
+	}
+	-re "JobID *MaxPages *MaxPagesNode *MaxPagesTask" {
+		incr matches
+		exp_continue
+	}
+	-re "MaxRSS *MaxRSSNode *MaxRSSTask *MaxVMSize" {
+		incr matches
+		exp_continue
+	}
+	-re "MaxVMSizeNode *MaxVMSizeTask *MinCPU *MinCPUNode" {
+		incr matches
+		exp_continue
+	}
+	-re "MinCPUTask *NTasks *SystemCPU *TotalCPU" {
+		incr matches
+		exp_continue
+	}
+	timeout {
+		send_user "\nFAILURE: sstat not responding\n"
+		set exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+
+if {$matches != 5} {
+	send_user "\nFAILURE: sstat --helpformat failed ($matches)\n"
+	set exit_code 1
+}
+
+#
+# Report the sstat -e option
+#
+set matches     0
+
+spawn $sstat -e
+expect {
+	-re "AveCPU *AvePages *AveRSS *AveVMSize" {
+		incr matches
+		exp_continue
+	}
+	-re "JobID *MaxPages *MaxPagesNode *MaxPagesTask" {
+		incr matches
+		exp_continue
+	}
+	-re "MaxRSS *MaxRSSNode *MaxRSSTask *MaxVMSize" {
+		incr matches
+		exp_continue
+	}
+	-re "MaxVMSizeNode *MaxVMSizeTask *MinCPU *MinCPUNode" {
+		incr matches
+		exp_continue
+	}
+	-re "MinCPUTask *NTasks *SystemCPU *TotalCPU" {
+		incr matches
+		exp_continue
+	}
+	timeout {
+		send_user "\nFAILURE: sstat not responding\n"
+		set exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+
+if {$matches != 5} {
+	send_user "\nFAILURE: sstat -e failed ($matches)\n"
+	set exit_code 1
+}
+
+if {$exit_code == 0} {
+	send_user "\nSUCCESS\n"
+}
+exit $exit_code
diff --git a/testsuite/expect/test23.3 b/testsuite/expect/test23.3
new file mode 100755
index 0000000000000000000000000000000000000000..67080abddbeaa85671c42735d8c6546fa868e257
--- /dev/null
+++ b/testsuite/expect/test23.3
@@ -0,0 +1,672 @@
+#!/usr/bin/expect
+############################################################################
+# Purpose: Test of SLURM sstat functionality
+#          sstat a, n, o, p, P and v options.
+#
+# Output:  "TEST: #.#" followed by "SUCCESS" if test was successful, OR
+#          "FAILURE: ..." otherwise with an explanation of the failure, OR
+#          anything else indicates a failure mode that must be investigated.
+############################################################################
+# Copyright (C) 2009 Lawrence Livermore National Security.
+# Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+# Written by Joseph Donaghy <donaghy1@llnl.gov>
+# CODE-OCEC-09-009. All rights reserved.
+# 
+# This file is part of SLURM, a resource management program.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
+#  
+# SLURM is free software; you can redistribute it and/or modify it under
+# the terms of the GNU General Public License as published by the Free
+# Software Foundation; either version 2 of the License, or (at your option)
+# any later version.
+# 
+# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+# details.
+# 
+# You should have received a copy of the GNU General Public License along
+# with SLURM; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+############################################################################
+source ./globals
+
+set test_id     "23.3"
+set exit_code   0
+set file_in1    "test.$test_id.input1"
+set file_in2    "test.$test_id.input2"
+set file_in3    "test.$test_id.input3"
+set test_acct   "test_acct"
+set timeout 60
+print_header $test_id
+
+set ac		AveCPU        
+set ap		AvePages      
+set ar		AveRSS        
+set av		AveVMSize
+set ji		JobID         
+set mp		MaxPages      
+set mpn		MaxPagesNode  
+set mpt		MaxPagesTask
+set mr		MaxRSS        
+set mrn		MaxRSSNode    
+set mrt		MaxRSSTask    
+set mvs		MaxVMSize
+set mvn		MaxVMSizeNode  
+set mvt		MaxVMSizeTask  
+set mc		MinCPU        
+set mn		MinCPUNode
+set mt		MinCPUTask    
+set nt		NTasks        
+set sc		SystemCPU     
+set tc		TotalCPU
+#
+# Check accounting config and bail if not found.
+#
+if { [test_account_storage] == 0 } {
+	send_user "\nWARNING: This test can't be run without a usable AccountStorageType\n"
+	exit 0
+}
+
+if { [string compare [check_accounting_admin_level] "Administrator"] } {
+	send_user "\nWARNING: This test can't be run without being an Accounting administrator.\nUse sacctmgr mod user \$USER_NAME set admin=admin.\n"
+	exit 0
+}
+
+#
+# Identify the user and his current default account
+#
+set acct_name ""
+set user_name ""
+set user_gid ""
+spawn $bin_id -u -n
+expect {
+	 -re "($alpha_numeric_under)" {
+		set user_name $expect_out(1,string)
+		exp_continue
+	}
+	eof {
+		wait
+	}
+}
+
+spawn $bin_id -u
+expect {
+	 -re "($alpha_numeric_under)" {
+		set user_gid $expect_out(1,string)
+		exp_continue
+	}
+	eof {
+		wait
+	}
+}
+
+set s_pid [spawn $sacctmgr show user $user_name]
+expect {
+	-re "$user_name *($alpha_numeric_under)" {
+		set acct_name $expect_out(1,string)
+		exp_continue
+	}
+	timeout {
+		send_user "FAILURE: sacctmgr add not responding\n"
+		slow_kill $s_pid
+		exit 1
+	}
+	eof {
+		wait
+	}
+}
+
+#
+# Use sacctmgr to add an account
+#
+set aamatches 0
+set sadd_pid [spawn $sacctmgr -i add account $test_acct]
+expect {
+	-re "Adding Account" {
+		incr aamatches
+		exp_continue
+	}
+	-re "Nothing new added" {
+		send_user "\nWARNING: vestigial account $test_acct found\n"
+		incr aamatches
+		exp_continue
+	}
+	timeout {
+		send_user "\nFAILURE: sacctmgr add not responding\n"
+		slow_kill $sadd_pid
+		set exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+if {$aamatches != 1} {
+	send_user "\nFAILURE:  sacctmgr had a problem adding account.\n"
+	exit 1
+}
+
+#
+# Add self to this new account
+#
+set sadd_pid [spawn $sacctmgr -i create user name=$user_name account=$test_acct]
+expect {
+	 timeout {
+		send_user "\nFAILURE: sacctmgr add not responding\n"
+		slow_kill $sadd_pid
+		set exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+
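+# The three input scripts below are nested (file1 sruns file2, which
+# sruns file3), so the batch job produces job steps $job_id.0, .1 and
+# .2 for the sstat --allsteps checks further down.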
+#
+# Build input script file1
+#
+make_bash_script $file_in1 "
+$srun $file_in2
+
+"
+
+#
+# Build input script file2
+#
+make_bash_script $file_in2 "
+$srun $file_in3
+
+"
+
+#
+# Build input script file3
+#
+make_bash_script $file_in3 "
+$srun sleep 10
+
+"
+
+#
+# Spawn a job via srun using this account
+#
+set job_id3 0
+spawn $sbatch -N1 -v --account=$test_acct $file_in1
+expect {
+	-re "Submitted batch job ($number)" {
+		set job_id3 $expect_out(1,string)
+		send_user "\nFOUND JobID to be $job_id3\n"
+		exp_continue
+	}
+	timeout {
+		send_user "\nFAILURE: sbatch not responding\n"
+		set exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+if {$job_id3 == 0} {
+	send_user "\nFAILURE: did not get srun job_id3\n"
+	set exit_code 1
+} else {
+	set matches 0
+	spawn $scontrol show job $job_id3
+	expect {
+		 -re "Account=$test_acct" {
+			incr matches
+			exp_continue
+		}
+		timeout {
+			send_user "\nFAILURE: scontrol not responding\n"
+			set exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+	if {$matches != 1} {
+		send_user "\nFAILURE: srun failed to use specified account\n"
+		set exit_code 1
+	}
+}
+
+#
+# Wait for the job to start running
+#
+if {[wait_for_job $job_id3 "RUNNING"] != 0} {
+	send_user "\nFAILURE: waiting for job to start running\n"
+	exit 1
+}
+################################################################
+#
+# Proc: sstat_job
+#
+# Purpose:  Pass sstat options and test
+#
+# Returns: Number of matches.
+#
+# Input: Switch options not requiring arguments
+#
+################################################################
+
+proc sstat_job { soption job_id } {
+	global sstat
+	set debug       0
+	set exit_code   0
+	set matches     0
+	set not_support 0
+	send_user "sstat -$soption -p -j $job_id\n"
+
+	if { $soption == "-allsteps" || $soption == "a" } {
+
+	spawn $sstat -$soption -p -j $job_id
+	expect {
+		-re "SLURM accounting storage is disabled" {
+			set not_support 1
+			exp_continue
+		}
+		-re "$job_id.0" {
+			if {$debug} {send_user "\nmatch1\n"}
+			incr matches
+			exp_continue
+		}
+		-re "$job_id.1" {
+			if {$debug} {send_user "\nmatch2\n"}
+			incr matches
+			exp_continue
+		}
+		-re "$job_id.2" {
+			if {$debug} {send_user "\nmatch3\n"}
+			incr matches
+			exp_continue
+		}
+		timeout {
+			send_user "\nFAILURE: sstat not responding\n"
+			set exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+
+	if {$not_support != 0} {
+		send_user "\nWARNING: can not test without accounting enabled\n"
+		exit 0
+	}
+	if {$matches != 3} {
+		send_user "\nFAILURE: sstat -$soption failed ($matches)\n"
+		set exit_code 1
+	}
+	return $matches
+	}
+
+	if { $soption == "-noheader" || $soption == "n" } {
+
+	spawn $sstat -$soption -p -j $job_id
+	expect {
+		-re "SLURM accounting storage is disabled" {
+			set not_support 1
+			exp_continue
+		}
+		-re "JobID|MaxVMSize|MaxVMSizeNode|MaxVMSizeTask|	\
+			AveVMSize|MaxRSS|MaxRSSNode|MaxRSSTask|AveRSS|	\
+			MaxPages|MaxPagesNode|MaxPagesTask|AvePages|	\
+			MinCPU|MinCPUNode|MinCPUTask|AveCPU|NTasks" {	
+			if {$debug} {send_user "\nmatch4\n"}
+			incr matches
+			exp_continue
+		}
+		-re "$job_id" {
+			if {$debug} {send_user "\nmatch5\n"}
+			incr matches
+			exp_continue
+		}
+		timeout {
+			send_user "\nFAILURE: sstat not responding\n"
+			set exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+
+	if {$not_support != 0} {
+		send_user "\nWARNING: can not test without accounting enabled\n"
+		exit 0
+	}
+	if {$matches != 1} {
+		send_user "\nFAILURE: sstat -$soption failed ($matches)\n"
+		set exit_code 1
+	}
+	return $matches
+	}
+
+	if { $soption == "-parsable" || $soption == "p" } {
+
+	spawn $sstat -$soption -p -j $job_id
+	expect {
+		-re "SLURM accounting storage is disabled" {
+			set not_support 1
+			exp_continue
+		}
+		-re "JobID\\|MaxVMSize\\|MaxVMSizeNode\\|MaxVMSizeTask\\|AveVMSize\\|MaxRSS\\|" {
+			if {$debug} {send_user "\nmatch6\n"}
+			incr matches
+			exp_continue
+		}
+		-re "MaxRSSNode\\|MaxRSSTask\\|AveRSS\\|MaxPages\\|MaxPagesNode\\|MaxPagesTask\\|" {
+			if {$debug} {send_user "\nmatch7\n"}
+			incr matches
+			exp_continue
+		}
+		-re "AvePages\\|MinCPU\\|MinCPUNode\\|MinCPUTask\\|AveCPU\\|NTasks\\|" {
+			if {$debug} {send_user "\nmatch8\n"}
+			incr matches
+			exp_continue
+		}
+		-re "$job_id..\\|" {
+			if {$debug} {send_user "\nmatch9\n"}
+			incr matches
+			exp_continue
+		}
+		timeout {
+			send_user "\nFAILURE: sstat not responding\n"
+			set exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+
+	if {$not_support != 0} {
+		send_user "\nWARNING: can not test without accounting enabled\n"
+		exit 0
+	}
+	if {$matches != 4} {
+		send_user "\nFAILURE: sstat -$soption failed ($matches)\n"
+		set exit_code 1
+	}
+	return $matches
+}
+
+	if { $soption == "-parsable2" || $soption == "P" } {
+
+	spawn $sstat -$soption -j $job_id
+	expect {
+		-re "SLURM accounting storage is disabled" {
+			set not_support 1
+			exp_continue
+		}
+		-re "JobID\\|MaxVMSize\\|MaxVMSizeNode\\|MaxVMSizeTask\\|AveVMSize\\|MaxRSS\\|" {
+			if {$debug} {send_user "\nmatch10\n"}
+			incr matches
+			exp_continue
+		}
+		-re "MaxRSSNode\\|MaxRSSTask\\|AveRSS\\|MaxPages\\|MaxPagesNode\\|MaxPagesTask\\|" {
+			if {$debug} {send_user "\nmatch11\n"}
+			incr matches
+			exp_continue
+		}
+		-re "AvePages\\|MinCPU\\|MinCPUNode\\|MinCPUTask\\|AveCPU\\|NTasks" {
+			if {$debug} {send_user "\nmatch12\n"}
+			incr matches
+			exp_continue
+		}
+		-re "$job_id..\\|" {
+			if {$debug} {send_user "\nmatch13\n"}
+			incr matches
+			exp_continue
+		}
+		timeout {
+			send_user "\nFAILURE: sstat not responding\n"
+			set exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+
+	if {$not_support != 0} {
+		send_user "\nWARNING: can not test without accounting enabled\n"
+		exit 0
+	}
+	if {$matches != 4} {
+		send_user "\nFAILURE: sstat -$soption failed ($matches)\n"
+		set exit_code 1
+	}
+	return $matches
+	}
+
+	if { $soption == "-verbose" || $soption == "v" } {
+
+	spawn $sstat -$soption -p -j $job_id
+	expect {
+		-re "SLURM accounting storage is disabled" {
+			set not_support 1
+			exp_continue
+		}
+		-re "JobID.MaxVMSize.MaxVMSizeNode.MaxVMSizeTask.AveVMSize.MaxRSS" {
+			if {$debug} {send_user "\nmatch14\n"}
+			incr matches
+			exp_continue
+		}
+		-re "MaxRSSNode.MaxRSSTask.AveRSS.MaxPages.MaxPagesNode.MaxPagesTask" {
+			if {$debug} {send_user "\nmatch15\n"}
+			incr matches
+			exp_continue
+		}
+		-re "AvePages.MinCPU.MinCPUNode.MinCPUTask.AveCPU.NTasks" {
+			if {$debug} {send_user "\nmatch16\n"}
+			incr matches
+			exp_continue
+		}
+		-re "$job_id" {
+			if {$debug} {send_user "\nmatch17\n"}
+			incr matches
+			exp_continue
+		}
+		timeout {
+			send_user "\nFAILURE: sstat not responding\n"
+			set exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+
+	if {$not_support != 0} {
+		send_user "\nWARNING: can not test without accounting enabled\n"
+		exit 0
+	}
+	if {$matches != 4} {
+		send_user "\nFAILURE: sstat -$soption failed ($matches)\n"
+		set exit_code 1
+	}
+	return $matches
+	}
+
+}
+
+################################################################
+#
+# Proc: sstat_vargs
+#
+# Purpose:  Pass sstat options with arguments and test
+#
+# Returns: Number of matches.
+#
+# Input: Switch options with argument
+#
+################################################################
+
+proc sstat_vargs { soption vargs job_id } {
+	global sstat
+	set debug	0
+	set exit_code   0
+	set matches     0
+	set not_support 0
+	send_user "sstat -$soption $vargs -p -j $job_id\n"
+
+	if { $soption == "o" || $soption == "-format" } {
+
+	spawn $sstat -$soption $vargs -p -j $job_id
+	expect {
+		-re "SLURM accounting storage is disabled" {
+			set not_support 1
+			exp_continue
+		}
+		-re "AveCPU.AvePages.AveRSS.AveVMSize" {
+			if {$debug} {send_user "\nmatch18\n"}
+			incr matches
+			exp_continue
+		}
+		-re "JobID.MaxPages.MaxPagesNode.MaxPagesTask" {
+			if {$debug} {send_user "\nmatch19\n"}
+			incr matches
+			exp_continue
+		}
+		-re "MaxRSS.MaxRSSNode.MaxRSSTask.MaxVMSize" {
+			if {$debug} {send_user "\nmatch20\n"}
+			incr matches
+			exp_continue
+		}
+		-re "MaxVMSizeNode.MaxVMSizeTask.MinCPU.MinCPUNode" {
+			if {$debug} {send_user "\nmatch21\n"}
+			incr matches
+			exp_continue
+		}
+		-re "MinCPUTask.NTasks.SystemCPU.TotalCPU" {
+			if {$debug} {send_user "\nmatch22\n"}
+			incr matches
+			exp_continue
+		}
+		-re "$job_id" {
+			if {$debug} {send_user "\nmatch23\n"}
+			incr matches
+			exp_continue
+		}
+		timeout {
+			send_user "\nFAILURE: sstat not responding\n"
+			set exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+
+	if {$not_support != 0} {
+		send_user "\nWARNING: can not test without accounting enabled\n"
+		exit 0
+	}
+	if {$matches != 6} {
+		send_user "\nFAILURE: sstat -$soption failed ($matches)\n"
+		set exit_code 1
+	}
+	return $matches
+	}
+}
+################################################################
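+# Give the job steps a moment to launch before querying them with sstat.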
+sleep 1
+set matches [sstat_job a $job_id3]
+if {$matches != 3} {
+	send_user "\nFAILURE: sstat -a failed ($matches)\n"
+	set exit_code 1
+}
+
+set matches [sstat_job -allsteps $job_id3]
+if {$matches != 3} {
+	send_user "\nFAILURE: sstat --allsteps failed ($matches)\n"
+	set exit_code 1
+}
+
+set matches [sstat_job n $job_id3]
+if {$matches != 1} {
+	send_user "\nFAILURE: sstat -n failed ($matches)\n"
+	set exit_code 1
+}
+
+set matches [sstat_job -noheader $job_id3]
+if {$matches != 1} {
+	send_user "\nFAILURE: sstat --noheader failed ($matches)\n"
+	set exit_code 1
+}
+
+set matches [sstat_job p $job_id3]
+if {$matches != 4} {
+	send_user "\nFAILURE: sstat -p failed ($matches)\n"
+	set exit_code 1
+}
+
+set matches [sstat_job -parsable $job_id3]
+if {$matches != 4} {
+	send_user "\nFAILURE: sstat --parsable failed ($matches)\n"
+	set exit_code 1
+}
+
+set matches [sstat_job P $job_id3]
+if {$matches != 4} {
+	send_user "\nFAILURE: sstat -P failed ($matches)\n"
+	set exit_code 1
+}
+
+set matches [sstat_job -parsable2 $job_id3]
+if {$matches != 4} {
+	send_user "\nFAILURE: sstat --parsable2 failed ($matches)\n"
+	set exit_code 1
+}
+
+set matches [sstat_job v $job_id3]
+if {$matches != 4} {
+	send_user "\nFAILURE: sstat -v failed ($matches)\n"
+	set exit_code 1
+}
+
+set matches [sstat_job -verbose $job_id3]
+if {$matches != 4} {
+	send_user "\nFAILURE: sstat --verbose failed ($matches)\n"
+	set exit_code 1
+}
+
+set matches [sstat_vargs o $ac,$ap,$ar,$av,$ji,$mp,$mpn,$mpt,$mr,$mrn,$mrt,$mvs,$mvn,$mvt,$mc,$mn,$mt,$nt,$sc,$tc $job_id3]
+if {$matches != 6} {
+	send_user "\nFAILURE: sstat -o failed ($matches)\n"
+	set exit_code 1
+}
+
+set matches [sstat_vargs -format $ac,$ap,$ar,$av,$ji,$mp,$mpn,$mpt,$mr,$mrn,$mrt,$mvs,$mvn,$mvt,$mc,$mn,$mt,$nt,$sc,$tc  $job_id3]
+if {$matches != 6} {
+	send_user "\nFAILURE: sstat --format failed ($matches)\n"
+	set exit_code 1
+}
+
+#
+# Use sacctmgr to delete the test account
+#
+set damatches 0
+set sadel_pid [spawn $sacctmgr -i delete account $test_acct]
+expect {
+	-re "Deleting account" {
+		incr damatches
+		exp_continue
+	}
+	timeout {
+		send_user "\nFAILURE: sacctmgr delete not responding\n"
+		slow_kill $sadel_pid
+		set exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+if {$damatches != 1} {
+	send_user "\nFAILURE: sacctmgr had a problem deleting account\n"
+	set exit_code 1
+}
+
+if {$exit_code == 0} {
+	exec $bin_rm -f $file_in1 $file_in2 $file_in3
+	send_user "\nSUCCESS\n"
+}
+exit $exit_code
diff --git a/testsuite/expect/test24.1 b/testsuite/expect/test24.1
new file mode 100755
index 0000000000000000000000000000000000000000..318e7ac44f95d52920449b83509b85e276ba1d0c
--- /dev/null
+++ b/testsuite/expect/test24.1
@@ -0,0 +1,133 @@
+#!/usr/bin/expect
+############################################################################
+# Purpose:  Test of the priority/multifactor plugin algorithm to verify
+#           correct usage decay.
+#
+# Output:  "TEST: #.#" followed by "SUCCESS" if test was successful, OR
+#          "FAILURE: ..." otherwise with an explanation of the failure, OR
+#          anything else indicates a failure mode that must be investigated.
+#
+# Note:    This script generates and then deletes a file in the working
+#          directory named test24.1.prog
+############################################################################
+# Copyright (C) 2009 Lawrence Livermore National Security.
+# Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+# Written by Danny Auble <da@llnl.gov>
+# CODE-OCEC-09-009. All rights reserved.
+# 
+# This file is part of SLURM, a resource management program.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
+#  
+# SLURM is free software; you can redistribute it and/or modify it under
+# the terms of the GNU General Public License as published by the Free
+# Software Foundation; either version 2 of the License, or (at your option)
+# any later version.
+# 
+# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+# details.
+# 
+# You should have received a copy of the GNU General Public License along
+# with SLURM; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+############################################################################
+source ./globals
+
+set test_id     "24.1"
+set exit_code   0
+set test_prog   "test$test_id.prog"
+set matches     0
+print_header $test_id
+
+#
+# Delete the left-over program and rebuild it
+#
+file delete $test_prog
+
+send_user "build_dir is $build_dir\n"
+if {[test_aix]} {
+	send_user "$bin_cc ${test_prog}.c -ldl -lntbl -fno-gcse -fno-strict-aliasing -Wl,-brtl -Wl,-bgcbypass:1000 -Wl,-bexpfull -Wl,-bmaxdata:0x70000000 -Wl,-brtl -g -lpthreads -o ${test_prog} -I${build_dir} -I${src_dir} ${build_dir}/src/common/libcommon.o ${build_dir}/src/slurmctld/locks.o  ${build_dir}/src/sshare/process.o\n"
+	exec       $bin_cc ${test_prog}.c -ldl -lntbl -fno-gcse -fno-strict-aliasing -Wl,-brtl -Wl,-bgcbypass:1000 -Wl,-bexpfull -Wl,-bmaxdata:0x70000000 -Wl,-brtl -g -lpthreads -o ${test_prog} -I${build_dir} -I${src_dir} ${build_dir}/src/common/libcommon.o ${build_dir}/src/slurmctld/locks.o  ${build_dir}/src/sshare/process.o 
+} else {
+	send_user "$bin_cc ${test_prog}.c -g -pthread -o ${test_prog} -I${build_dir} -I${src_dir} ${build_dir}/src/common/libcommon.o ${build_dir}/src/slurmctld/locks.o  ${build_dir}/src/sshare/process.o -ldl -export-dynamic \n"
+	exec       $bin_cc ${test_prog}.c -g -pthread -o ${test_prog} -I${build_dir} -I${src_dir} ${build_dir}/src/common/libcommon.o ${build_dir}/src/slurmctld/locks.o ${build_dir}/src/sshare/process.o -ldl -export-dynamic 
+}
+	exec $bin_chmod 700 $test_prog
+
+# Usage: test24.1.prog
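+# The program prints the computed fair-share hierarchy in parsable
+# format; each expected line below reads
+# Account|User|Raw Shares|Norm Shares|Raw Usage|Effectv Usage|Fair-share|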
+spawn ./$test_prog
+expect {
+	"No last decay" {
+		send_user "This error is expected.  No worries.\n"
+		exp_continue
+	}
+	"error: Can't save decay state" {
+		send_user "This error is expected.  No worries.\n"
+		exp_continue
+	}
+	"AccountA||40|0.400000|45|0.450000|0.450000|" {
+		incr matches
+		exp_continue
+	}
+	"AccountB||30|0.300000|20|0.200000|0.387500|" {
+		incr matches
+		exp_continue
+	}
+	"AccountB|User1|1|0.300000|20|0.200000|0.387500|" {
+		incr matches
+		exp_continue
+	}
+	"AccountC||10|0.100000|25|0.250000|0.300000|" {
+		incr matches
+		exp_continue
+	}
+	"AccountC|User2|1|0.050000|25|0.250000|0.275000|" {
+		incr matches
+		exp_continue
+	}
+	"AccountC|User3|1|0.050000|0|0.000000|0.150000|" {
+		incr matches
+		exp_continue
+	} 
+	"AccountD||60|0.600000|25|0.250000|0.250000|" {
+		incr matches
+		exp_continue
+	}
+	"AccountE||25|0.250000|25|0.250000|0.250000|" {
+		incr matches
+		exp_continue
+	}
+	"AccountE|User4|1|0.250000|25|0.250000|0.250000|" {
+		incr matches
+		exp_continue
+	}
+	"AccountF||35|0.350000|0|0.000000|0.145833|" {
+		incr matches
+		exp_continue
+	} 
+	"AccountF|User5|1|0.350000|0|0.000000|0.145833|" {
+		incr matches
+		exp_continue
+	}
+	timeout {
+		send_user "\nFAILURE: spawn IO not responding\n"
+		set exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+
+if {$matches != 11} {
+	send_user "\nFAILURE: we didn't get the correct priorities from the plugin $matches\n"
+	set exit_code 1
+}
+
+if {$exit_code == 0} {
+	file delete $test_prog
+	send_user "\nSUCCESS\n"
+}
+exit $exit_code
diff --git a/testsuite/expect/test24.1.prog.c b/testsuite/expect/test24.1.prog.c
new file mode 100644
index 0000000000000000000000000000000000000000..885395dbe8064fb1a0ba02118de93603cbc3f13a
--- /dev/null
+++ b/testsuite/expect/test24.1.prog.c
@@ -0,0 +1,270 @@
+/*****************************************************************************\
+ *  test24.1.prog.c - link against and test the algorithm of the multifactor plugin.
+ *  
+ *  Usage: test24.1.prog 
+ *****************************************************************************
+ *  Copyright (C) 2009 Lawrence Livermore National Security.
+ *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+ *  Written by Danny Auble <da@llnl.gov>
+ *  CODE-OCEC-09-009. All rights reserved.
+ *  
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
+ *  
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *  
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *  
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+#  include "config.h"
+#  if HAVE_INTTYPES_H
+#    include <inttypes.h>
+#  else
+#    if HAVE_STDINT_H
+#      include <stdint.h>
+#    endif
+#  endif			/* HAVE_INTTYPES_H */
+
+#include <time.h>
+#include <strings.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include <slurm/slurm.h>
+#include <slurm/slurm_errno.h>
+
+#include "src/common/slurm_priority.h"
+#include "src/common/assoc_mgr.h"
+#include "src/common/xstring.h"
+#include "src/common/log.h"
+#include "src/sshare/sshare.h"
+
+/* set up some fake system */
+int cluster_procs = 50;
+int long_flag = 1;
+int node_record_count = 1;
+int exit_code = 0;
+sshare_time_format_t time_format = SSHARE_TIME_MINS;
+char *time_format_string = "Minutes";
+
+List   job_list = NULL;		/* job_record list */
+static pthread_mutex_t state_mutex = PTHREAD_MUTEX_INITIALIZER;
+
+/* this will leak memory, but we don't care really */
+static void _list_delete_job(void *job_entry)
+{
+	struct job_record *job_ptr = (struct job_record *) job_entry;
+
+	xfree(job_ptr);
+}
+
+int _setup_assoc_list()
+{
+	acct_update_object_t update;
+	acct_association_rec_t *assoc = NULL;
+	/* make the main list */
+	assoc_mgr_association_list = list_create(destroy_acct_association_rec);
+	
+	/* we just want it to run setup_children, so just pretend
+	   we are running off cache */
+	running_cache = 1;
+	assoc_mgr_init(NULL, NULL);
+
+	/* Here we make the associations we want to add to the
+	   system.  We do this as an update to avoid having to do
+	   setup.
+	*/
+	memset(&update, 0, sizeof(acct_update_object_t));
+	update.type = ACCT_ADD_ASSOC;
+	update.objects = list_create(destroy_acct_association_rec);
+	
+	/* Since we don't want to worry about lft and rgt values here we
+	 * need to put the assocs in hierarchical order using push,
+	 * not append. */
+
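+	/* Resulting hierarchy (raw shares in parentheses):
+	 *   root
+	 *   +- AccountA (40)
+	 *   |    +- AccountB (30) -- User1 (1)
+	 *   |    +- AccountC (10) -- User2 (1), User3 (1)
+	 *   +- AccountD (60)
+	 *        +- AccountE (25) -- User4 (1)
+	 *        +- AccountF (35) -- User5 (1)
+	 */
+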
+	/* First only add the accounts */
+	/* root association */
+	assoc = xmalloc(sizeof(acct_association_rec_t));
+	assoc->id = 1;
+	assoc->acct = xstrdup("root");
+	list_push(update.objects, assoc);
+
+	/* sub of root id 1 */
+	assoc = xmalloc(sizeof(acct_association_rec_t));
+	assoc->id = 2;
+	assoc->parent_id = 1;
+	assoc->shares_raw = 40;
+	assoc->acct = xstrdup("AccountA");
+	list_push(update.objects, assoc);
+
+	/* sub of AccountA id 2 */
+	assoc = xmalloc(sizeof(acct_association_rec_t));
+	assoc->id = 21;
+	assoc->parent_id = 2;
+	assoc->shares_raw = 30;
+	assoc->acct = xstrdup("AccountB");
+	list_push(update.objects, assoc);
+
+	/* sub of AccountB id 21 */
+	assoc = xmalloc(sizeof(acct_association_rec_t));
+	assoc->id = 211;
+	assoc->parent_id = 21;
+	assoc->shares_raw = 1;
+	assoc->usage_raw = 20;
+	assoc->acct = xstrdup("AccountB");
+	assoc->user = xstrdup("User1");
+	list_push(update.objects, assoc);
+
+	/* sub of AccountA id 2 */
+	assoc = xmalloc(sizeof(acct_association_rec_t));
+	assoc->id = 22;
+	assoc->parent_id = 2;
+	assoc->shares_raw = 10;
+	assoc->acct = xstrdup("AccountC");
+	list_push(update.objects, assoc);
+
+	/* sub of AccountC id 22 */
+	assoc = xmalloc(sizeof(acct_association_rec_t));
+	assoc->id = 221;
+	assoc->parent_id = 22;
+	assoc->shares_raw = 1;
+	assoc->usage_raw = 25;
+	assoc->acct = xstrdup("AccountC");
+	assoc->user = xstrdup("User2");
+	list_push(update.objects, assoc);
+
+	assoc = xmalloc(sizeof(acct_association_rec_t));
+	assoc->id = 222;
+	assoc->parent_id = 22;
+	assoc->shares_raw = 1;
+	assoc->usage_raw = 0;
+	assoc->acct = xstrdup("AccountC");
+	assoc->user = xstrdup("User3");
+	list_push(update.objects, assoc);
+
+	/* sub of root id 1 */
+	assoc = xmalloc(sizeof(acct_association_rec_t));
+	assoc->id = 3;
+	assoc->parent_id = 1;
+	assoc->shares_raw = 60;
+	assoc->acct = xstrdup("AccountD");
+	list_push(update.objects, assoc);
+
+	/* sub of AccountD id 3 */
+	assoc = xmalloc(sizeof(acct_association_rec_t));
+	assoc->id = 31;
+	assoc->parent_id = 3;
+	assoc->shares_raw = 25;
+	assoc->acct = xstrdup("AccountE");
+	list_push(update.objects, assoc);
+
+	/* sub of AccountE id 31 */
+	assoc = xmalloc(sizeof(acct_association_rec_t));
+	assoc->id = 311;
+	assoc->parent_id = 31;
+	assoc->shares_raw = 1;
+	assoc->usage_raw = 25;
+	assoc->acct = xstrdup("AccountE");
+	assoc->user = xstrdup("User4");
+	list_push(update.objects, assoc);
+
+	/* sub of AccountD id 3 */
+	assoc = xmalloc(sizeof(acct_association_rec_t));
+	assoc->id = 32;
+	assoc->parent_id = 3;
+	assoc->shares_raw = 35;
+	assoc->acct = xstrdup("AccountF");
+	list_push(update.objects, assoc);
+
+	/* sub of AccountF id 32 */
+	assoc = xmalloc(sizeof(acct_association_rec_t));
+	assoc->id = 321;
+	assoc->parent_id = 32;
+	assoc->shares_raw = 1;
+	assoc->usage_raw = 0;
+	assoc->acct = xstrdup("AccountF");
+	assoc->user = xstrdup("User5");
+	list_push(update.objects, assoc);
+
+	assoc_mgr_update_assocs(&update);
+	list_destroy(update.objects);
+	return SLURM_SUCCESS;
+}
+
+int main (int argc, char **argv)
+{
+	log_options_t logopt = LOG_OPTS_STDERR_ONLY;
+	slurm_ctl_conf_t *conf = NULL;
+	shares_response_msg_t resp;
+
+	log_init(xbasename(argv[0]), logopt, 0, NULL);
+	xfree(slurmctld_conf.priority_type);
+	//logopt.stderr_level += 5;
+	logopt.prefix_level = 1;
+	log_alter(logopt, 0, NULL);
+	print_fields_have_header = 0;
+	print_fields_parsable_print = PRINT_FIELDS_PARSABLE_ENDING;
+	
+	conf = slurm_conf_lock();
+	/* force priority type to be multifactor */
+	xfree(conf->priority_type);
+	conf->priority_type = xstrdup("priority/multifactor");
+	/* force accounting type to be slurmdbd (It doesn't really talk
+	 * to any database, but needs this to work with fairshare
+	 * calculation). */
+	xfree(conf->accounting_storage_type);
+	conf->accounting_storage_type = xstrdup("accounting_storage/slurmdbd");
+	/* set up a known environment to test against.  Since we are
+	   only concerned about the fairshare we won't look at the other
+	   factors here.
+	*/
+	conf->priority_decay_hl = 1;
+	conf->priority_favor_small = 0;
+	conf->priority_max_age = conf->priority_decay_hl;
+	conf->priority_weight_age = 0;
+	conf->priority_weight_fs = 10000;
+	conf->priority_weight_js = 0;
+	conf->priority_weight_part = 0;
+	conf->priority_weight_qos = 0;
+	slurm_conf_unlock();
+
+	/* we don't want to do any decay here so make the save state
+	 * to /dev/null */
+	xfree(slurmctld_conf.state_save_location);
+	slurmctld_conf.state_save_location = "/dev/null";
+	/* now set up the association tree */
+	_setup_assoc_list();
+	/* now set up the job list */
+	job_list = list_create(_list_delete_job);
+
+	/* now init the priorities of the associations */
+	if (slurm_priority_init() != SLURM_SUCCESS)
+		fatal("failed to initialize priority plugin");
+	/* on some systems that don't have multiple cores we need to
+	   sleep to make sure the thread gets started. */
+	sleep(1);
+	memset(&resp, 0, sizeof(shares_response_msg_t));
+	resp.assoc_shares_list = assoc_mgr_get_shares(NULL, 0, NULL, NULL);
+	process(&resp);
+
+	/* free memory */
+	if (slurm_priority_fini() != SLURM_SUCCESS)
+		fatal("failed to finalize priority plugin");
+	if(job_list)
+		list_destroy(job_list);
+	if(resp.assoc_shares_list)
+		list_destroy(resp.assoc_shares_list);
+	if(assoc_mgr_association_list)
+		list_destroy(assoc_mgr_association_list);
+	return 0;
+}
diff --git a/testsuite/expect/test24.2 b/testsuite/expect/test24.2
new file mode 100755
index 0000000000000000000000000000000000000000..1344b4f1096173ad3f9226bb2f4cc1c99a1229ec
--- /dev/null
+++ b/testsuite/expect/test24.2
@@ -0,0 +1,430 @@
+#!/usr/bin/expect
+############################################################################
+# Purpose: Test of SLURM sshare functionality
+#          sshare h, p, P, v and V options.
+#
+# Output:  "TEST: #.#" followed by "SUCCESS" if test was successful, OR
+#          "FAILURE: ..." otherwise with an explanation of the failure, OR
+#          anything else indicates a failure mode that must be investigated.
+############################################################################
+# Copyright (C) 2009 Lawrence Livermore National Security.
+# Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+# Written by Joseph Donaghy <donaghy1@llnl.gov>
+# CODE-OCEC-09-009. All rights reserved.
+# 
+# This file is part of SLURM, a resource management program.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
+#  
+# SLURM is free software; you can redistribute it and/or modify it under
+# the terms of the GNU General Public License as published by the Free
+# Software Foundation; either version 2 of the License, or (at your option)
+# any later version.
+# 
+# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+# details.
+# 
+# You should have received a copy of the GNU General Public License along
+# with SLURM; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+############################################################################
+source ./globals
+
+set test_id     "24.2"
+set exit_code   0
+set timeout 60
+print_header $test_id
+
+#
+# Check accounting config and bail if not found.
+#
+if { [test_account_storage] == 0 } {
+	send_user "\nWARNING: This test can't be run without a usable AccountStorageType\n"
+	exit 0
+}
+
+################################################################
+#
+# Proc: sshare_opt
+#
+# Purpose:  Pass sshare options and test
+#
+# Returns: Number of matches.
+#
+# Input: Switch options not requiring arguments
+#
+################################################################
+
+proc sshare_opt { soption } {
+	global sshare number
+	set debug       1
+	set exit_code   0
+	set matches     0
+	set not_support 0
+	send_user "$sshare -$soption \n"
+
+	if { $soption == "-help"|| $soption == "-usage" } {
+		
+		spawn $sshare -$soption
+		expect {
+			-re "SLURM accounting storage is disabled" {
+				set not_support 1
+				exp_continue
+			}
+			-re "You are not running a supported priority plugin" {
+				set not_support 2
+				exp_continue
+			}
+			-re "Usage: *sshare *.OPTION" {
+				if {$debug} {send_user "\nmatch1\n"}
+				incr matches
+				exp_continue
+			}
+			-re "Valid OPTIONs are" {
+				if {$debug} {send_user "\nmatch2\n"}
+				incr matches
+				exp_continue
+			}
+			-re "verbose *display more information" {
+				if {$debug} {send_user "\nmatch3\n"}
+				incr matches
+				exp_continue
+			}
+			timeout {
+				send_user "\nFAILURE: sshare not responding\n"
+				set exit_code 1
+			}
+			eof {
+				wait
+			}
+		}
+
+		if {$not_support == 1} {
+			send_user "\nWARNING: can not test without accounting enabled\n"
+			exit 0
+		}
+
+		if {$not_support == 2} {
+			send_user "\nWARNING: can not test without priority/multifactor plugin\n"
+			exit 0
+		}
+
+		if {$matches != 3} {
+			send_user "\nFAILURE: sshare -$soption failed ($matches)\n"
+			set exit_code 1
+		}
+		return $matches
+	}
+
+	if { $soption == "-noheader" || $soption == "h" } {
+
+		spawn $sshare -$soption
+		expect {
+			-re "SLURM accounting storage is disabled" {
+				set not_support 1
+				exp_continue
+			}
+			-re "You are not running a supported priority plugin" {
+				set not_support 2
+				exp_continue
+			}
+			-re "Account|User|Raw Shares|Norm Shares|Raw Usage|Norm Usage|Effectv Usage" {	
+				if {$debug} {send_user "\nmatch4\n"}
+				incr matches
+				exp_continue
+			}
+			timeout {
+				send_user "\nFAILURE: sshare not responding\n"
+				set exit_code 1
+			}
+			eof {
+				wait
+			}
+		}
+
+		if {$not_support == 1} {
+			send_user "\nWARNING: can not test without accounting enabled\n"
+			exit 0
+		}
+
+		if {$not_support == 2} {
+			send_user "\nWARNING: can not test without priority/multifactor plugin\n"
+			exit 0
+		}
+
+		if {$matches != 0} {
+			send_user "\nFAILURE: sshare -$soption failed ($matches)\n"
+			set exit_code 1
+		}
+		return $matches
+	}
+
+	if { $soption == "-parsable" || $soption == "p" } {
+
+		spawn $sshare -$soption
+		expect {
+			-re "SLURM accounting storage is disabled" {
+				set not_support 1
+				exp_continue
+			}
+			-re "You are not running a supported priority plugin" {
+				set not_support 2
+				exp_continue
+			}
+			-re "Account\\|User\\|Raw Shares\\|Norm Shares\\|" {
+				if {$debug} {send_user "\nmatch5\n"}
+				incr matches
+				exp_continue
+			}
+			-re "Raw Usage\\|Effectv Usage\\|Fair-share\\|" {
+				if {$debug} {send_user "\nmatch5\n"}
+				incr matches
+				exp_continue
+			}
+			timeout {
+				send_user "\nFAILURE: sshare not responding\n"
+				set exit_code 1
+			}
+			eof {
+				wait
+			}
+		}
+
+		if {$not_support != 0} {
+			send_user "\nWARNING: can not test without accounting enabled\n"
+			exit 0
+		}
+		if {$matches != 2} {
+			send_user "\nFAILURE: sshare -$soption failed ($matches)\n"
+			set exit_code 1
+		}
+		return $matches
+	}
+
+	if { $soption == "-parsable2" || $soption == "P" } {
+
+		spawn $sshare -$soption
+		expect {
+			-re "SLURM accounting storage is disabled" {
+				set not_support 1
+				exp_continue
+			}
+			-re "You are not running a supported priority plugin" {
+				set not_support 2
+				exp_continue
+			}
+			-re "Account\\|User\\|Raw Shares\\|Norm Shares\\|" {
+				if {$debug} {send_user "\nmatch6\n"}
+				incr matches
+				exp_continue
+			}
+			-re "Raw Usage\\|Effectv Usage\\|Fair-share" {
+				if {$debug} {send_user "\nmatch7\n"}
+				incr matches
+				exp_continue
+			}
+			timeout {
+				send_user "\nFAILURE: sshare not responding\n"
+				set exit_code 1
+			}
+			eof {
+				wait
+			}
+		}
+
+		if {$not_support == 1} {
+			send_user "\nWARNING: can not test without accounting enabled\n"
+			exit 0
+		}
+
+		if {$not_support == 2} {
+			send_user "\nWARNING: can not test without priority/multifactor plugin\n"
+			exit 0
+		}
+
+		if {$matches != 2} {
+			send_user "\nFAILURE: sshare -$soption failed ($matches)\n"
+			set exit_code 1
+		}
+		return $matches
+	}
+
+	if { $soption == "-verbose" || $soption == "v" } {
+
+		spawn $sshare -$soption
+		expect {
+			-re "SLURM accounting storage is disabled" {
+				set not_support 1
+				exp_continue
+			}
+			-re "You are not running a supported priority plugin" {
+				set not_support 2
+				exp_continue
+			}
+			-re "Users requested" {
+				if {$debug} {send_user "\nmatch8\n"}
+				incr matches
+				exp_continue
+			}
+			-re "Accounts requested" {
+				if {$debug} {send_user "\nmatch9\n"}
+				incr matches
+				exp_continue
+			}
+			-re "sshare: .* loaded" {
+				if {$debug} {send_user "\nmatch10\n"}
+				incr matches
+				exp_continue
+			}
+			timeout {
+				send_user "\nFAILURE: sshare not responding\n"
+				set exit_code 1
+			}
+			eof {
+				wait
+			}
+		}
+
+		if {$not_support == 1} {
+			send_user "\nWARNING: can not test without accounting enabled\n"
+			exit 0
+		}
+
+		if {$not_support == 2} {
+			send_user "\nWARNING: can not test without priority/multifactor plugin\n"
+			exit 0
+		}
+
+		if {$matches != 3} {
+			send_user "\nFAILURE: sshare -$soption failed ($matches)\n"
+			set exit_code 1
+		}
+		return $matches
+	}
+
+	if { $soption == "-version" || $soption == "V" } {
+
+		spawn $sshare -$soption
+		expect {
+			-re "SLURM accounting storage is disabled" {
+				set not_support 1
+				exp_continue
+			}
+			-re "You are not running a supported priority plugin" {
+				set not_support 2
+				exp_continue
+			}
+			-re "slurm $number.$number.$number" {
+				if {$debug} {send_user "\nmatch11\n"}
+				incr matches
+				exp_continue
+			}
+			timeout {
+				send_user "\nFAILURE: sshare not responding\n"
+				set exit_code 1
+			}
+			eof {
+				wait
+			}
+		}
+
+		if {$not_support == 1} {
+			send_user "\nWARNING: can not test without accounting enabled\n"
+			exit 0
+		}
+
+		if {$not_support == 2} {
+			send_user "\nWARNING: can not test without priority/multifactor plugin\n"
+			exit 0
+		}
+
+		if {$matches != 1} {
+			send_user "\nFAILURE: sshare -$soption failed ($matches)\n"
+			set exit_code 1
+		}
+		return $matches
+	}
+}
+
+################################################################
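+# Drive sshare_opt with each short and long option spelling and
+# double-check the match counts it returns.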
+
+set matches [sshare_opt -help ]
+if {$matches != 3} {
+	send_user "\nFAILURE: sshare --help failed ($matches)\n"
+	set exit_code 1
+}
+
+set matches [sshare_opt -usage ]
+if {$matches != 3} {
+	send_user "\nFAILURE: sshare --usage failed ($matches)\n"
+	set exit_code 1
+}
+
+set matches [sshare_opt h ]
+if {$matches != 0} {
+	send_user "\nFAILURE: sshare -n failed ($matches)\n"
+	set exit_code 1
+}
+
+set matches [sshare_opt -noheader ]
+if {$matches != 0} {
+	send_user "\nFAILURE: sshare --noheader failed ($matches)\n"
+	set exit_code 1
+}
+
+set matches [sshare_opt p ]
+if {$matches != 2} {
+	send_user "\nFAILURE: sshare -p failed ($matches)\n"
+	set exit_code 1
+}
+
+set matches [sshare_opt -parsable ]
+if {$matches != 2} {
+	send_user "\nFAILURE: sshare --parsable failed ($matches)\n"
+	set exit_code 1
+}
+
+set matches [sshare_opt P ]
+if {$matches != 2} {
+	send_user "\nFAILURE: sshare -P failed ($matches)\n"
+	set exit_code 1
+}
+
+set matches [sshare_opt -parsable2 ]
+if {$matches != 2} {
+	send_user "\nFAILURE: sshare --parsable2 failed ($matches)\n"
+	set exit_code 1
+}
+
+set matches [sshare_opt v ]
+if {$matches != 3} {
+	send_user "\nFAILURE: sshare -v failed ($matches)\n"
+	set exit_code 1
+}
+
+set matches [sshare_opt -verbose ]
+if {$matches != 3} {
+	send_user "\nFAILURE: sshare --verbose failed ($matches)\n"
+	set exit_code 1
+}
+
+set matches [sshare_opt V ]
+if {$matches != 1} {
+	send_user "\nFAILURE: sshare -v failed ($matches)\n"
+	set exit_code 1
+}
+
+set matches [sshare_opt -version ]
+if {$matches != 1} {
+	send_user "\nFAILURE: sshare --verbose failed ($matches)\n"
+	set exit_code 1
+}
+
+if {$exit_code == 0} {
+	send_user "\nSUCCESS\n"
+}
+exit $exit_code
diff --git a/testsuite/expect/test25.1 b/testsuite/expect/test25.1
new file mode 100755
index 0000000000000000000000000000000000000000..5635c0dfc68c8754e4b606439b70dfd2724108c4
--- /dev/null
+++ b/testsuite/expect/test25.1
@@ -0,0 +1,690 @@
+#!/usr/bin/expect
+############################################################################
+# Purpose: Test of SLURM sprio functionality.
+#
+#          sprio: all options and arguments.
+#
+# Output:  "TEST: #.#" followed by "SUCCESS" if test was successful, OR
+#          "FAILURE: ..." otherwise with an explanation of the failure, OR
+#          anything else indicates a failure mode that must be investigated.
+############################################################################
+# Copyright (C) 2009 Lawrence Livermore National Security.
+# Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+# Written by Joseph Donaghy <donaghy1@llnl.gov>
+# CODE-OCEC-09-009. All rights reserved.
+# 
+# This file is part of SLURM, a resource management program.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
+#  
+# SLURM is free software; you can redistribute it and/or modify it under
+# the terms of the GNU General Public License as published by the Free
+# Software Foundation; either version 2 of the License, or (at your option)
+# any later version.
+# 
+# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+# details.
+# 
+# You should have received a copy of the GNU General Public License along
+# with SLURM; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+############################################################################
+source ./globals
+
+set test_id     "25.1"
+set exit_code   0
+set file_in     "test$test_id.input"
+set timeout	60
+print_header	$test_id
+
+#
+# Check accounting config and bail if not found.
+#
+if { [test_account_storage] == 0 } {
+	send_user "\nWARNING: This test can't be run without a usable AccountStorageType\n"
+	exit 0
+}
+if { [string compare [priority_type] multifactor] } {
+	send_user "\nWARNING: This test can't be run without a usable PriorityType\n"
+	exit 0
+}
+
+#
+# Build input script file
+#
+make_bash_script $file_in "$bin_sleep 600"
+
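+# Determine the node count of the default partition (the one sinfo
+# flags with a trailing "*") so each test job can request all of it
+# with --exclusive.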
+proc def_node_cnt { } {
+	global sinfo alpha_numeric_under number exit_code
+
+	set node_cnt 1
+	spawn $sinfo -h -o "name=%20P node_cnt=%D"
+	expect {
+		-re "name=($alpha_numeric_under)\\\* *node_cnt=($number)" {
+			set node_cnt $expect_out(2,string) 
+			exp_continue
+		}
+		timeout {
+			send_user "\nFAILURE: sbatch not responding\n"
+			set exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+	return $node_cnt
+}
+
+################################################################
+#
+# Proc: sub_job
+#
+# Purpose:  Submit a job
+#
+# Returns: Job ID
+#
+################################################################
+
+proc sub_job { node_cnt } {
+
+	global exit_code file_in number sbatch test_id
+	set file_in	test${test_id}.input
+
+	set job_id	0
+	spawn $sbatch --output=/dev/null --error=/dev/null -N $node_cnt --exclusive $file_in
+	expect {
+		-re "Submitted batch job ($number)" {
+			set job_id $expect_out(1,string) 
+			exp_continue
+		}
+		timeout {
+			send_user "\nFAILURE: sbatch not responding\n"
+			set exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+
+	if {$job_id == 0} {
+		send_user "\nFAILURE: did not get sbatch job_id\n"
+		set exit_code 1
+	}
+	return $job_id
+}
+
+################################################################
+#
+# Proc: sprio_opt
+#
+# Purpose:  Pass sprio options and test
+#
+# Returns: Number of matches.
+#
+# Input: Switch options not requiring arguments
+#
+################################################################
+
+proc sprio_opt { soption } {
+	global number sprio exit_code
+	set debug       0
+	set matches     0
+	set not_support 0
+	send_user "$sprio $soption \n"
+
+############# sprio help option
+	if { $soption == "--help" } {
+
+		spawn $sprio $soption
+		expect {
+			-re "SLURM accounting storage is disabled" {
+				set not_support 1
+				exp_continue
+			}
+			-re "noheader.*jobs.*long.*norm.*format.*user.*verbose.*version.*weights" {
+				if {$debug} {send_user "\nmatch1\n"}
+				incr matches
+				exp_continue
+			}
+			-re "Help options:" {
+				if {$debug} {send_user "\nmatch2\n"}
+				incr matches
+				exp_continue
+			}
+			-re "help *show this help message" {
+				if {$debug} {send_user "\nmatch3\n"}
+				incr matches
+				exp_continue
+			}
+			-re "usage *display a brief summary of sprio options" {
+				if {$debug} {send_user "\nmatch4\n"}
+				incr matches
+				exp_continue
+			}
+			timeout {
+				send_user "\nFAILURE: sprio not responding\n"
+				set exit_code 1
+			}
+			eof {
+				wait
+			}
+		}
+
+		if {$not_support == 1} {
+			send_user "\nWARNING: can not test without accounting enabled\n"
+			exit 0
+		}
+
+		if {$matches != 4} {
+			send_user "\nFAILURE: sprio $soption failed ($matches)\n"
+			set exit_code 1
+		}
+		return $matches
+	}
+
+############# sprio usage option
+	if { $soption == "--usage" } {
+
+		spawn $sprio $soption
+		expect {
+			-re "SLURM accounting storage is disabled" {
+				set not_support 1
+				exp_continue
+			}
+			-re "Usage: sprio .-j jid.s.. .-u user_name.s.. .-o format. .--usage. .-hlnvVw." {	
+				if {$debug} {send_user "\nmatch5\n"}
+				incr matches
+				exp_continue
+			}
+			timeout {
+				send_user "\nFAILURE: sprio not responding\n"
+				set exit_code 1
+			}
+			eof {
+				wait
+			}
+		}
+
+		if {$not_support == 1} {
+			send_user "\nWARNING: can not test without accounting enabled\n"
+			exit 0
+		}
+		if {$matches != 1} {
+			send_user "\nFAILURE: sprio -$soption failed ($matches)\n"
+			set exit_code 1
+		}
+		return $matches
+	}
+
+############# sprio version options
+	if { $soption == "--version" || $soption == "-V" } {
+
+		spawn $sprio $soption
+		expect {
+			-re "SLURM accounting storage is disabled" {
+				set not_support 1
+				exp_continue
+			}
+			-re "slurm $number.$number.$number" {
+				if {$debug} {send_user "\nmatch6\n"}
+				incr matches
+				exp_continue
+			}
+			timeout {
+				send_user "\nFAILURE: sprio not responding\n"
+				set exit_code 1
+			}
+			eof {
+				wait
+			}
+		}
+
+		if {$not_support == 1} {
+			send_user "\nWARNING: can not test without accounting enabled\n"
+			exit 0
+		}
+
+		if {$matches != 1} {
+			send_user "\nFAILURE: sprio -$soption failed ($matches)\n"
+			set exit_code 1
+		}
+		return $matches
+	}
+
+############# sprio weights options
+	if { $soption == "--weights" || $soption == "-w" } {
+
+		spawn $sprio $soption
+		expect {
+			-re "SLURM accounting storage is disabled" {
+				set not_support 1
+				exp_continue
+			}
+			-re "JOBID *PRIORITY" {
+				if {$debug} {send_user "\nmatch7\n"}
+				incr matches
+				exp_continue
+			}
+			-re "Weights" {
+				if {$debug} {send_user "\nmatch8\n"}
+				incr matches
+				exp_continue
+			}
+			timeout {
+				send_user "\nFAILURE: sprio not responding\n"
+				set exit_code 1
+			}
+			eof {
+				wait
+			}
+		}
+
+		if {$not_support == 1} {
+			send_user "\nWARNING: can not test without accounting enabled\n"
+			exit 0
+		}
+
+		if {$matches != 2} {
+			send_user "\nFAILURE: sprio -$soption failed ($matches)\n"
+			set exit_code 1
+		}
+		return $matches
+	}
+}
+
+################################################################
+#
+# Proc: sprio_args
+#
+# Purpose:  Pass sprio options, arguments and test
+#
+# Returns: Number of matches.
+#
+# Input: Switch options requiring arguments
+#
+################################################################
+
+proc sprio_args { soption sargs jobid } {
+	global number float sprio exit_code
+	set debug       0
+	set matches     0
+	set not_support 0
+	send_user "$sprio $soption $sargs $jobid\n"
+
+############# sprio noheader options
+	if { $soption == "--noheader" || $soption == "-h" } {
+
+		spawn $sprio $soption $sargs $jobid
+		expect {
+			-re "SLURM accounting storage is disabled" {
+				set not_support 1
+				exp_continue
+			}
+			-re "JOBID|PRIORITY|AGE|FAIRSHARE|JOBSIZE|PARTITION|QOS" {	
+				if {$debug} {send_user "\nmatch9\n"}
+				incr matches
+				exp_continue
+			}
+			timeout {
+				send_user "\nFAILURE: sprio not responding\n"
+				set exit_code 1
+			}
+			eof {
+				wait
+			}
+		}
+
+		if {$not_support == 1} {
+			send_user "\nWARNING: can not test without accounting enabled\n"
+			exit 0
+		}
+		if {$matches != 0} {
+			send_user "\nFAILURE: sprio $soption failed ($matches)\n"
+			set exit_code 1
+		}
+		return $matches
+	}
+
+############# sprio jobs options
+	if { $soption == "--jobs" || $soption == "-j" } {
+
+		spawn $sprio $soption $jobid
+		expect {
+			-re "SLURM accounting storage is disabled" {
+				set not_support 1
+				exp_continue
+			}
+			-re "JOBID.*PRIORITY" {	
+				if {$debug} {send_user "\nmatch10\n"}
+				incr matches
+				exp_continue
+			}
+			-re "$jobid *$number" {	
+				if {$debug} {send_user "\nmatch11\n"}
+				incr matches
+				exp_continue
+			}
+			timeout {
+				send_user "\nFAILURE: sprio not responding\n"
+				set exit_code 1
+			}
+			eof {
+				wait
+			}
+		}
+
+		if {$not_support == 1} {
+			send_user "\nWARNING: can not test without accounting enabled\n"
+			exit 0
+		}
+		if {$matches != 2} {
+			send_user "\nFAILURE: sprio $soption failed ($matches)\n"
+			set exit_code 1
+		}
+		return $matches
+	}
+
+############# sprio long options
+	if { $soption == "--long" || $soption == "-l" } {
+
+		spawn $sprio $soption $sargs $jobid
+		expect {
+			-re "SLURM accounting storage is disabled" {
+				set not_support 1
+				exp_continue
+			}
+			-re "JOBID     USER   PRIORITY        AGE  FAIRSHARE    JOBSIZE  PARTITION        QOS   NICE" {	
+				if {$debug} {send_user "\nmatch12\n"}
+				incr matches
+				exp_continue
+			}
+			-re "$jobid.*$number *$number *$number *$number *$number *$number *$number" {	
+				if {$debug} {send_user "\nmatch13\n"}
+				incr matches
+				exp_continue
+			}
+			timeout {
+				send_user "\nFAILURE: sprio not responding\n"
+				set exit_code 1
+			}
+			eof {
+				wait
+			}
+		}
+
+		if {$not_support == 1} {
+			send_user "\nWARNING: can not test without accounting enabled\n"
+			exit 0
+		}
+		if {$matches != 2} {
+			send_user "\nFAILURE: sprio $soption failed ($matches)\n"
+			set exit_code 1
+		}
+		return $matches
+	}
+
+############# sprio norm options
+	if { $soption == "--norm" || $soption == "-n" } {
+
+		spawn $sprio $soption $sargs $jobid
+		expect {
+			-re "SLURM accounting storage is disabled" {
+				set not_support 1
+				exp_continue
+			}
+			-re "JOBID *PRIORITY" {	
+				if {$debug} {send_user "\nmatch14\n"}
+				incr matches
+				exp_continue
+			}
+			-re "$jobid *$float" {	
+				if {$debug} {send_user "\nmatch15\n"}
+				incr matches
+				exp_continue
+			}
+			timeout {
+				send_user "\nFAILURE: sprio not responding\n"
+				set exit_code 1
+			}
+			eof {
+				wait
+			}
+		}
+
+		if {$not_support == 1} {
+			send_user "\nWARNING: can not test without accounting enabled\n"
+			exit 0
+		}
+		if {$matches != 2} {
+			send_user "\nFAILURE: sprio $soption failed ($matches)\n"
+			set exit_code 1
+		}
+		return $matches
+	}
+
+############# sprio format options
+	if { $soption == "--format" || $soption == "-o" } {
+
+		spawn $sprio $soption "%.7i %.8u %.10y %.10Y %.10a %.10A %.10f %.10F %.10j %.10J %.10p %.10P %.10q %.10Q %.6N" $sargs $jobid
+		expect {
+			-re "SLURM accounting storage is disabled" {
+				set not_support 1
+				exp_continue
+			}
+			-re "JOBID     USER   PRIORITY   PRIORITY        AGE        AGE  " {
+				if {$debug} {send_user "\nmatch16\n"}
+				incr matches
+				exp_continue
+			}
+			-re "FAIRSHARE  FAIRSHARE    JOBSIZE    JOBSIZE  PARTITION  PARTITION        QOS        QOS   NICE" {
+				if {$debug} {send_user "\nmatch17\n"}
+				incr matches
+				exp_continue
+			}
+			-re "$jobid *.* *$float *$number *$float *$number *$float *$number *$float *$number *$float *$number *$float *$number *$number" {	
+				if {$debug} {send_user "\nmatch18\n"}
+				incr matches
+				exp_continue
+			}
+			timeout {
+				send_user "\nFAILURE: sprio not responding\n"
+				set exit_code 1
+			}
+			eof {
+				wait
+			}
+		}
+
+		if {$not_support == 1} {
+			send_user "\nWARNING: cannot test without accounting enabled\n"
+			exit 0
+		}
+		if {$matches != 3} {
+			send_user "\nFAILURE: sprio $soption failed ($matches)\n"
+			set exit_code 1
+		}
+		return $matches
+	}
+
+############# sprio u option
+	if { $soption == "-u" } {
+
+		spawn $sprio $soption $sargs
+		expect {
+			-re "SLURM accounting storage is disabled" {
+				set not_support 1
+				exp_continue
+			}
+			-re "JOBID *USER" {	
+				if {$debug} {send_user "\nmatch19\n"}
+				incr matches
+				exp_continue
+			}
+			-re "$jobid *$sargs" {	
+				if {$debug} {send_user "\nmatch20\n"}
+				incr matches
+				exp_continue
+			}
+			timeout {
+				send_user "\nFAILURE: sprio not responding\n"
+				set exit_code 1
+			}
+			eof {
+				wait
+			}
+		}
+
+		if {$not_support == 1} {
+			send_user "\nWARNING: cannot test without accounting enabled\n"
+			exit 0
+		}
+		if {$matches != 2} {
+			send_user "\nFAILURE: sprio $soption failed ($matches)\n"
+			set exit_code 1
+		}
+		return $matches
+	}
+
+############# sprio user= option
+	if { $soption == "--user=" } {
+
+		spawn $sprio $soption$sargs
+		expect {
+			-re "SLURM accounting storage is disabled" {
+				set not_support 1
+				exp_continue
+			}
+			-re "JOBID *USER" {	
+				if {$debug} {send_user "\nmatch21\n"}
+				incr matches
+				exp_continue
+			}
+			-re "$jobid *$sargs" {	
+				if {$debug} {send_user "\nmatch22\n"}
+				incr matches
+				exp_continue
+			}
+			timeout {
+				send_user "\nFAILURE: sprio not responding\n"
+				set exit_code 1
+			}
+			eof {
+				wait
+			}
+		}
+
+		if {$not_support == 1} {
+			send_user "\nWARNING: cannot test without accounting enabled\n"
+			exit 0
+		}
+		if {$matches != 2} {
+			send_user "\nFAILURE: sprio $soption failed ($matches)\n"
+			set exit_code 1
+		}
+		return $matches
+	}
+
+############# sprio verbose options
+	if { $soption == "--verbose" || $soption == "-v" } {
+
+		spawn $sprio $soption $sargs $jobid
+		expect {
+			-re "SLURM accounting storage is disabled" {
+				set not_support 1
+				exp_continue
+			}
+			-re "format.*job_flag.*jobs.*$jobid.*verbose" {	
+				if {$debug} {send_user "\nmatch23\n"}
+				incr matches
+				exp_continue
+			}
+			-re "JOBID *PRIORITY" {	
+				if {$debug} {send_user "\nmatch24\n"}
+				incr matches
+				exp_continue
+			}
+			-re "$jobid *$number *$number" {	
+				if {$debug} {send_user "\nmatch25\n"}
+				incr matches
+				exp_continue
+			}
+			timeout {
+				send_user "\nFAILURE: sprio not responding\n"
+				set exit_code 1
+			}
+			eof {
+				wait
+			}
+		}
+
+		if {$not_support == 1} {
+			send_user "\nWARNING: cannot test without accounting enabled\n"
+			exit 0
+		}
+		if {$matches != 3} {
+			send_user "\nFAILURE: sprio $soption failed ($matches)\n"
+			set exit_code 1
+		}
+		return $matches
+	}
+
+}
+
+################################################################
+# Start a group of jobs
+
+set node_cnt [def_node_cnt]
+set jobid1 [sub_job $node_cnt]
+set jobid2 [sub_job $node_cnt]
+if {$exit_code != 0} {
+	cancel_job $jobid1
+	cancel_job $jobid2
+	exit $exit_code
+}
+send_user "\nSubmitted 2 jobs successfully\n\n"
+
+#
+# Collect uid
+#
+set nuid [get_my_nuid]
+
+#
+# Start testing sprio options and arguments
+#
+sprio_args -h -j $jobid2
+sprio_args --noheader -j $jobid2
+sprio_args -j -j $jobid2
+sprio_args --jobs -j $jobid2
+sprio_args -l -j $jobid2
+sprio_args --long -j $jobid2
+sprio_args -n -j $jobid2
+sprio_args --norm -j $jobid2
+sprio_args -o -j $jobid2
+sprio_args --format -j $jobid2
+sprio_args -u $nuid $jobid2
+sprio_args --user= $nuid $jobid2
+sprio_args -v -j $jobid2
+sprio_args --verbose -j $jobid2
+sprio_opt -V
+sprio_opt --version
+sprio_opt -w
+sprio_opt --weights
+sprio_opt --help
+sprio_opt --usage
+
+#
+# Cancel jobs
+#
+cancel_job $jobid1
+cancel_job $jobid2
+
+#
+# Exit with code as appropriate
+#
+if {$exit_code == 0} {
+	exec $bin_rm -f $file_in
+	send_user "\nSUCCESS\n"
+}
+exit $exit_code
diff --git a/testsuite/expect/test3.1 b/testsuite/expect/test3.1
index 34eaa662f1b60b3b4cabcde8d6fc3450f1b4e594..20656c90d12b13bb80cbb810023b5c7abf6511a8 100755
--- a/testsuite/expect/test3.1
+++ b/testsuite/expect/test3.1
@@ -10,10 +10,11 @@
 # Copyright (C) 2002 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test3.10 b/testsuite/expect/test3.10
index 4197f9dd90dcdb012bc65937d7dd44993f1d6ba0..67ea4c99ded02b223e6ffe7bb4503a834fe09e3c 100755
--- a/testsuite/expect/test3.10
+++ b/testsuite/expect/test3.10
@@ -10,10 +10,11 @@
 # Copyright (C) 2007 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test3.11 b/testsuite/expect/test3.11
new file mode 100755
index 0000000000000000000000000000000000000000..aa976e8033195d14a4d7fa39a620ce552ddcd6d4
--- /dev/null
+++ b/testsuite/expect/test3.11
@@ -0,0 +1,525 @@
+#!/usr/bin/expect
+############################################################################
+# Purpose: Test of SLURM functionality
+#          Validate scontrol create, delete, and update for reservations.
+#
+# Output:  "TEST: #.#" followed by "SUCCESS" if test was successful, OR
+#          "FAILURE: ..." otherwise with an explanation of the failure, OR
+#          anything else indicates a failure mode that must be investigated.
+############################################################################
+# Copyright (C) 2009 Lawrence Livermore National Security
+# Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+# Written by Dave Bremer <dbremer@llnl.gov>
+# CODE-OCEC-09-009. All rights reserved.
+#
+# 
+# This file is part of SLURM, a resource management program.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
+#  
+# SLURM is free software; you can redistribute it and/or modify it under
+# the terms of the GNU General Public License as published by the Free
+# Software Foundation; either version 2 of the License, or (at your option)
+# any later version.
+# 
+# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+# details.
+# 
+# You should have received a copy of the GNU General Public License along
+# with SLURM; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+############################################################################
+source ./globals
+
+set test_id       "3.11"
+set file_in       "test$test_id.input"
+set exit_code     0
+set res_name      ""
+set user_name     ""
+set def_partition ""
+set def_node      ""
+set ii            0
+set job_id        0
+
+print_header $test_id
+
+
+#
+# Procedure to create a new reservation and validate it.
+# Modifies the global var res_name in the process
+#
+proc create_res { res_params failure_expected } {
+	#exp_internal 1
+	global scontrol
+	global alpha_numeric_under
+	global res_name
+
+	set ret_code 0
+	set res_name ""
+
+	#
+	# Create a reservation using the list of params in res_params
+	#
+	set arglist [linsert $res_params 0 $scontrol create res]
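+	# linsert builds a flat Tcl list whose leading words are the
+	# command itself, so "eval spawn" below expands the list into
+	# separate arguments rather than passing res_params as one word.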
+	eval spawn $arglist
+	expect {
+		-re "Reservation created: ($alpha_numeric_under)" {
+			set res_name $expect_out(1,string)
+		}
+		-re "Error creating the reservation: Invalid user" {
+			if {!$failure_expected} {
+				send_user "\nFAILURE: user not authorized "
+				send_user "to create reservation\n"
+			}
+			set ret_code 1
+			exp_continue
+		}
+		-re "Error" {
+			if {!$failure_expected} {
+				send_user "\nFAILURE: problem creating "
+				send_user "reservation with args: $res_params\n"
+			}
+			set ret_code 1
+			exp_continue
+		}
+		-re "error" {
+			if {!$failure_expected} {
+				send_user "\nFAILURE: problem creating "
+				send_user "reservation with args: $res_params\n"
+			}
+			set ret_code 1
+			exp_continue
+		}
+		timeout {
+			send_user "\nFAILURE: scontrol not responding\n"
+			set ret_code 1
+		}
+		eof {
+			wait
+		}
+	}
+	if { $ret_code != 0 } {
+		return $ret_code
+	}
+
+	spawn $scontrol show res $res_name
+	expect {
+		-re "ReservationName=($alpha_numeric_under)" {
+			set tmp_res_name $expect_out(1,string)
+			if {$tmp_res_name != $res_name} {
+				if {!$failure_expected} {
+					send_user "\nFAILURE: problem showing "
+					send_user "reservation created with:  "
+					send_user "$res_params\n"
+				}
+				set ret_code 1
+				exp_continue
+			}
+		}
+		-re "No reservations in the system" {
+			if {!$failure_expected} {
+				send_user "\nFAILURE: no reservations found  "
+				send_user "after reservation created with:  "
+				send_user "$res_params\n"
+			}
+			set ret_code 1
+			exp_continue
+		}
+		-re "Reservation ($alpha_numeric_under) not found" {
+			if {!$failure_expected} {
+				send_user "\nFAILURE: Reservation $res_name not "
+				send_user "found after reservation created "
+				send_user "with:  $res_params\n"
+			}
+			set ret_code 1
+			exp_continue
+		}
+		timeout {
+			send_user "\nFAILURE: scontrol not responding\n"
+			set ret_code 1
+		}
+		eof {
+			wait
+		}
+	}
+	#exp_internal 0
+
+	return $ret_code
+}
+
+
+#
+# Procedure to update a reservation
+#
+proc update_res { res_name res_params failure_expected } {
+	global scontrol
+	global alpha_numeric_under
+	set ret_code 0
+
+	#
+	# Update the reservation using the list of arguments in res_params
+	#
+	set arglist [linsert $res_params 0 $scontrol update ReservationName=$res_name]
+	eval spawn $arglist
+	expect {
+		-re "Reservation updated." {
+			exp_continue
+		}
+		-re "Error creating the reservation: Invalid user" {
+			if {!$failure_expected} {
+				send_user "\nWARNING: user not authorized "
+				send_user "to update reservation\n"
+			}
+			set ret_code 1
+			exp_continue
+		}
+		-re "Error" {
+			if {!$failure_expected} {
+				send_user "\nFAILURE: problem updating "
+				send_user "reservation $res_name with "
+				send_user "$res_params\n"
+			}
+			set ret_code 1
+			exp_continue
+		}
+		-re "error" {
+			if {!$failure_expected} {
+				send_user "\nFAILURE: problem updating "
+				send_user "reservation $res_name with "
+				send_user "$res_params\n"
+			}
+			set ret_code 1
+			exp_continue
+		}
+		timeout {
+			send_user "\nFAILURE: scontrol not responding\n"
+			set ret_code 1
+		}
+		eof {
+			wait
+		}
+	}
+	return $ret_code
+}
+
+
+#
+# Procedure to delete a reservation
+#
+proc delete_res { res_name } {
+	global scontrol
+	set ret_code 0
+	
+	spawn $scontrol delete ReservationName=$res_name
+	expect {
+		-re "invalid" {
+			send_user "\nFAILURE: problem deleting reservation $res_name\n"
+			set ret_code 1
+			exp_continue
+		}
+		-re "reservation is in use" {
+			send_user "\nFAILURE: $res_name is in use\n"
+			set ret_code 1
+			exp_continue
+		}
+		timeout {
+			send_user "\nFAILURE: scontrol not responding\n"
+			set ret_code 1
+		}
+		eof {
+			wait
+		}
+	}
+	return $ret_code
+}
+
+
+
+
+#
+# Identify usable nodes in default partition
+#
+spawn $sinfo -h -o %32P
+expect {
+	-re "($alpha_numeric_under)(\\*)" {
+		set def_partition $expect_out(1,string)
+		exp_continue
+	}
+	eof {
+		wait
+	}
+}
+if {[string compare $def_partition ""] == 0} {
+	send_user "\nFAILURE: failed to find default partition\n"
+	exit 1
+}
+spawn $sinfo -h -o "=%N=" -p $def_partition
+expect {
+	-re "=(.+)=" {
+		set def_node $expect_out(1,string)
+		exp_continue
+	}
+	eof {
+		wait
+	}
+}
+if {[string compare $def_node ""] == 0} {
+	send_user "\nFAILURE: default partition seems to have no nodes\n"
+	exit 1
+}
+
+
+#
+# Get the user name
+#
+spawn $bin_id -un
+expect {
+	-re "($alpha_numeric_under)" {
+		set user_name $expect_out(1,string)
+	}
+	eof {
+		wait
+	}
+}
+
+
+#
+# TEST 1
+# Make a list of lists with a series of parameters to test.  All the tests 
+# in goodtests should pass, all those in badtests should fail.
+#
+set badtests "
+	{}
+	{Duration=5   Nodes=$def_node   User=$user_name}
+	{StartTime=now   Nodes=$def_node   User=$user_name}
+	{StartTime=midnight   Duration=600   User=$user_name}
+	{StartTime=now   Duration=5   Nodes=ALL}
+	{StartTime=now   Duration=5   NodeCnt=  Nodes=   User=$user_name}
+	{StartTime=now   Duration=5   User=$user_name}
+	{StartTime=blah   Duration=5   Nodes=$def_node   User=$user_name}
+	{StartTime=now   Duration=foo   Nodes=$def_node   User=$user_name}
+	{StartTime=now   Duration=5   Nodes=$def_node   User=$user_name  PartitionName=badpartname}
+	{StartTime=now   Duration=5   Nodes=$def_node   User=$user_name  Flags=badtype}
+	{StartTime=now+10minutes   EndTime=now   Nodes=$def_node   User=$user_name}
+"
+#	{StartTime=now   Duration=5   Nodes=$def_node   Account=badaccountname}
+
+foreach test $badtests {
+	set ret_code [create_res $test 1]
+	if {$ret_code == 0} {
+		send_user "\nFAILURE: Reservation $test did not fail but should have\n"
+		delete_res $res_name
+		exit 1
+	} else {
+		send_user "Expected error.  You can turn that frown upside-down.\n"
+	}
+}
+
+if {[test_super_user] == 0} {
+	send_user "\nWARNING: cannot continue testing unless running as SlurmUser or root\n"
+	exit $exit_code
+}
+
+set goodtests "
+	{StartTime=now   Duration=5   Nodes=$def_node   User=$user_name}
+	{StartTime=now+5minutes   EndTime=now+10minutes   Nodes=$def_node   User=$user_name}
+	{StartTime=midnight   Duration=600   Nodes=$def_node   User=$user_name}
+	{StartTime=now   Duration=5   Nodes=ALL   User=$user_name}
+	{StartTime=now   Duration=5   NodeCnt=1   User=$user_name}
+	{StartTime=now   Duration=5   Nodes=$def_node   User=$user_name  PartitionName=$def_partition}
+	{StartTime=now   Duration=5   Nodes=$def_node   User=$user_name  Flags=Maint}
+"
+foreach test $goodtests {
+	set ret_code [create_res $test 0]
+	if {$ret_code != 0} {
+		send_user "\nFAILURE: Unable to create a valid reservation\n"
+		exit $ret_code
+	}
+	set ret_code [delete_res $res_name]
+	if {$ret_code != 0} {
+		send_user "\nFAILURE: Unable to delete a reservation\n"
+		exit $ret_code
+	}
+}
+
+
+#
+# TEST 2
+# Create a reservation and update it in various ways
+#
+set ret_code [create_res "StartTime=now+60minutes Duration=60 NodeCnt=1 User=$user_name" 0]
+if {$ret_code != 0} {
+	send_user "\nFAILURE: Unable to create a valid reservation\n"
+	exit $ret_code
+}
+
+set goodupdates "
+	{PartitionName=$def_partition}
+	{PartitionName=}
+	{Duration=90}
+	{StartTime=now+30minutes}
+	{Nodes=$def_node}
+	{EndTime=now+60minutes Flags=Maint NodeCnt=1 Nodes=}
+"
+#	{Flags=Maint}
+#	{Flags=}
+
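+# When not running as root, also verify that a user (root, in this case)
+# can be added to and removed from an existing reservation.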
+if {$user_name != "root"} {
+	lappend goodupdates {Users+=root} {Users-=root}
+}
+
+foreach test $goodupdates {
+	set ret_code [update_res $res_name $test 0]
+	if {$ret_code != 0} {
+		send_user "\nFAILURE: Unable to update a valid reservation\n"
+		set exit_code 1
+		break
+	}
+
+}
+
+set ret_code [delete_res $res_name]
+if {$ret_code != 0} {
+	send_user "\nFAILURE: Unable to delete a reservation\n"
+	exit $ret_code
+}
+
+
+#
+# TEST 3
+# Make a reservation, submit a job to it, confirm that the job is paired 
+# with the reservation.
+#
+
+# Make the reservation
+set ret_code [create_res "StartTime=now+60minutes Duration=60 NodeCnt=1 User=$user_name" 0]
+if {$ret_code != 0} {
+	send_user "\nFAILURE: Unable to create a valid reservation\n"
+	exit $ret_code
+}
+
+# Make the job script
+exec $bin_rm -f $file_in
+make_bash_script $file_in "$bin_sleep 10"
+
+# Submit the batch job
+set sbatch_pid [spawn $sbatch -N1 --reservation=$res_name $file_in]
+expect {
+	-re "Submitted batch job ($number)" {
+		set job_id $expect_out(1,string)
+		exp_continue
+	}
+	timeout {
+		send_user "\nFAILURE: sbatch not responding\n"
+		slow_kill $sbatch_pid
+		set exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+if {$job_id == 0} {
+	send_user "\nFAILURE: batch submit failure\n"
+	exit 1
+}
+
+# Show the job, make sure reservation tag is right
+spawn $scontrol show job $job_id
+expect {
+	-re "Reservation=($alpha_numeric_under)" {
+		set tmp_res_name $expect_out(1,string)
+		if {$tmp_res_name != $res_name} {
+			send_user "\nFAILURE: problem submitting a job to a "
+			send_user "reservation.  Job $job_id is running on "
+			send_user "reservation $tmp_res_name, not $res_name\n"
+			set exit_code 1
+			exp_continue
+		}
+	}
+	-re "Invalid job id specified" {
+		send_user "\nFAILURE: Job $job_id not found\n"
+		set exit_code 1
+		exp_continue
+	}
+	timeout {
+		send_user "\nFAILURE: scontrol not responding\n"
+		set exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+
+# Cancel the job
+spawn $scancel -v $job_id
+expect {
+	-re "Invalid job_id" {
+		send_user "\nFAILURE: Error cancelling the job submitted "
+		send_user "to the reservation.  Job $job_id not found\n"
+		set exit_code 1
+		exp_continue
+	}
+	timeout {
+		send_user "\nFAILURE: scancel not responding\n"
+		set exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+
+# Delete the reservation
+set ret_code [delete_res $res_name]
+if {$ret_code != 0} {
+	exit $ret_code
+}
+
+
+#
+# TEST 4
+# If not running as root, make a reservation restricted to root, submit a job,
+# and confirm that the job is rejected.
+#
+if {$user_name != "root"} {
+	# Make the reservation
+	set ret_code [create_res "StartTime=now+60minutes Duration=60 NodeCnt=1 User=root" 0]
+	if {$ret_code != 0} {
+		send_user "\nFAILURE: Unable to create a reservation\n"
+		set exit_code 1
+	}
+
+	# Submit the batch job
+	set denied 0
+	set sbatch_pid [spawn $sbatch -N1 --reservation=$res_name $file_in]
+	expect {
+		-re "Submitted batch job ($number)" {
+			set job_id $expect_out(1,string)
+			exec $scancel $job_id
+			send_user "\nFAILURE: job submit should have been denied\n"
+			set exit_code 1
+			exp_continue
+		}
+		-re "Batch job submission failed: Access denied to requested reservation" {
+			# Job was correctly denied
+			set denied 1
+			exp_continue
+		}
+		timeout {
+			send_user "\nFAILURE: sbatch not responding\n"
+			slow_kill $sbatch_pid
+			set exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+
+	if {$denied == 0} {
+		send_user "\nFAILURE: Job $job_id should have been rejected "
+		send_user "from reservation restricted to root.  Expected "
+		send_user "rejection message not given.\n"
+		set exit_code 1
+	} else {
+		send_user "Expected error, no worries mate.\n"
+	}
+	# Delete the reservation
+	set ret_code [delete_res $res_name]
+	if {$ret_code != 0} {
+		exit $ret_code
+	}
+}
+
+# Remove the temporary job script
+exec $bin_rm -f $file_in
+
+if {$exit_code == 0} {
+	send_user "\nSUCCESS\n"
+}
+exit $exit_code
diff --git a/testsuite/expect/test3.2 b/testsuite/expect/test3.2
index d354eb1c10c108b252c3fa5fe4adcd158ce307e3..b5b37826fcf516ded0101276e64e824fb8585745 100755
--- a/testsuite/expect/test3.2
+++ b/testsuite/expect/test3.2
@@ -10,10 +10,11 @@
 # Copyright (C) 2002 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test3.3 b/testsuite/expect/test3.3
index 47636f6304549e95c26c62fb189c88d02b43c287..a50315e29dd36d889565287a228de61383afe596 100755
--- a/testsuite/expect/test3.3
+++ b/testsuite/expect/test3.3
@@ -10,10 +10,11 @@
 # Copyright (C) 2002 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test3.4 b/testsuite/expect/test3.4
index cac8b13572e588172a04e235d2188e496315794c..125fd0e89fbae1876c4b60826b12e5ec3f576566 100755
--- a/testsuite/expect/test3.4
+++ b/testsuite/expect/test3.4
@@ -11,10 +11,11 @@
 # Copyright (C) 2008 Lawrence Livermore National Security.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
@@ -128,7 +129,9 @@ if {$new_prio != $read_priority} {
 	set exit_code 1
 }
 
-cancel_job $job_id
+if {[cancel_job $job_id] != 0} {
+	set exit_code 1
+}
 if {$exit_code == 0} {
 	send_user "\nSUCCESS\n"
 }
diff --git a/testsuite/expect/test3.5 b/testsuite/expect/test3.5
index 401d7352b1ebe123ac7ee875b2b22ae7ef8a32a9..6e72334e079f90fe8e518d3f2f617b7d94da15b1 100755
--- a/testsuite/expect/test3.5
+++ b/testsuite/expect/test3.5
@@ -10,10 +10,11 @@
 # Copyright (C) 2002 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
@@ -38,6 +39,11 @@ set part_name      "QA_TEST"
 
 print_header $test_id
 
+if {[test_super_user] == 0} {
+	send_user "\nWARNING: cannot continue testing unless running as SlurmUser or root\n"
+	exit $exit_code
+}
+
 #
 # Confirm the partition name does not already exist
 #
@@ -107,13 +113,8 @@ if {[string compare $def_node ""] == 0} {
 #
 # Create a new partition
 #
-spawn $scontrol update PartitionName=$part_name Nodes=$def_node
+spawn $scontrol create PartitionName=$part_name Nodes=$def_node
 expect {
-	-re "slurm_update error: Invalid user" {
-		send_user "\nWARNING: user not authorized to create partition\n"
-		exit $exit_code
-		exp_continue
-	}
 	-re "error" {
 		send_user "\nFAILURE: something bad happened on partitiion create\n"
 		set exit_code 1
diff --git a/testsuite/expect/test3.6 b/testsuite/expect/test3.6
index 9b4896c655ae68056890ec977785f95f65a260a7..9fdf7578a18a618b64760d4580ff30e887b1717f 100755
--- a/testsuite/expect/test3.6
+++ b/testsuite/expect/test3.6
@@ -10,10 +10,11 @@
 # Copyright (C) 2002 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
@@ -85,7 +86,13 @@ if {$found == 1} {
 #
 # We only reach this point if a hidden partition must be 
 # created to test this feature, which only super users can do 
-# 
+#
+if {[test_super_user] == 0} {
+	send_user "\nWARNING: cannot continue testing unless running as SlurmUser or root\n"
+	exit $exit_code
+}
+ 
+#
 # Confirm the partition name does not already exist
 #
 set found -1
@@ -120,13 +127,8 @@ if {$found == 1} {
 #
 # Create a new partition
 #
-spawn $scontrol update PartitionName=$part_name Hidden=YES
+spawn $scontrol create PartitionName=$part_name Hidden=YES
 expect {
-	-re "slurm_update error: Invalid user" {
-		send_user "\nWARNING: user not authorized to create partition\n"
-		exit $exit_code
-		exp_continue
-	}
 	-re "error" {
 		send_user "\nFAILURE: something bad happened on partitiion create\n"
 		set exit_code 1
diff --git a/testsuite/expect/test3.7 b/testsuite/expect/test3.7
index 60f8808e4ba4e4776bd21bbcd19782b76a133bf0..cf8088f7faa4027e11aeadd9a6594381cb072c17 100755
--- a/testsuite/expect/test3.7
+++ b/testsuite/expect/test3.7
@@ -12,10 +12,11 @@
 # Copyright (C) 2008 Lawrence Livermore National Security.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test3.7.prog.c b/testsuite/expect/test3.7.prog.c
index 489c3456447dada70ab3709efdfd931c8d32054b..356ee2cb0064c1088ddbfd7677886d71cfe7bbee 100644
--- a/testsuite/expect/test3.7.prog.c
+++ b/testsuite/expect/test3.7.prog.c
@@ -7,10 +7,11 @@
  *  Copyright (C) 2005 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test3.8 b/testsuite/expect/test3.8
index 1d5b4e949c75ca1a81f92bba993127c34b3f253f..6421c6fbf50a56010e8c30dd4de0697224e73342 100755
--- a/testsuite/expect/test3.8
+++ b/testsuite/expect/test3.8
@@ -14,10 +14,11 @@
 # Copyright (C) 2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
@@ -84,7 +85,7 @@ if { [test_bluegene] } {
 # Spawn a srun batch job that uses stdout/err and confirm their contents
 #
 set timeout $max_job_delay
-set sbatch_pid [spawn $sbatch -N$node_cnt --output=$file_out --error=$file_err -t1 $file_in]
+set sbatch_pid [spawn $sbatch --requeue -N$node_cnt --output=$file_out --error=$file_err -t1 $file_in]
 expect {
 	-re "Submitted batch job ($number)" {
 		set job_id $expect_out(1,string)
diff --git a/testsuite/expect/test3.9 b/testsuite/expect/test3.9
index c4115ad9c58716381021d6f6d8e79d4a829b4326..459853547c5c5ea1cfcfe97111ae3da767f07368 100755
--- a/testsuite/expect/test3.9
+++ b/testsuite/expect/test3.9
@@ -10,10 +10,11 @@
 # Copyright (C) 2007 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test4.1 b/testsuite/expect/test4.1
index 3fd1144df941945cad047bd2446342a36db37814..986fefcc33db44afccbfe209981a40b70b7422fd 100755
--- a/testsuite/expect/test4.1
+++ b/testsuite/expect/test4.1
@@ -10,10 +10,11 @@
 # Copyright (C) 2002 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test4.10 b/testsuite/expect/test4.10
index 9e9affad6096e86b7a341b7563b8025888181d92..0a7867791328412cde9aff7b8b8053f9111dfc15 100755
--- a/testsuite/expect/test4.10
+++ b/testsuite/expect/test4.10
@@ -10,10 +10,11 @@
 # Copyright (C) 2002 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test4.11 b/testsuite/expect/test4.11
index ca4f488c34ebe75fbe5debab1a6f3937a52bb412..bbd1f52228b27a16efac7897d5e630a53d71ee00 100755
--- a/testsuite/expect/test4.11
+++ b/testsuite/expect/test4.11
@@ -10,10 +10,11 @@
 # Copyright (C) 2002-2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test4.2 b/testsuite/expect/test4.2
index b5727a15f1d22b548baf6742f228dc71d077faf8..7e09e4c4fae5bd4191d6a5ebbc24e9fd6d51a915 100755
--- a/testsuite/expect/test4.2
+++ b/testsuite/expect/test4.2
@@ -10,10 +10,11 @@
 # Copyright (C) 2002 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test4.3 b/testsuite/expect/test4.3
index 3943240cd27772d0cc4df0f16535f8eaf74c2a9a..7ee122278913132e793b54153c294e5aceda6cb9 100755
--- a/testsuite/expect/test4.3
+++ b/testsuite/expect/test4.3
@@ -11,10 +11,11 @@
 # Copyright (C) 2002-2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test4.4 b/testsuite/expect/test4.4
index 49e714054d8a95ebee637d7e7f647c040d7a2b2a..85c066fc57ef1584651908441ea89c255d652ab7 100755
--- a/testsuite/expect/test4.4
+++ b/testsuite/expect/test4.4
@@ -11,10 +11,11 @@
 # Copyright (C) 2002-2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test4.5 b/testsuite/expect/test4.5
index 2c17640358f1e66f5e319ee9c64fad93ff6c1eb0..4f8c93d5d7122996156c0899896734affbcb5d94 100755
--- a/testsuite/expect/test4.5
+++ b/testsuite/expect/test4.5
@@ -11,10 +11,11 @@
 # Copyright (C) 2002 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test4.6 b/testsuite/expect/test4.6
index 357ed35707c0851bd10155cf0b158e6bc6788cf4..bdc667ae6862ac6b45afa9284e723cdad1726c89 100755
--- a/testsuite/expect/test4.6
+++ b/testsuite/expect/test4.6
@@ -10,10 +10,11 @@
 # Copyright (C) 2002 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test4.7 b/testsuite/expect/test4.7
index 31e7e3be653a6cd381e5ac7b501af89bbe3ff7cc..8c5c58a64c7935e3664d5bd895fce9d2d51a0a00 100755
--- a/testsuite/expect/test4.7
+++ b/testsuite/expect/test4.7
@@ -10,10 +10,11 @@
 # Copyright (C) 2002 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test4.8 b/testsuite/expect/test4.8
index e6c9452bd67591d2e25a40a4441effa4334a34b3..515c0297ba6a93939ffdcd682bee06d56563c1ad 100755
--- a/testsuite/expect/test4.8
+++ b/testsuite/expect/test4.8
@@ -10,10 +10,11 @@
 # Copyright (C) 2002 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test4.9 b/testsuite/expect/test4.9
index 8a58ca62d731613d8bd2202496a3bf61544cb49f..863574b356f391c5a4ec24337584759d85d4e30d 100755
--- a/testsuite/expect/test4.9
+++ b/testsuite/expect/test4.9
@@ -11,10 +11,11 @@
 # Copyright (C) 2002 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test5.1 b/testsuite/expect/test5.1
index df2e4c4b390091b7b1772bc9212025fe4641ded2..f7ee36bdb6469b2f9934f2bbb6918c3e928db5d1 100755
--- a/testsuite/expect/test5.1
+++ b/testsuite/expect/test5.1
@@ -10,10 +10,11 @@
 # Copyright (C) 2002 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test5.2 b/testsuite/expect/test5.2
index a4c7cd94548be106891a6ae9e612ce77cc1f2892..d9b3fc2fd0899fa65b8d6e2ef6054c360aa0141e 100755
--- a/testsuite/expect/test5.2
+++ b/testsuite/expect/test5.2
@@ -10,10 +10,11 @@
 # Copyright (C) 2002 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test5.3 b/testsuite/expect/test5.3
index 85bc7d468131039ea9011850d47f18f168fbd095..b7ce8fd3ae36a726734729671807b58673858c83 100755
--- a/testsuite/expect/test5.3
+++ b/testsuite/expect/test5.3
@@ -10,10 +10,11 @@
 # Copyright (C) 2002 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test5.4 b/testsuite/expect/test5.4
index 9258116b32276db9ca5933c617469b2020b41325..78bac69282a53397aea5a5ca997e477747c2b574 100755
--- a/testsuite/expect/test5.4
+++ b/testsuite/expect/test5.4
@@ -11,10 +11,11 @@
 # Copyright (C) 2002 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test5.5 b/testsuite/expect/test5.5
index 34d224e235ad13cd410c6c19d609c3f7848f3860..b3fc8ebd924b1da3f3b36260bff4a4ea9d673a55 100755
--- a/testsuite/expect/test5.5
+++ b/testsuite/expect/test5.5
@@ -10,10 +10,11 @@
 # Copyright (C) 2002 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test5.6 b/testsuite/expect/test5.6
index 7d2879d346f25a1880ae895e8be97410b17acc2b..22401d5f0acbd30edb0457fd7c7a58db51e03d16 100755
--- a/testsuite/expect/test5.6
+++ b/testsuite/expect/test5.6
@@ -11,10 +11,11 @@
 # Copyright (C) 2002 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test5.7 b/testsuite/expect/test5.7
index f5e0bb32c45aeb05884d8430689d43a3afafa700..b7b70b08b5b9ba8a5a479d02f63d9172a69a6b92 100755
--- a/testsuite/expect/test5.7
+++ b/testsuite/expect/test5.7
@@ -10,10 +10,11 @@
 # Copyright (C) 2002 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test5.8 b/testsuite/expect/test5.8
index 51ca309f29243e85109a0adc7bd7b2e73b4c91a9..e81731c981b6f7c9e2763c9a52d8cc0e96b8a801 100755
--- a/testsuite/expect/test5.8
+++ b/testsuite/expect/test5.8
@@ -10,10 +10,11 @@
 # Copyright (C) 2002 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test6.1 b/testsuite/expect/test6.1
index b983bc7315aecd7a691fc5cf122de765c81c032f..bbdda4db386370dd587519bbcdbd34c5878bb40e 100755
--- a/testsuite/expect/test6.1
+++ b/testsuite/expect/test6.1
@@ -10,10 +10,11 @@
 # Copyright (C) 2002 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test6.10 b/testsuite/expect/test6.10
index 5d321322f5fc70b81e06e1b80c9b03d747a87cbc..0b2286bcc215822e58500018f035b919da737ba4 100755
--- a/testsuite/expect/test6.10
+++ b/testsuite/expect/test6.10
@@ -14,10 +14,11 @@
 # Copyright (C) 2002 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test6.11 b/testsuite/expect/test6.11
index 7a8575c57cdf68d91de0e028d208ba4a083132b6..de4e295b2181aa16b0bcd9f08669c48130757ab5 100755
--- a/testsuite/expect/test6.11
+++ b/testsuite/expect/test6.11
@@ -10,10 +10,11 @@
 # Copyright (C) 2002 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test6.12 b/testsuite/expect/test6.12
index 191eef1b860cd739728ad9241e8891ae5c13171a..f650e68ce45f62dcbb995956ce384cf11b3fa709 100755
--- a/testsuite/expect/test6.12
+++ b/testsuite/expect/test6.12
@@ -13,10 +13,11 @@
 # Copyright (C) 2002-2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test6.13 b/testsuite/expect/test6.13
index cb3934ca3e3cbc9c86c45390fec26cb6613e7e17..5915cd9c6674f23275df29d7934018c84fe53d78 100755
--- a/testsuite/expect/test6.13
+++ b/testsuite/expect/test6.13
@@ -14,7 +14,8 @@
 # UCRL-CODE-217948.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test6.14 b/testsuite/expect/test6.14
new file mode 100755
index 0000000000000000000000000000000000000000..cc0311d3014ce86267e347a5df6a2edf92f19d64
--- /dev/null
+++ b/testsuite/expect/test6.14
@@ -0,0 +1,195 @@
+#!/usr/bin/expect
+############################################################################
+# Purpose: Test of SLURM functionality
+#          Test scancel --nodelist option.
+#
+# Output:  "TEST: #.#" followed by "SUCCESS" if test was successful, OR
+#          "FAILURE: ..." otherwise with an explanation of the failure, OR
+#          anything else indicates a failure mode that must be investigated.
+############################################################################
+# Copyright (C) 2008 Lawrence Livermore National Security
+# Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+# Written by David Bremer <dbremer@llnl.gov>
+# CODE-OCEC-09-009. All rights reserved.
+# 
+# This file is part of SLURM, a resource management program.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
+#  
+# SLURM is free software; you can redistribute it and/or modify it under
+# the terms of the GNU General Public License as published by the Free
+# Software Foundation; either version 2 of the License, or (at your option)
+# any later version.
+# 
+# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+# details.
+# 
+# You should have received a copy of the GNU General Public License along
+# with SLURM; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+############################################################################
+source ./globals
+
+set test_id        "6.14"
+set exit_code      0
+set file_in        "test$test_id.input"
+set num_procs      10
+set ii             0
+set job_id         ""
+set job_map        {}
+set found          0
+set tmp_job_list   {}
+set tmp_map_entry  {}
+set submitted_jobs {}
+set job_list       {}
+set job_index      -1
+
+print_header $test_id
+
+#
+# Build input script file
+#
+make_bash_script $file_in "$srun $bin_sleep 600"
+
+#
+# Submit some jobs so we have something to work with
+#
+set timeout 20
+for {set ii 0} {$ii < $num_procs} {incr ii} {
+	set sbatch_pid [spawn $sbatch --output=/dev/null --error=/dev/null -n1 -N1 $file_in]
+	expect {
+		-re "Submitted batch job ($number)" {
+			set job_id $expect_out(1,string)
+			lappend submitted_jobs $job_id
+			exp_continue
+		}
+		timeout {
+			send_user "\nFAILURE: sbatch not responding\n"
+			slow_kill $sbatch_pid
+			set exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+	if {$job_id == 0} {
+		send_user "\nFAILURE: job submit failure\n"
+		exit 1
+	}
+}
+
+#
+# Run squeue and build a map, implemented as a list of list of lists, like so:
+# { {node1  {job1 job2 job3}}
+#   {node2  {job4 job5}}
+# }
+#
+# Only put jobs into the map if they were submitted by this test.
+#
+
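+# Ask squeue for just the job id (%i) and the allocated node list (%N),
+# which keeps the output easy to parse with the regexp below.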
+spawn $squeue -h -t running -u $env(USER) -o "%10i %40N"
+expect {
+	-re "($number) *($alpha_numeric) *\r\n" {
+		set job_id $expect_out(1,string)
+		set node_name $expect_out(2,string)
+		
+		#This test doesn't need to track jobs that it didn't submit.
+		if { [lsearch $submitted_jobs $job_id] == -1 } {
+			exp_continue
+		}
+		#send_user "job $job_id: node $node_name\n"
+		#Insert into a table with node_name as the key, job_id as the value
+		set found 0
+		for {set ii 0} {$ii < [llength $job_map]} {incr ii} {
+			if { [lindex [lindex $job_map $ii] 0] == $node_name } {
+				set tmp_map_entry [list $node_name [concat [lindex [lindex $job_map $ii] 1] $job_id]]
+				set job_map [lreplace $job_map $ii $ii $tmp_map_entry]
+				set found 1
+				break
+			}
+		}
+		if {$found == 0} {
+			lappend job_map [list $node_name [list $job_id] ]
+		}
+		exp_continue
+	}
+	timeout {
+		send_user "\nFAILURE: squeue not responding\n"
+		set exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+#send_user "job map: $job_map\n"
+
+
+#
+# Issue an scancel command against each node in the map described above.
+# Remove entries from the internal list, and ensure that the list is
+# empty at the end of the scancel call.
+#
+
+for {set ii 0} {$ii < [llength $job_map]} {incr ii} {
+	set node_name [lindex [lindex $job_map $ii] 0]
+	set job_list  [lindex [lindex $job_map $ii] 1]
+
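+	# Use the long form (--nodelist) on the first pass and its short
+	# alias (-w) afterwards so both spellings get exercised.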
+	if {$ii == 0} {
+		spawn $scancel -v -u $env(USER) --nodelist $node_name
+	} else {
+		spawn $scancel -v -u $env(USER) -w $node_name
+	}
+	expect {
+		-re "scancel: Terminating job ($number)" {
+			#Search for the terminated job in the list recently
+			#returned from squeue. Don't worry if an unknown job
+			#gets cancelled, because maybe one of our submitted
+			#jobs will start running while we cancel other jobs
+			#Issue cancel commands node by node until all the 
+			#jobs submitted for this test are gone.
+
+			set job_id $expect_out(1,string)
+			set job_index [lsearch $job_list $job_id]
+			if {$job_index != -1} {
+				set job_list [lreplace $job_list $job_index $job_index]
+			} 
+			set job_index [lsearch $submitted_jobs $job_id]
+			if {$job_index != -1} {
+				set submitted_jobs [lreplace $submitted_jobs $job_index $job_index]
+			}
+			exp_continue
+		}
+		timeout {
+			send_user "\nFAILURE: scancel not responding while cancelling jobs on node $node_name\n"
+			set exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+	if { [llength $job_list] != 0 } {
+		send_user "\nFAILURE: scancel did not remove jobs $job_list from node $node_name\n"
+		set exit_code 1
+	}
+}
+
+#
+# Clean up any jobs submitted by this test, which were not mapped to a node,
+# and thus not cancelled in the previous block of code
+#
+
+foreach job_id $submitted_jobs {
+	spawn $scancel $job_id
+	expect {
+		timeout {
+			send_user "\nFAILURE: scancel not responding while cancelling job $job_id\n"
+			set exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+}
+
+
+
+if {$exit_code == 0} {
+	send_user "\nSUCCESS\n"
+}
+exit $exit_code
+
diff --git a/testsuite/expect/test6.2 b/testsuite/expect/test6.2
index 770dccd3b64eb71b06442a202dbd8f83cbde5084..3b9418565be257239f5a1de28dec21a6b2d1a54f 100755
--- a/testsuite/expect/test6.2
+++ b/testsuite/expect/test6.2
@@ -10,10 +10,11 @@
 # Copyright (C) 2002 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test6.3 b/testsuite/expect/test6.3
index f9146f72c0f87b6a736954dc4b7b07ede6fd48dd..44b04e279e246ddc5222541412e5cac553e398a7 100755
--- a/testsuite/expect/test6.3
+++ b/testsuite/expect/test6.3
@@ -10,10 +10,11 @@
 # Copyright (C) 2002-2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
@@ -116,7 +117,8 @@ expect {
 
 spawn $scancel --interactive $job_id1
 expect {
-	-re "Job $job_id1 not found" {
+	-re "Kill job error.* $job_id1" {
+		send_user "\nNo worries, error is expected\n"
 		incr matches
 		exp_continue
 	}
diff --git a/testsuite/expect/test6.4 b/testsuite/expect/test6.4
index a8eae405eded89de46889b4596636d6e11c88bcb..06e94eb0dbfefa8aefa5302f24d2e1ff8a668faf 100755
--- a/testsuite/expect/test6.4
+++ b/testsuite/expect/test6.4
@@ -10,10 +10,11 @@
 # Copyright (C) 2002 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test6.5 b/testsuite/expect/test6.5
index 52675a60921f2f0ee1acc702a775c7ce77bbf67d..5a13b6b68d90b80438570959f97cfd2e713dfe7a 100755
--- a/testsuite/expect/test6.5
+++ b/testsuite/expect/test6.5
@@ -10,10 +10,11 @@
 # Copyright (C) 2002 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test6.6 b/testsuite/expect/test6.6
index ff9a2aa6c367daeda72ec2453ec9132444e8bf5b..a8d391a1168a1ac7dbd3b316af8d1bb4fc8077ae 100755
--- a/testsuite/expect/test6.6
+++ b/testsuite/expect/test6.6
@@ -10,10 +10,11 @@
 # Copyright (C) 2002 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test6.7 b/testsuite/expect/test6.7
index bac8f26fe5a6968ccc067962860f51e0fe2f270b..0362de9f60e895f551fad179b57c1418b4b60b50 100755
--- a/testsuite/expect/test6.7
+++ b/testsuite/expect/test6.7
@@ -10,10 +10,11 @@
 # Copyright (C) 2002-2007 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test6.8 b/testsuite/expect/test6.8
index 2de2cbbbf3c2115cfba6420f5f56e2d20f6e2af9..81e356a8267fab87129cc848dd40ef09a32d4576 100755
--- a/testsuite/expect/test6.8
+++ b/testsuite/expect/test6.8
@@ -13,10 +13,11 @@
 # Copyright (C) 2002 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test6.9 b/testsuite/expect/test6.9
index c1651e765a56fa47f501f80b9e09db6992a1f1e7..41ae263e72c679cf24ebdcbd034be9fc052ff665 100755
--- a/testsuite/expect/test6.9
+++ b/testsuite/expect/test6.9
@@ -13,10 +13,11 @@
 # Copyright (C) 2002-2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test7.1 b/testsuite/expect/test7.1
index dc3a70220ea4dbda6bb180ebb8451636bac8e0b4..839f807a54b991e35d275249d8d54ce35c07d1b9 100755
--- a/testsuite/expect/test7.1
+++ b/testsuite/expect/test7.1
@@ -10,10 +10,11 @@
 # Copyright (C) 2002 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
@@ -39,6 +40,7 @@ set job_id3              0
 set prio1                -1
 set prio2                -1
 set prio3                -1
+set multi                0
 
 print_header $test_id
 
@@ -48,6 +50,12 @@ if {[test_wiki_sched] == 1} {
 	exit $exit_code
 }
 
+# The expected outcome differs when the multifactor priority plugin is in use
+if { ![string compare [priority_type] multifactor] } {
+	set multi 1
+}
+
+
 make_bash_script "pwd_script" { $bin_pwd }
 
 #
@@ -167,9 +175,16 @@ if {$job_id3 != 0} {
 #
 # Confirm reasonable values for the job priorities
 #
-if {$prio1 <= $prio2} {
-	send_user "\FAILURE: Job priorities are not decreasing: $prio1 <= $prio2 \n"
-	set exit_code 1
+if {$multi} {
+	if {$prio1 != $prio2} {
+		send_user "\nFAILURE: Job priorities are not the same: $prio1 != $prio2\n"
+		set exit_code 1
+	}
+} else {
+	if {$prio1 <= $prio2} {
+		send_user "\nFAILURE: Job priorities are not decreasing: $prio1 <= $prio2\n"
+		set exit_code 1
+	}
 }
 if {$prio3 != 0} {
 	send_user "\FAILURE: Held job has non-zero priority: $prio3\n"
diff --git a/testsuite/expect/test7.10 b/testsuite/expect/test7.10
index 61501f53aea6567ee7c8f33db1b6374bd5b1b3e1..00211a0981fc3ed5b4afefeba5e4af653b15f8c8 100755
--- a/testsuite/expect/test7.10
+++ b/testsuite/expect/test7.10
@@ -11,10 +11,11 @@
 # Copyright (C) 2007 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test7.11 b/testsuite/expect/test7.11
index 1803e22e36913f070edeff3dda9069c3d1adc152..01a04aa9c33a39d3247b939a594f43ad764f6e11 100755
--- a/testsuite/expect/test7.11
+++ b/testsuite/expect/test7.11
@@ -7,13 +7,14 @@
 #          "FAILURE: ..." otherwise with an explanation of the failure, OR
 #          anything else indicates a failure mode that must be investigated.
 ############################################################################
-# Copyright (C) 2008 Lawrence Livermore National Security.
+# Copyright (C) 2008-2009 Lawrence Livermore National Security.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
@@ -56,7 +57,7 @@ if {[test_aix] == 1} {
 # Build the plugin
 #
 exec $bin_rm -f ${file_prog}.so
-exec $bin_cc -shared -I${slurm_dir}/include -o ${file_prog}.so ${file_prog}.c
+exec $bin_cc -fPIC -shared -I${slurm_dir}/include -o ${file_prog}.so ${file_prog}.c
 
 #
 # Locate slurm.conf's directory, copy the original plugstack.conf file
@@ -66,7 +67,7 @@ log_user 0
 set config_dir ""
 spawn $scontrol show config
 expect {
-	-re "SLURM_CONFIG_FILE.*= (/.*)/slurm.conf" {
+	-re "SLURM_CONF.*= (/.*)/slurm.conf.*SLURM_VERSION" {
 		set config_dir $expect_out(1,string)
 		exp_continue
 	}
@@ -97,6 +98,8 @@ if {[file exists $spank_conf_file]} {
 
 	exec $bin_cp $spank_conf_file $orig_spank_conf
 	exec $bin_cp $spank_conf_file $new_spank_conf
+	exec $bin_chmod 700 $spank_conf_file
+	exec $bin_chmod 700 $new_spank_conf
 } else {
 	exec $bin_cp /dev/null $new_spank_conf
 }
@@ -113,13 +116,19 @@ expect {
 	}
 }
 
+# Allow enough time for the configuration file on NFS to be propagated
+# to all nodes of the cluster
+exec sleep 60
+
 #
 # Test of srun help message
 #
 # NOTE: Expect parsing failures have been noted running "$srun --help" 
 #       directly, so we build a script containing a pipe to tail
 #
+
 make_bash_script $file_in "$srun --help | $bin_grep test"
+
 set matches 0
 set srun_pid [spawn $file_in]
 expect {
@@ -145,6 +154,67 @@ if {$matches != 2} {
 	set exit_code 1
 }
 
+#
+# Test of salloc help message
+#
+# NOTE: Expect parsing failures have been noted running "$salloc --help"
+#       directly, so we build a script containing a pipe to grep
+#
+
+make_bash_script $file_in "$salloc --help | $bin_grep test"
+
+set matches 0
+set salloc_pid [spawn $file_in]
+expect {
+	-re "Registered component of slurm test suite" {
+		incr matches
+		exp_continue
+	}
+	timeout {
+		send_user "\nFAILURE: salloc not responding\n"
+		slow_kill $salloc_pid
+		set exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+if {$matches != 1} {
+	send_user "\nFAILURE: spank help message not in salloc help message\n"
+	set exit_code 1
+}
+
+#
+# Test of sbatch help message
+#
+# NOTE: Expect parsing failures have been noted running "$sbatch --help"
+#       directly, so we build a script containing a pipe to grep
+#
+
+make_bash_script $file_in "$sbatch --help | $bin_grep test"
+
+set matches 0
+set sbatch_pid [spawn $file_in]
+expect {
+	-re "Registered component of slurm test suite" {
+		incr matches
+		exp_continue
+	}
+	timeout {
+		send_user "\nFAILURE: sbatch not responding\n"
+		slow_kill $sbatch_pid
+		set exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+if {$matches != 1} {
+	send_user "\nFAILURE: spank help message not in sbatch help message\n"
+	set exit_code 1
+}
+exec $bin_rm -f $file_in
+
 #
 # Test of locally logged messages().
 # We update the plugstatck.conf in this script since NFS delays may 
@@ -158,8 +228,17 @@ make_bash_script $file_in "
     $bin_cp $orig_spank_conf $spank_conf_file
   fi
 "
-spawn $sbatch -N1 -t1 -o $file_out $file_in
+set matches 0
+spawn $sbatch --test_suite_reg=4 -N1 -t1 -o $file_out $file_in
 expect {
+	-re "_test_opt_process: test_suite: opt_arg=4" {
+		incr matches
+		exp_continue
+	}
+	-re "slurm_spank_exit: opt_arg=4" {
+		incr matches
+		exp_continue
+	}
 	-re timeout {
 		send_user "\nFAILURE: sbatch not responding\n"
 		set exit_code 1
@@ -168,6 +247,10 @@ expect {
 		wait
 	}
 }
+if {$matches != 2} {
+	send_user "\nFAILURE: spank options not processed by sbatch\n"
+	set exit_code 1
+}
 
 # NOTE: spank logs from sbatch and srun would be intermingled here
 if {[wait_for_file $file_out] == 0} {
@@ -213,22 +296,34 @@ if {[wait_for_file $file_out] == 0} {
 #
 if {[wait_for_file $spank_out] == 0} {
 	send_user "\n\n"
-	set matches 0
-	spawn $bin_cat $spank_out
+	set matches        0
+	set matches_sbatch 0
+	set matches_srun   0
+	spawn $bin_sort $spank_out
 	expect {
-		-re "slurm_spank_task_init: opt_arg=5" {
-			incr matches
+		-re "slurm_spank_exit: opt_arg=($number)" {
+			if {$expect_out(1,string) == 4} {
+				incr matches_sbatch
+			}
+			if {$expect_out(1,string) == 5} {
+				incr matches_srun
+			}
 			exp_continue
 		}
-		-re "spank_get_item: my_uid=" {
-			incr matches
+		-re "slurm_spank_task_init: opt_arg=($number)" {
+			if {$expect_out(1,string) == 4} {
+				incr matches_sbatch
+			}
+			if {$expect_out(1,string) == 5} {
+				incr matches_srun
+			}
 			exp_continue
 		}
 		-re "spank_get_item: argv" {
 			incr matches
 			exp_continue
 		}
-		-re "slurm_spank_exit: opt_arg=5" {
+		-re "spank_get_item: my_uid=" {
 			incr matches
 			exp_continue
 		}
@@ -236,8 +331,14 @@ if {[wait_for_file $spank_out] == 0} {
 			wait
 		}
 	}
-	if {$matches != 4} {
-		send_user "\nFAILURE: remote (slurmd) spank plugin failure\n"
+	if {$matches_sbatch != 2} {
+		send_user "\nFAILURE: remote (slurmd) sbatch spank plugin failure ($matches_sbatch)\n"
+		set exit_code 1
+	} elseif {$matches_srun != 2} {
+		send_user "\nFAILURE: remote (slurmd) srun spank plugin failure ($matches_srun)\n"
+		set exit_code 1
+	} elseif {$matches != 4} {
+		send_user "\nFAILURE: remote (slurmd) spank plugin failure ($matches)\n"
 		set exit_code 1
 	} else {
 		send_user "\n remote (slurmd) spank plugin success\n"
diff --git a/testsuite/expect/test7.11.prog.c b/testsuite/expect/test7.11.prog.c
index 9d49ce56bb7307cc06c75357bacddf4449bbabec..aab5b3721b7f8c2e49cd14fd4f5415d3503aeb66 100644
--- a/testsuite/expect/test7.11.prog.c
+++ b/testsuite/expect/test7.11.prog.c
@@ -4,10 +4,11 @@
  *  Copyright (C) 2008 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -43,7 +44,7 @@ static char *opt_out_file = NULL;
 static int _test_opt_process(int val, const char *optarg, int remote);
 
 /*
- *  Provide a --renice=[prio] option to srun:
+ *  Provide a --test_suite=[opt_arg] option to srun:
  */
 struct spank_option spank_options[] =
 {
@@ -73,6 +74,12 @@ static int _test_opt_process(int val, const char *optarg, int remote)
 /*  Called from both srun and slurmd */
 int slurm_spank_init(spank_t sp, int ac, char **av)
 {
+	spank_context_t context;
+
+	context = spank_context();
+	if ((context != S_CTX_LOCAL) && (context != S_CTX_REMOTE) &&
+	    (context != S_CTX_ALLOCATOR))
+		slurm_error("spank_context error");
 	if (spank_option_register(sp, spank_options_reg) != ESPANK_SUCCESS)
 		slurm_error("spank_option_register error");
 	if (spank_remote(sp) && (ac == 1))
diff --git a/testsuite/expect/test7.2 b/testsuite/expect/test7.2
index 6827f407531ec269f141bafd3786431e5c2cf619..bc789cc0337e40ae2d62e81b5549c121f112b207 100755
--- a/testsuite/expect/test7.2
+++ b/testsuite/expect/test7.2
@@ -11,10 +11,11 @@
 # Copyright (C) 2005-2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test7.2.prog.c b/testsuite/expect/test7.2.prog.c
index e3b2e34df5b21594bc45842f0d248512205cac64..8f6fad51a6df22e2700864c1e1faabd904d3943f 100644
--- a/testsuite/expect/test7.2.prog.c
+++ b/testsuite/expect/test7.2.prog.c
@@ -4,10 +4,11 @@
  *  Copyright (C) 2005-2006 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -50,6 +51,7 @@ main (int argc, char **argv)
 {
 	int i, j, rc;
 	int nprocs, procid;
+	int clique_size, *clique_ranks = NULL;
 	char *nprocs_ptr, *procid_ptr;
 	int pmi_rank, pmi_size, kvs_name_len, key_len, val_len;
 	PMI_BOOL initialized;
@@ -120,6 +122,24 @@ main (int argc, char **argv)
 		exit(1);
 	}
 
+	if ((rc = PMI_Get_clique_size(&clique_size)) != PMI_SUCCESS) {
+		printf("FAILURE: PMI_Get_clique_size: %d, task %d\n",
+			rc, pmi_rank);
+		exit(1);
+	}
+	clique_ranks = malloc(sizeof(int) * clique_size);
+	if ((rc = PMI_Get_clique_ranks(clique_ranks, clique_size)) !=
+	     PMI_SUCCESS) {
+		printf("FAILURE: PMI_Get_clique_ranks: %d, task %d\n",
+			rc, pmi_rank);
+		exit(1);
+	}
+#if _DEBUG
+	for (i=0; i<clique_size; i++)
+		printf("PMI_Get_clique_ranks[%d]=%d\n", i, clique_ranks[i]);
+#endif
+	free(clique_ranks);
+
 	if ((rc = PMI_KVS_Get_name_length_max(&kvs_name_len)) != PMI_SUCCESS) {
 		printf("FAILURE: PMI_KVS_Get_name_length_max: %d, task %d\n", 
 			rc, pmi_rank);
diff --git a/testsuite/expect/test7.3 b/testsuite/expect/test7.3
index 07b72d80a214958e918c6dbaedcb2811076adc04..76a3492bc68a3338f61d1bb2a87bb178aa1823e2 100755
--- a/testsuite/expect/test7.3
+++ b/testsuite/expect/test7.3
@@ -12,10 +12,11 @@
 # Copyright (C) 2004 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test7.3.io.c b/testsuite/expect/test7.3.io.c
index 0f3917aeae3927d3571b6a9fa3dfe3e1eb24c634..46ca90074f11f9bcc911d27370553449e0d9066a 100644
--- a/testsuite/expect/test7.3.io.c
+++ b/testsuite/expect/test7.3.io.c
@@ -9,10 +9,11 @@
  *  Copyright (C) 2004 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test7.3.prog.c b/testsuite/expect/test7.3.prog.c
index 0089321c11e2bec87dde81f432a82918e122701a..bab653f8f582820c3e808d71c899af8d737dc8ee 100644
--- a/testsuite/expect/test7.3.prog.c
+++ b/testsuite/expect/test7.3.prog.c
@@ -8,10 +8,11 @@
  *  Copyright (C) 2004 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
@@ -144,7 +145,7 @@ int main (int argc, char *argv[])
 	launch->user_managed_io = true; /* This is the key to using
 					  "user managed" IO */
 	
-	if (slurm_step_launch(ctx, "", launch, NULL) != SLURM_SUCCESS) {
+	if (slurm_step_launch(ctx, launch, NULL) != SLURM_SUCCESS) {
 		slurm_perror("slurm_step_launch");
 		rc = 1;
 		goto done;
diff --git a/testsuite/expect/test7.4 b/testsuite/expect/test7.4
index 6ffda3dd69dd93b3b79675d3c562c785fd664d6e..4461137f76a6a17543512b62458363c5529ae738 100755
--- a/testsuite/expect/test7.4
+++ b/testsuite/expect/test7.4
@@ -12,10 +12,11 @@
 # Copyright (C) 2004-2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov> and Dong Ang <dahn@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
@@ -49,8 +50,8 @@ set exit_code     0
 set test_prog     "test$test_id.prog"
 
 # Note this appears as a single argv value to totalviewcli
-set bulk "set issue_dgo false; dset TV::bulk_launch_enabled true; dset TV::bulk_launch_string {$srun -N%N -n%N --jobid=%J -w`awk -F. \'BEGIN {ORS=\",\"} {if (NR==%N) ORS=\"\"; print \$1}\' %t1` -l -i /dev/null %B/tvdsvr%K -callback_host %H -callback_ports %L -set_pws %P -verbosity %V -working_directory %D %F}"
-set no_bulk       "set issue_dgo false; dset TV::bulk_launch_enabled false"
+set bulk "dset TV::bulk_launch_enabled true; dset TV::bulk_launch_tmpfile1_host_lines %R; dset TV::bulk_launch_string {$srun --jobid=%J -N%N -n%N -w`awk -F. \'BEGIN {ORS=\",\"} {if (NR==%N) ORS=\"\"; print \$1}\' %t1` -l --input=none %B/tvdsvr%K -callback_host %H -callback_ports %L -set_pws %P -verbosity %V -working_directory %D %F}"
+set no_bulk       "dset TV::bulk_launch_enabled false"
 
 print_header $test_id
 
@@ -150,6 +151,10 @@ expect {
 		send "yes\n"
 		exp_continue
 	}
+	-re "Could not open breakpoint file" {
+		send_user "\nNOTE: Breakpoint file error is expected due to known Totalview bug\n"
+		exp_continue
+	}
 	timeout {
 		send_user "\nFAILURE: totalviewcli not responding\n"
 		set exit_code 1
@@ -228,6 +233,10 @@ expect {
 		send "yes\n"
 		exp_continue
 	}
+	-re "Could not open breakpoint file" {
+		send_user "\nNOTE: Breakpoint file error is expected due to known Totalview bug\n"
+		exp_continue
+	}
 	timeout {
 		send_user "\nFAILURE: totalviewcli not responding\n"
 		set exit_code 1
diff --git a/testsuite/expect/test7.4.prog.c b/testsuite/expect/test7.4.prog.c
index fc688154637d7d497cae5572ca30c659e55960db..e8e7843e5d444930440c5d5c4fd34754731cec44 100644
--- a/testsuite/expect/test7.4.prog.c
+++ b/testsuite/expect/test7.4.prog.c
@@ -4,10 +4,11 @@
  *  Copyright (C) 2004 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Dong Ang <dahn@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test7.6 b/testsuite/expect/test7.6
index dee627741ee3106440486fd51633fac49157f1a7..855e4fa5c76488fbd75a0f0841333fedacff3c33 100755
--- a/testsuite/expect/test7.6
+++ b/testsuite/expect/test7.6
@@ -11,10 +11,11 @@
 # Copyright (C) 2004-2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov> and Dong Ang <dahn@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
@@ -50,7 +51,7 @@ set test_prog     "test$test_id.prog"
 set timeout       $max_job_delay
 
 # Note this appears as a single argv value to totalviewcli
-set no_bulk       "set issue_dgo false; dset TV::bulk_launch_enabled false"
+set no_bulk       "dset TV::bulk_launch_enabled false"
 
 print_header $test_id
 
@@ -180,6 +181,10 @@ expect {
 		send "yes\n"
 		exp_continue
 	}
+	-re "Could not open breakpoint file" {
+		send_user "\nNOTE: Breakpoint file error is expected due to known Totalview bug\n"
+		exp_continue
+	}
 	timeout {
 		send_user "\nFAILURE: totalviewcli not responding\n"
 		set exit_code 1
diff --git a/testsuite/expect/test7.6.prog.c b/testsuite/expect/test7.6.prog.c
index f2afcc281d1542a8c8ef5408e6658d0389484531..5df6a573f94778471bcf539e3de89ba8d6f87ef3 100644
--- a/testsuite/expect/test7.6.prog.c
+++ b/testsuite/expect/test7.6.prog.c
@@ -4,10 +4,11 @@
  *  Copyright (C) 2004-2006 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Dong Ang <dahn@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test7.7 b/testsuite/expect/test7.7
index 7a90ba1cfe510b135911a6fe05d178f6db2afcb6..4c03e69b0c67381fa205d1bf25262cbf55b30639 100755
--- a/testsuite/expect/test7.7
+++ b/testsuite/expect/test7.7
@@ -12,10 +12,11 @@
 # Copyright (C) 2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
@@ -64,7 +65,7 @@ expect {
 		set sched_wiki 1
 		exp_continue
 	}
-	-re "SLURM_CONFIG_FILE *= (.*)/slurm.conf" {
+	-re "SLURM_CONF *= (.*)/slurm.conf.*SLURM_VERSION" {
 		set conf_dir $expect_out(1,string)
 		exp_continue
 	}
diff --git a/testsuite/expect/test7.7.prog.c b/testsuite/expect/test7.7.prog.c
index 81ba2a5e36d5c6f5e05ee36aae9fc00e9d915dff..8afd6f1a34898b558c6d386ea859b3821c75cd70 100644
--- a/testsuite/expect/test7.7.prog.c
+++ b/testsuite/expect/test7.7.prog.c
@@ -5,10 +5,11 @@
  *  Copyright (C) 2008 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test7.8 b/testsuite/expect/test7.8
index e170fa2f7ee06f66ebf4e03c9a216768ffe8dbd7..5918ed78d4968b3ddbfc5e2750c92c73806b9cc6 100755
--- a/testsuite/expect/test7.8
+++ b/testsuite/expect/test7.8
@@ -12,10 +12,11 @@
 # Copyright (C) 2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
@@ -68,7 +69,7 @@ expect {
 		set sched_wiki 1
 		exp_continue
 	}
-	-re "SLURM_CONFIG_FILE *= (.*)/slurm.conf" {
+	-re "SLURM_CONF *= (.*)/slurm.conf.*SLURM_VERSION" {
 		set conf_dir $expect_out(1,string)
 		exp_continue
 	}
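The regexp edits scattered through this patch (test7.7 and test7.11 above,
test8.7 below) all track one output change: scontrol now reports the key as
SLURM_CONF rather than SLURM_CONFIG_FILE, and the tighter patterns use the
SLURM_VERSION key printed after it as an end anchor. A minimal extraction
sketch under that assumption:

	# Pull slurm.conf's directory out of "scontrol show config",
	# anchoring the match on the SLURM_VERSION key printed after it.
	set conf_dir ""
	spawn $scontrol show config
	expect {
		-re "SLURM_CONF *= (.*)/slurm.conf.*SLURM_VERSION" {
			set conf_dir $expect_out(1,string)
			exp_continue
		}
		timeout { send_user "\nFAILURE: scontrol not responding\n" }
		eof { wait }
	}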
diff --git a/testsuite/expect/test7.8.prog.c b/testsuite/expect/test7.8.prog.c
index 4f40119664b992684b15e201ee4e537e9ba8f438..8fef8c2b78f90f37749faf933100baef926f4dc4 100644
--- a/testsuite/expect/test7.8.prog.c
+++ b/testsuite/expect/test7.8.prog.c
@@ -4,10 +4,11 @@
  *  Copyright (C) 2006 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test7.9 b/testsuite/expect/test7.9
index 1915f4d2dd34b7b480331187be70755f03b87485..2f823ee2f74cf1ca1b804b718b54695c8973d973 100755
--- a/testsuite/expect/test7.9
+++ b/testsuite/expect/test7.9
@@ -11,10 +11,11 @@
 # Copyright (C) 2002-2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test7.9.prog.c b/testsuite/expect/test7.9.prog.c
index dc01a5e24dad18f40fe9f2715892881f434e5d76..6b8c0db10a1ebe5fd3ed62d0e4d6b19e882460a6 100644
--- a/testsuite/expect/test7.9.prog.c
+++ b/testsuite/expect/test7.9.prog.c
@@ -4,10 +4,11 @@
  *  Copyright (C) 2006 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test8.1 b/testsuite/expect/test8.1
index 30ef18be376069d69e4858ab64a92a8e237ce046..6bcf8d79b0e6289b709648735582016f47d6359a 100755
--- a/testsuite/expect/test8.1
+++ b/testsuite/expect/test8.1
@@ -10,10 +10,11 @@
 # Copyright (C) 2004 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test8.2 b/testsuite/expect/test8.2
index 64f630a12e0d82092a9efd31e6913014c830a55d..38f8fc8cd5a16687f7264be64e63ee6bef905572 100755
--- a/testsuite/expect/test8.2
+++ b/testsuite/expect/test8.2
@@ -10,10 +10,11 @@
 # Copyright (C) 2002 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test8.3 b/testsuite/expect/test8.3
index f5ab3ebbec06489c7695257dd3adbeac78e86879..25923580ee30de84d1905d3e3273f6367ae6f2a3 100755
--- a/testsuite/expect/test8.3
+++ b/testsuite/expect/test8.3
@@ -13,10 +13,11 @@
 # Copyright (C) 2004 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test8.4 b/testsuite/expect/test8.4
index e96045fd010440a6d3e6d6f495e7c3898c37d986..3a1f8c7568308aefde911f598e3de8a8ef63f2f0 100755
--- a/testsuite/expect/test8.4
+++ b/testsuite/expect/test8.4
@@ -14,10 +14,11 @@
 # Copyright (C) 2004 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test8.4.prog.c b/testsuite/expect/test8.4.prog.c
index c5b704cc81eab48b2fe091eb255c5f0702efb159..d5582171c6d5c6ab3354037a8184fc29b8a409ca 100644
--- a/testsuite/expect/test8.4.prog.c
+++ b/testsuite/expect/test8.4.prog.c
@@ -4,10 +4,11 @@
  *  Copyright (C) 2004 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Dong Ang <dahn@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test8.5 b/testsuite/expect/test8.5
index b86b03569660f6934054284952e3241471c6c012..f13daf78b13ffde585d02ddd357577a82b001cbf 100755
--- a/testsuite/expect/test8.5
+++ b/testsuite/expect/test8.5
@@ -1,7 +1,7 @@
 #!/usr/bin/expect
 ############################################################################
 # Purpose: Test of BLUEGENE SLURM functionality
-#          Test sacct functionality and accuracy.
+#          Test creation of all block sizes of one midplane and smaller.
 #
 # Output:  "TEST: #.#" followed by "SUCCESS" if test was successful, OR
 #          "FAILURE: ..." otherwise with an explanation of the failure, OR
@@ -10,10 +10,11 @@
 # Copyright (C) 2006-2007 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Danny Auble <da@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
@@ -42,225 +43,182 @@ set matches     0
 # job paramters
 set sleep_time 1
 
-print_header $test_id
-
-if {[test_bluegene] == 0} {
-	send_user "\nWARNING: This test is only compatable with bluegene systems\n"
-	exit $exit_code
-}
-#
-# Delete left-over input script files
-# Build input script file
-#
-exec $bin_rm -f $file_in
-exec echo "#!$bin_bash"    >$file_in
-exec echo "$bin_sleep $sleep_time "     >>$file_in
-exec $bin_chmod 700 $file_in
-
-#
-set timeout [expr $max_job_delay + $sleep_time]
+proc run_and_test { size } {
+	global number sbatch scontrol 
+	global file_out file_err file_in procs_per_cnode
 
-# make a 512 cnode block
+	set job_id 0
+	set exit_code 0
 
-set sbatch_pid [spawn $sbatch --output=$file_out --error=$file_err -t2 -N512-512 $file_in]
-expect {
-	-re "Submitted batch job ($number)" {
-		set job_id $expect_out(1,string)
-		exp_continue
-	}
-	timeout {
-		send_user "\nFAILURE: sbatch not responding\n"
-		slow_kill $sbatch_pid
-		exit 1
+	set sbatch_pid [spawn $sbatch --output=$file_out --error=$file_err -t2 -N$size-$size $file_in]
+	expect {
+		-re "Submitted batch job ($number)" {
+			set job_id $expect_out(1,string)
+			exp_continue
+		}
+		timeout {
+			send_user "\nFAILURE: sbatch not responding\n"
+			slow_kill $sbatch_pid
+			set exit_code 1
+		}
+		eof {
+			wait
+		}
 	}
-	eof {
-		wait
+
+	if {!$job_id || $exit_code} {
+		send_user "\nFAILURE: batch submit failure\n"
+		return 1
 	}
-}
-if {$job_id == 0} {
-	send_user "\nFAILURE: batch submit failure\n"
-	exit 1
-}
 
-#
-# Wait for job to run
-#
-if {[wait_for_job $job_id "DONE"] != 0} {
-	send_user "\nFAILURE: waiting for job to run\n"
-	set exit_code 1
-}
+	#
+	# Wait for job to run
+	#
+	if {[wait_for_job $job_id "DONE"] != 0} {
+		send_user "\nFAILURE: waiting for job to run\n"
+		return 1
+	}
 
-set matches 0
-set cpus_per_node 1
+	set matches 0
+	set requested_cpus [expr $size * $procs_per_cnode]
 
-spawn $scontrol show job $job_id
-expect {
-	-re "BP_List=$alpha_numeric " {
-		incr matches
-		exp_continue
-	}
-	-re "AllocCPUs=($number)" {
-		set cpu_count $expect_out(1,string)
-		set cpus_per_node [expr $cpu_count / 512]
-		if {($cpu_count != 512) && ($cpu_count != 1024)} {
-			send_user "\nFAILURE: Bad node count allocated\n"
+	set scon_pid [spawn $scontrol show job $job_id]
+	expect {
+		-re "AllocCPUs=$requested_cpus" {
+			incr matches
+			exp_continue
+		}
+		timeout {
+			send_user "\nFAILURE: scontrol not responding\n"
+			slow_kill $scon_pid
 			set exit_code 1
 		}
-		exp_continue
+		eof {
+			wait
+		}
 	}
-	timeout {
-		send_user "\nFAILURE: scontrol not responding\n"
+	if {$matches != 1} {
+		send_user "\nFAILURE: Bluegene $size cnode block not created correctly\n"
 		set exit_code 1
 	}
-	eof {
-		wait
-	}
-}
-if {$matches != 1} {
-	send_user "\nFAILURE: Blue Gene 512 cnode block not created correctly\n"
-	set exit_code 1
-}
-if {$exit_code != 0} {
-	exit $exit_code
+	
+	
+	return $exit_code
 }
 
-# make a 128 cnode block
-
+proc run_bgl_test { } {
+	global psets
 
-set sbatch_pid [spawn $sbatch --output=$file_out --error=$file_err -t2 -N128-128 $file_in]
-expect {
-	-re "Submitted batch job ($number)" {
-		set job_id $expect_out(1,string)
-		exp_continue
+	# make a 512 cnode block
+	if {[run_and_test 512]} {
+		return 1
 	}
-	timeout {
-		send_user "\nFAILURE: sbatch not responding\n"
-		slow_kill $sbatch_pid
-		exit 1
+
+	if {[run_and_test 128]} {
+		return 1
 	}
-	eof {
-		wait
+
+	if {$psets >= 16} {
+		if {[run_and_test 32]} {
+			return 1
+		}
 	}
-}
-if {$job_id == 0} {
-	send_user "\nFAILURE: batch submit failure\n"
-	exit 1
-}
 
-#
-# Wait for job to run
-#
-if {[wait_for_job $job_id "DONE"] != 0} {
-	send_user "\nFAILURE: waiting for job to run\n"
-	set exit_code 1
+	return 0;
 }
 
-set matches 0
 
-spawn $scontrol show job $job_id
-expect {
-	-re "BP_List=$alpha_numeric\[$number-$number\] " {
-		incr matches
-		exp_continue
+proc run_bgp_test { } {
+	global psets
+
+	# make a 512 cnode block
+	if {[run_and_test 512]} {
+		return 1
 	}
-	-re "AllocCPUs=($number)" {
-		set cpu_count $expect_out(1,string)
-		set node_count [expr $cpu_count / $cpus_per_node]
-		if {$node_count != 128} {
-			send_user "\n=======================================\n"
-			send_user "\nFAILURE: Bad node count allocated\n"
-			send_user "This can indicate a low configured value of Numpsets\n"
-			send_user "  in bluegene.conf (few available I/O nodes).\n"
-			send_user "This could indicate Static Bluegene partitioning\n"
-			send_user "  with no small blocks (1/4 of a base partition).\n"
-			send_user "This could also indicate MinNodes for the partition\n"
-			send_user "  being too high for this test.\n"
-			send_user "=======================================\n"
-			set exit_code 1
-		}
-		exp_continue
+
+	if {[run_and_test 256]} {
+		return 1
 	}
-	timeout {
-		send_user "\nFAILURE: scontrol not responding\n"
-		set exit_code 1
+	if {[run_and_test 128]} {
+		return 1
 	}
-	eof {
-		wait
+
+	if {$psets >= 8} {
+		if {[run_and_test 64]} {
+			return 1
+		}
+		if {$psets >= 16} {
+			if {[run_and_test 32]} {
+				return 1
+			}
+			if {$psets >= 32} {
+				if {[run_and_test 16]} {
+					return 1
+				}
+			}
+		}
 	}
+	return 0;
+}
+
+# TEST STARTS HERE
+
+print_header $test_id
+
+if {[test_bluegene] == 0} {
+	send_user "\nWARNING: This test is only compatible with bluegene systems\n"
+	exit $exit_code
 }
-if {$matches != 1} {
-	send_user "\nFAILURE: Blue Gene 128 cnode block not created correctly\n"
-	set exit_code 1
+
+if {[string compare [get_bluegene_layout] Dynamic]} {
+	send_user "\nWARNING: This test is only compatible with dynamic bluegene systems\n"
+	exit $exit_code
 }
 
-# make a 32 cnode block
+set psets [get_bluegene_psets]
 
-set sbatch_pid [spawn $sbatch --output=$file_out --error=$file_err -t2 -N32-32 $file_in]
-expect {
-	-re "Submitted batch job ($number)" {
-		set job_id $expect_out(1,string)
-		exp_continue
-	}
-	timeout {
-		send_user "\nFAILURE: sbatch not responding\n"
-		slow_kill $sbatch_pid
-		exit 1
-	}
-	eof {
-		wait
-	}
+if {$psets == 0} {
+	send_user "\nFAILURE: No psets are set on this system\n"
+	exit 1
 }
-if {$job_id == 0} {
-	send_user "\nFAILURE: batch submit failure\n"
+
+set procs_per_cnode [get_bluegene_procs_per_cnode]
+
+if {$procs_per_cnode == 0} {
+	send_user "\nFAILURE: Couldn't determine procs per cnode\n"
 	exit 1
 }
 
+set type [get_bluegene_type]
+
+if {$type == 0} {
+	send_user "\nFAILURE: No bluegene type found\n"
+	exit 1
+}
+
+
 #
-# Wait for job to run
+# Delete left-over input script files
+# Build input script file
 #
-if {[wait_for_job $job_id "DONE"] != 0} {
-	send_user "\nFAILURE: waiting for job to run\n"
-	set exit_code 1
-}
+exec $bin_rm -f $file_in
+exec echo "#!$bin_bash"    >$file_in
+exec echo "$bin_sleep $sleep_time "     >>$file_in
+exec $bin_chmod 700 $file_in
 
-set matches 0
-spawn $scontrol show job $job_id
-expect {
-#	Could be one or multiple node cards (e.g. "bgl000[0]" or "bgl000[0-1]")
-	-re "BP_List=$alpha_numeric\[$number" {
-		incr matches
-		exp_continue
-	}
-	-re "AllocCPUs=($number)" {
-		set cpu_count $expect_out(1,string)
-		set node_count [expr $cpu_count / $cpus_per_node]
-		if {$node_count != 32} {
-			send_user "\n=======================================\n"
-			send_user "FAILURE: Bad node count allocated\n"
-			send_user "This can indicate a low configured value of Numpsets\n"
-			send_user "  in bluegene.conf (few available I/O nodes).\n"
-			send_user "This could indicate Static Bluegene partitioning\n"
-			send_user "  with no small blocks (1/16 of a base partition).\n"
-			send_user "This could also indicate MinNodes for the partition\n"
-			send_user "  being too high for this test.\n"
-			send_user "=======================================\n"
-			set exit_code 1
-		}
-		exp_continue
-	}
-	timeout {
-		send_user "\nFAILURE: scontrol not responding\n"
-		set exit_code 1
-	}
-	eof {
-		wait
-	}
-}
-if {$matches != 1} {
-	send_user "\nFAILURE: Blue Gene 32 cnode block not created correctly\n"
-	set exit_code 1
+#
+set timeout [expr $max_job_delay + $sleep_time]
+
+if {![string compare $type "P"]} {
+	set exit_code [run_bgp_test]
+} elseif {![string compare $type "L"]} {
+	set exit_code [run_bgl_test]
+} else {
+	send_user "\nFAILURE: unknown bluegene system type '$type'\n";
+	exit 1
 }
 
+
 if {$exit_code == 0} {
 	exec rm -f $file_in $file_out $file_err
 	send_user "\nSUCCESS\n"
diff --git a/testsuite/expect/test8.6 b/testsuite/expect/test8.6
index 9b78b162fb21e38d1769ecb2da27db6e6c9851ac..5992ea06ff2a6a52c5a354d77860f833ba843b1f 100755
--- a/testsuite/expect/test8.6
+++ b/testsuite/expect/test8.6
@@ -1,7 +1,7 @@
 #!/usr/bin/expect
 ############################################################################
 # Purpose: Test of BLUEGENE SLURM functionality
-#          Test sacct functionality and accuracy.
+#          Stress test Dynamic mode block creation.
 #
 # Output:  "TEST: #.#" followed by "SUCCESS" if test was successful, OR
 #          "FAILURE: ..." otherwise with an explanation of the failure, OR
@@ -11,10 +11,11 @@
 # Copyright (C) 2008 Lawrence Livermore National Security.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Danny Auble <da@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
@@ -37,31 +38,13 @@ set exit_code   0
 set file_in     "test$test_id.input"
 set job_id      0
 set matches     0
-set 32node_block_cnt 16
-set 128node_block_cnt 8
-set 512node_block_cnt 8
-set 1knode_block_cnt  8
-set 4knode_block_cnt  8
-set 8knode_block_cnt  8
-set 16knode_block_cnt 8
-set 32knode_block_cnt 8
 
 # job paramters
 set sleep_time  5
 
-print_header $test_id
-
-if {[test_bluegene] == 0} {
-	send_user "\nWARNING: This test is only compatable with bluegene systems\n"
-	exit $exit_code
-}
-
-make_bash_script $file_in "$bin_sleep $sleep_time"
-set timeout [expr $max_job_delay + $sleep_time]
-
 # make a bunch of blocks of the specified size (node_cnt)
-proc run_batch_jobs { node_cnt job_cnt file_in } {
-	global sbatch srun number kill_srun
+proc run_batch_jobs { node_cnt job_cnt } {
+	global sbatch number file_in
 	set start_cnt 0	
 	for {set inx 0} {$inx < $job_cnt} {incr inx} {
 		set sbatch_pid [spawn $sbatch --output=/dev/null -t5 -N$node_cnt-$node_cnt $file_in]
@@ -96,16 +79,13 @@ proc run_batch_jobs { node_cnt job_cnt file_in } {
 			}
 		}
 	}
-	if { $start_cnt != $job_cnt } {
-		return 0
-	} else {
-		return 1
-	}	
+	
+	return $start_cnt
 }
 
 # Wait up to 900 seconds for all jobs to terminate
 # Return 0 if all jobs done, remainin job count otherwise
-proc wait_for_all_jobs { job_name } {
+proc wait_for_all_jobs { } {
 	global scancel squeue bin_sleep file_in
 
 	set last_matches 0
@@ -115,7 +95,7 @@ proc wait_for_all_jobs { job_name } {
 		log_user 0
 		spawn $squeue -o %j
 		expect {
-			-re "$job_name" {
+			-re "$file_in" {
 				incr matches
 				exp_continue
 			}
@@ -151,77 +131,188 @@ proc wait_for_all_jobs { job_name } {
 	return $matches
 }
 
-#
-# Run the jobs here. We can be reasonably sure of running 
-# 512 cnode jobs (one midplane). Other sizes may not be supported
-# due to architecture (I/O node count) and/or slurm configuration
-#
-if {[run_batch_jobs 32 $32node_block_cnt $file_in ] == 0} {
-	send_user "\nFAILURE: 32 cnodes can't be created\n"
-	set exit_code 1	
-}
+proc run_bgl_test { } {
+	global psets num_nodes
 
-if {[run_batch_jobs 128 $128node_block_cnt $file_in ] == 0} {
-	send_user "\nFAILURE: 128 cnodes can't be created\n"
-	set exit_code 1	
-}
+	set 32node_block_cnt 16
+	set 128node_block_cnt 8
+	set 512node_block_cnt 16
+	set 1knode_block_cnt  8
+	set 4knode_block_cnt  8
+	set 8knode_block_cnt  4
+	set 16knode_block_cnt 4
+	set 32knode_block_cnt 4
+	set started 0
+	if {$psets >= 16} {
+		incr started [run_batch_jobs 32 $32node_block_cnt]
+	}
+	
+	incr started [run_batch_jobs 128 $128node_block_cnt]	
+	incr started [run_batch_jobs 512 $512node_block_cnt]	
+	
+	if {$num_nodes >= 1024} {
+		incr started [run_batch_jobs 1k $1knode_block_cnt]
+		if {$num_nodes >= 4096} {
+			incr started [run_batch_jobs 4k $4knode_block_cnt]
+			if {$num_nodes >= 8192} {
+				incr started [run_batch_jobs 8k $8knode_block_cnt]
+				if {$num_nodes >= 16384} {
+					incr started [run_batch_jobs 16k $16knode_block_cnt]
+					if {$num_nodes >= 32768} {
+						incr started [run_batch_jobs 32k $32knode_block_cnt]
+					}
+				}
+			}
+		}
+		incr started [run_batch_jobs 1k $1knode_block_cnt]
+	}
 
-if {[run_batch_jobs 512 $512node_block_cnt $file_in ] != 1} {
-	send_user "\nFAILURE: 512 cnodes can't be created\n"
-	set exit_code 1	
-}
+	incr started [run_batch_jobs 512 $512node_block_cnt]	
+
+	incr started [run_batch_jobs 128 $128node_block_cnt]
+
+	if {$psets >= 16} {
+		incr started [run_batch_jobs 32 $32node_block_cnt]
+	}
 
-if {[run_batch_jobs 1k $1knode_block_cnt $file_in ] == 0} {
-	send_user "\nFAILURE: 1k cnodes can't be created\n"
-	set exit_code 1	
+	incr started [run_batch_jobs 512 $512node_block_cnt]	
+
+	return $started;
 }
 
-if {[run_batch_jobs 4k $4knode_block_cnt $file_in ] == 0} {
-	send_user "\nFAILURE: 4k cnodes can't be created\n"
-	set exit_code 1	
+
+proc run_bgp_test { } {
+	global psets num_nodes
+
+	set 16node_block_cnt 32
+	set 32node_block_cnt 16
+	set 64node_block_cnt 8
+	set 128node_block_cnt 8
+	set 256node_block_cnt 8
+	set 512node_block_cnt 16
+	set 1knode_block_cnt  8
+	set 4knode_block_cnt  8
+	set 8knode_block_cnt  4
+	set 16knode_block_cnt 4
+	set 32knode_block_cnt 4
+	set started 0
+
+	if {$psets >= 32} {
+		incr started [run_batch_jobs 16 $16node_block_cnt]
+	}
+
+	if {$psets >= 16} {
+		incr started [run_batch_jobs 32 $32node_block_cnt]
+	}
+	if {$psets >= 8} {
+		incr started [run_batch_jobs 64 $64node_block_cnt]
+	}
+	
+	incr started [run_batch_jobs 128 $128node_block_cnt]	
+	incr started [run_batch_jobs 256 $256node_block_cnt]	
+	incr started [run_batch_jobs 512 $512node_block_cnt]	
+	
+	if {$num_nodes >= 1024} {
+		incr started [run_batch_jobs 1k $1knode_block_cnt]
+		if {$num_nodes >= 4096} {
+			incr started [run_batch_jobs 4k $4knode_block_cnt]
+			if {$num_nodes >= 8192} {
+				incr started [run_batch_jobs 8k $8knode_block_cnt]
+				if {$num_nodes >= 16384} {
+					incr started [run_batch_jobs 16k $16knode_block_cnt]
+					if {$num_nodes >= 32768} {
+						incr started [run_batch_jobs 32k $32knode_block_cnt]
+					}
+				}
+			}
+		}
+		incr started [run_batch_jobs 1k $1knode_block_cnt]
+	}
+
+	incr started [run_batch_jobs 512 $512node_block_cnt]	
+	incr started [run_batch_jobs 256 $256node_block_cnt]	
+	incr started [run_batch_jobs 128 $128node_block_cnt]
+	if {$psets >= 8} {
+		incr started [run_batch_jobs 64 $64node_block_cnt]
+	}
+
+	if {$psets >= 16} {
+		incr started [run_batch_jobs 32 $32node_block_cnt]
+	}
+
+	if {$psets >= 32} {
+		incr started [run_batch_jobs 16 $16node_block_cnt]
+	}
+
+	incr started [run_batch_jobs 512 $512node_block_cnt]	
+
+	return $started;
 }
 
-if {[run_batch_jobs 8k $8knode_block_cnt $file_in ] == 0} {
-	send_user "\nFAILURE: 8k cnodes can't be created\n"
-	set exit_code 1	
+
+# TEST STARTS HERE
+
+print_header $test_id
+
+if {[test_bluegene] == 0} {
+	send_user "\nWARNING: This test is only compatible with bluegene systems\n"
+	exit $exit_code
 }
 
-if {[run_batch_jobs 16k $16knode_block_cnt $file_in ] == 0} {
-	send_user "\nFAILURE: 16k cnodes can't be created\n"
-	set exit_code 1	
+if {[string compare [get_bluegene_layout] Dynamic]} {
+	send_user "\nWARNING: This test is only compatible with dynamic bluegene systems\n"
+	exit $exit_code
 }
 
-if {[run_batch_jobs 32k $32knode_block_cnt $file_in ] == 0} {
-	send_user "\nFAILURE: 32k cnodes can't be created\n"
-	set exit_code 1	
+set psets [get_bluegene_psets]
+
+if {$psets == 0} {
+	send_user "\nFAILURE: No psets are set on this system\n"
+	exit 1
 }
 
-if {[run_batch_jobs 1k $1knode_block_cnt $file_in ] == 0} {
-	send_user "\nFAILURE: 1k cnodes can't be created\n"
-	set exit_code 1	
+set num_nodes [expr [get_node_cnt] * [get_bluegene_cnodes_per_mp]] 
+if {$num_nodes == 0} {
+	send_user "\nFAILURE: No nodes found on this system\n"
+	exit 1
 }
 
-if {[run_batch_jobs 512 $512node_block_cnt $file_in ] != 1} {
-	send_user "\nFAILURE: 512 cnodes can't be created\n"
-	set exit_code 1	
+set procs_per_cnode [get_bluegene_procs_per_cnode]
+
+if {$procs_per_cnode == 0} {
+	send_user "\nFAILURE: Couldn't determine procs per cnode\n"
+	exit 1
 }
 
-if {[run_batch_jobs 128 $128node_block_cnt $file_in ] == 0} {
-	send_user "\nFAILURE: 128 cnodes can't be created\n"
-	set exit_code 1	
+set type [get_bluegene_type]
+
+if {$type == 0} {
+	send_user "\nFAILURE: No bluegene type found\n"
+	exit 1
 }
 
-if {[run_batch_jobs 32 $32node_block_cnt $file_in ] == 0} {
-	send_user "\nFAILURE: 32 cnodes can't be created\n"
-	set exit_code 1	
+
+make_bash_script $file_in "$bin_sleep $sleep_time"
+set timeout [expr $max_job_delay + $sleep_time]
+
+
+if {![string compare $type "P"]} {
+	set started [run_bgp_test]
+} elseif {![string compare $type "L"]} {
+	set started [run_bgl_test]
+} else {
+	send_user "\nFAILURE: unknown bluegene system type '$type'\n"
+	exit 1
 }
 
-if {[run_batch_jobs 512 $512node_block_cnt $file_in ] == 0} {
-	send_user "\nFAILURE: 512 cnodes can't be created\n"
-	set exit_code 1	
+if {!$started} {
+	send_user "\nFAILURE: No jobs were started\n"
+	exit 1
 }
 
-if {[wait_for_all_jobs $file_in] != 0} {
+send_user "Started $started jobs\n"
+
+if {[wait_for_all_jobs] != 0} {
 	send_user "\nFAILURE: some submitted jobs failed to terminate\n"
 	set exit_code 1
 }
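
The run_bgl_test and run_bgp_test procedures above ramp the block size
up and then back down, guarding each size behind the psets or num_nodes
checks. The same ramp can be written data-driven; a minimal sketch,
assuming the run_batch_jobs helper defined earlier in this test and the
num_nodes global (the psets guard is omitted for brevity):

	proc run_sized_jobs { job_list } {
		global num_nodes
		# job_list holds {size min_cnodes block_cnt} triples
		set started 0
		foreach job $job_list {
			foreach {size min_cnodes block_cnt} $job break
			if {$num_nodes >= $min_cnodes} {
				incr started [run_batch_jobs $size $block_cnt]
			}
		}
		return $started
	}

	# Example ramp-up for a system with at least 1k cnodes
	set started [run_sized_jobs {{128 0 8} {256 0 8} {512 0 16} {1k 1024 8}}]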
diff --git a/testsuite/expect/test8.7 b/testsuite/expect/test8.7
index 2f59d6b71e1418dd9a86bfaf5d66e2831e76ca93..fffbf968da7d83054c54d4897fae0a41dfc84502 100755
--- a/testsuite/expect/test8.7
+++ b/testsuite/expect/test8.7
@@ -10,10 +10,11 @@
 # Copyright (C) 2006-2007 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
@@ -97,7 +98,7 @@ expect {
 		set is_bluegene 1
 		exp_continue
 	}
-	-re "SLURM_CONFIG_FILE *= (.*)/slurm.conf" {
+	-re "SLURM_CONF *= (.*)/slurm.conf" {
 		set conf_dir $expect_out(1,string)
 		exp_continue
 	}
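
The regex change above tracks the rename of the configuration-file
environment variable from SLURM_CONFIG_FILE to SLURM_CONF. A test that
needs to point the SLURM commands at an alternate slurm.conf can export
the variable before spawning them; a one-line sketch with a placeholder
path:

	set env(SLURM_CONF) "/path/to/alternate/slurm.conf"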
diff --git a/testsuite/expect/test8.7.prog.c b/testsuite/expect/test8.7.prog.c
index 58b6c373abf24677599696e75e8cc1f1bd9ca771..8c13b76ecc4d21f4dae3ee056a4a8eff5eef2c94 100644
--- a/testsuite/expect/test8.7.prog.c
+++ b/testsuite/expect/test8.7.prog.c
@@ -4,10 +4,11 @@
  *  Copyright (C) 2006-2007 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *  
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test9.1 b/testsuite/expect/test9.1
index a567fd837ea96e4162223807b22aea0998ecb215..4512b0d187e41fa3915e85b072a0c875630660fe 100755
--- a/testsuite/expect/test9.1
+++ b/testsuite/expect/test9.1
@@ -13,10 +13,11 @@
 # Copyright (C) 2008 Lawrence Livermore National Security.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test9.2 b/testsuite/expect/test9.2
index 7d0ef733f60b0a9adee8d56c89afd1ee7e896b36..52bfc1e4cfb0371cfa4eaf9e29c183c0080cc578 100755
--- a/testsuite/expect/test9.2
+++ b/testsuite/expect/test9.2
@@ -13,10 +13,11 @@
 # Copyright (C) 2008 Lawrence Livermore National Security.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test9.3 b/testsuite/expect/test9.3
index 63b497653c9e43971c4b4b945d451de565c96d0a..110acc032966512a94c22eb4227e017469296096 100755
--- a/testsuite/expect/test9.3
+++ b/testsuite/expect/test9.3
@@ -13,10 +13,11 @@
 # Copyright (C) 2008 Lawrence Livermore National Security.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test9.4 b/testsuite/expect/test9.4
index 46fc8140687fdbc626c1dfd61308f04c89b0726d..38fbdbcc49f99d1696bc504f9922eda3f93caaff 100755
--- a/testsuite/expect/test9.4
+++ b/testsuite/expect/test9.4
@@ -18,10 +18,11 @@
 # Copyright (C) 2008 Lawrence Livermore National Security.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test9.5 b/testsuite/expect/test9.5
index 83004e005f5fb4d2005ef79b5c357b7b3caad1fd..e86c152e6a4f4f1e1c8783fd82a09de83253dd93 100755
--- a/testsuite/expect/test9.5
+++ b/testsuite/expect/test9.5
@@ -10,10 +10,11 @@
 # Copyright (C) 2008 Lawrence Livermore National Security.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test9.6 b/testsuite/expect/test9.6
index 346e22f6cd74756c19fe7c97f903c50fd2b791b5..dd11aef621118b852c78db26b917dc5614ff7a96 100755
--- a/testsuite/expect/test9.6
+++ b/testsuite/expect/test9.6
@@ -13,10 +13,11 @@
 # Copyright (C) 2008 Lawrence Livermore National Security.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test9.7 b/testsuite/expect/test9.7
index fee3304135c181cb6c5dc76206ae97207fb93adb..5f266a5de9d0ff195b75bde647758a7f29beadfd 100755
--- a/testsuite/expect/test9.7
+++ b/testsuite/expect/test9.7
@@ -10,10 +10,11 @@
 # Copyright (C) 2002 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test9.7.bash b/testsuite/expect/test9.7.bash
index 15d53995b6e5f1556e5236c55a42bfd893287bf0..716b89d7bbedc0851901d4321826ba58e02e47aa 100755
--- a/testsuite/expect/test9.7.bash
+++ b/testsuite/expect/test9.7.bash
@@ -7,10 +7,11 @@
 # Copyright (C) 2002 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/expect/test9.8 b/testsuite/expect/test9.8
index de7eacac2ef7c4d383f9e4ca94ec7f48bf66e7fb..aeb0d8d23ce12229555932bf5f0a6248cda5287d 100755
--- a/testsuite/expect/test9.8
+++ b/testsuite/expect/test9.8
@@ -13,10 +13,11 @@
 # Copyright (C) 2002-2006 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
@@ -38,7 +39,7 @@ set test_id      "9.8"
 set exit_code    0
 set file_in      "test$test_id.input"
 set job_cnt      10
-set delay        5
+set delay        10
 set job_name     "test$test_id"
 set sleep_time   300
 set task_cnt     60
@@ -60,7 +61,7 @@ if {[test_front_end] != 0} {
 #   This is especially important on very slow systems (e.g. AIX).
 #
 make_bash_script $file_in "
-$bin_sleep $delay
+$bin_sleep 5
 for ((inx=0; inx < $task_cnt; inx++)) ; do
         $srun -N1 -n1 $bin_sleep $sleep_time &
 done
@@ -71,7 +72,7 @@ $srun -N1 -n1 $bin_sleep $sleep_time
 # Initiate $job_cnt batch jobs
 #
 set start_cnt 0
-set timeout $delay
+set timeout 30
 for {set inx 0} {$inx < $job_cnt} {incr inx} {
 	set sbatch_pid [spawn $sbatch --job-name=$job_name --output=/dev/null --error=/dev/null -t5 $file_in]
 	expect {
@@ -84,22 +85,20 @@ for {set inx 0} {$inx < $job_cnt} {incr inx} {
 			exp_continue
 		}
 		timeout {
-			send_user "\nFAILURE: srun not responding\n"
+			send_user "\nFAILURE: sbatch not responding\n"
 			slow_kill $sbatch_pid
-			exit 1
+			set exit_code 1
 		}
 		eof {
 			wait
 		}
 	}
 }
-if {$start_cnt < 1} {
-	send_user "\nFAILURE: no jobs submitted\n"
-	exit 1
-}
 if {$start_cnt < $job_cnt} {
-	send_user "\nFAILURE: not all jobs submitted\n"
+	send_user "\nFAILURE: only $start_cnt of $job_cnt jobs submitted\n"
 	set exit_code 1
+} else {
+	send_user "\nAll $start_cnt jobs submitted\n"
 }
 
 #
@@ -185,8 +184,9 @@ expect {
 # If messages are lost, slurmctld re-sends the job kill RPC 120 seconds later
 # In any case, make sure that all jobs get completed
 #
-exec $bin_sleep 10
+exec $bin_sleep     10
 set completing_jobs 0
+set running_jobs    0
 spawn $squeue --noheader --user $user_name
 expect {
 	-re "test9.8.*$user_name *CG" {
@@ -194,13 +194,31 @@ expect {
 		exp_continue
 	}
 	-re "test9.8.*$user_name" {
-		send_user "\nFAILURE: jobs not all gone\n"
-		set exit_code 1
+		incr running_jobs
+		exp_continue
 	}
 	eof {
 		wait
 	}
 }
+#
+# The following logic handles the case where the scancel request
+# fails on a very busy system (reported as FAILURE above).
+#
+# Increasing the MessageTimeout configuration parameter
+# should fix this problem.
+#
+if {$running_jobs != 0} {
+	send_user "\nFAILURE: jobs not all cancelled\n"
+	set exit_code 1
+
+	spawn $scancel --quiet --user $user_name
+	expect {
+		eof {
+			wait
+		}
+	}
+}
 if {$completing_jobs != 0} {
 	send_user "\nWaiting for slurmctld to re-send job kill RPC\n"
 	send_user "This will take 120 seconds...\n"
@@ -208,7 +226,7 @@ if {$completing_jobs != 0} {
 	set completing_jobs 0
 	spawn $squeue --noheader --user $user_name
 	expect {
-		-re "$user_name *CG" {
+		-re "$job_name *$user_name *CG" {
 			incr completing_jobs
 			exp_continue
 		}
@@ -228,7 +246,7 @@ if {$completing_jobs != 0} {
 		exec $bin_sleep $max_wait
 		spawn $squeue --noheader --user $user_name
 		expect {
-			-re "$user_name *CG" {
+			-re "$job_name *$user_name *CG" {
 				incr completing_jobs
 				exp_continue
 			}
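
The new comment in test9.8 above points at the MessageTimeout slurm.conf
parameter when scancel times out on a busy system. A minimal sketch of
probing the live value, assuming the testsuite's usual $scontrol and
$number globals (illustrative only, not part of the patch):

	set msg_timeout -1
	spawn $scontrol show config
	expect {
		-re "MessageTimeout *= *($number)" {
			set msg_timeout $expect_out(1,string)
			exp_continue
		}
		eof {
			wait
		}
	}
	if {$msg_timeout != -1 && $msg_timeout < 30} {
		send_user "\nWARNING: MessageTimeout is only $msg_timeout sec\n"
	}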
diff --git a/testsuite/expect/usleep b/testsuite/expect/usleep
index 5759a4602700b883ae0546e1166c6f9b8fa745d1..c6011dac6ac6be9ce2f44dc8c76c80ebd511ec3a 100755
--- a/testsuite/expect/usleep
+++ b/testsuite/expect/usleep
@@ -7,10 +7,11 @@
 # Copyright (C) 2002 The Regents of the University of California.
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
-# LLNL-CODE-402394.
+# CODE-OCEC-09-009. All rights reserved.
 # 
 # This file is part of SLURM, a resource management program.
-# For details, see <http://www.llnl.gov/linux/slurm/>.
+# For details, see <https://computing.llnl.gov/linux/slurm/>.
+# Please also read the included file: DISCLAIMER.
 #  
 # SLURM is free software; you can redistribute it and/or modify it under
 # the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/slurm_unit/Makefile.in b/testsuite/slurm_unit/Makefile.in
index 9d159aef3a107129f4de9940776197c91476a073..a2f37a610fa0dbe992250da35a948a511cc27c3b 100644
--- a/testsuite/slurm_unit/Makefile.in
+++ b/testsuite/slurm_unit/Makefile.in
@@ -40,14 +40,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -89,6 +93,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
diff --git a/testsuite/slurm_unit/api/Makefile.in b/testsuite/slurm_unit/api/Makefile.in
index e8658e20af0a19834318e27e31d0baf637c55cc8..190d3fc6ed71a881e26ba772ef9b703c9d2777fb 100644
--- a/testsuite/slurm_unit/api/Makefile.in
+++ b/testsuite/slurm_unit/api/Makefile.in
@@ -42,14 +42,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -109,6 +113,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
diff --git a/testsuite/slurm_unit/api/manual/Makefile.in b/testsuite/slurm_unit/api/manual/Makefile.in
index 650f48ce177ff12a9c83273cd736a7850927ac2a..b54026a38f75a0411b8242f22903738beb20e082 100644
--- a/testsuite/slurm_unit/api/manual/Makefile.in
+++ b/testsuite/slurm_unit/api/manual/Makefile.in
@@ -44,14 +44,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -131,6 +135,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
diff --git a/testsuite/slurm_unit/api/manual/cancel-tst.c b/testsuite/slurm_unit/api/manual/cancel-tst.c
index 8fce7808d63b89d9369a10988d0ab999f33e08c2..82b0da815011f4416cb05674a0b3bdfa42468b16 100644
--- a/testsuite/slurm_unit/api/manual/cancel-tst.c
+++ b/testsuite/slurm_unit/api/manual/cancel-tst.c
@@ -4,10 +4,11 @@
  *  Copyright (C) 2002 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov> et.al.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/slurm_unit/api/manual/complete-tst.c b/testsuite/slurm_unit/api/manual/complete-tst.c
index 06926437d0a6ed391ea52048b8c8d05157af76bc..8503202e809d1d28e752485cabede90d745b9dbc 100644
--- a/testsuite/slurm_unit/api/manual/complete-tst.c
+++ b/testsuite/slurm_unit/api/manual/complete-tst.c
@@ -4,10 +4,11 @@
  *  Copyright (C) 2002 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov> et.al.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/slurm_unit/api/manual/job_info-tst.c b/testsuite/slurm_unit/api/manual/job_info-tst.c
index 967e42088e19d1decc23e5565c6f070b8cc78c9b..e799cd4a5bd38d9a562db7a97d85845b30a0d4a9 100644
--- a/testsuite/slurm_unit/api/manual/job_info-tst.c
+++ b/testsuite/slurm_unit/api/manual/job_info-tst.c
@@ -4,10 +4,11 @@
  *  Copyright (C) 2002 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov> et.al.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/slurm_unit/api/manual/node_info-tst.c b/testsuite/slurm_unit/api/manual/node_info-tst.c
index 9db08b49a496686fc201e7b65d2bfd8fc84ca010..fe8becdca49fd56a8b5294da0fac2fdd885716f0 100644
--- a/testsuite/slurm_unit/api/manual/node_info-tst.c
+++ b/testsuite/slurm_unit/api/manual/node_info-tst.c
@@ -4,10 +4,11 @@
  *  Copyright (C) 2002 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov> et.al.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/slurm_unit/api/manual/partition_info-tst.c b/testsuite/slurm_unit/api/manual/partition_info-tst.c
index c75d98a26f994af4ab580bee664687c4e8ce0e70..a83a3bbf4d7c897d420623f4fa6d2694a55b596a 100644
--- a/testsuite/slurm_unit/api/manual/partition_info-tst.c
+++ b/testsuite/slurm_unit/api/manual/partition_info-tst.c
@@ -4,10 +4,11 @@
  *  Copyright (C) 2002 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov> et.al.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/slurm_unit/api/manual/reconfigure-tst.c b/testsuite/slurm_unit/api/manual/reconfigure-tst.c
index 2064b77fd4b63f7498ce692d01a9ef094aee723e..d3a39d7b32bbad4011623dd96ecab166060f5a12 100644
--- a/testsuite/slurm_unit/api/manual/reconfigure-tst.c
+++ b/testsuite/slurm_unit/api/manual/reconfigure-tst.c
@@ -4,10 +4,11 @@
  *  Copyright (C) 2002 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov> et.al.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/slurm_unit/api/manual/submit-tst.c b/testsuite/slurm_unit/api/manual/submit-tst.c
index f4b54bb321cc1cc5562aae2f066ea3164b5c2c28..b2b38623e786345d0a36b3f8d7fe82185f000be2 100644
--- a/testsuite/slurm_unit/api/manual/submit-tst.c
+++ b/testsuite/slurm_unit/api/manual/submit-tst.c
@@ -4,10 +4,11 @@
  *  Copyright (C) 2002 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov> et.al.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/slurm_unit/api/manual/update_config-tst.c b/testsuite/slurm_unit/api/manual/update_config-tst.c
index 637622a8f19e5ebee8080018b475e0e97931ef43..dbb09de28129010ee922ab73d7a0dbf2b258788b 100644
--- a/testsuite/slurm_unit/api/manual/update_config-tst.c
+++ b/testsuite/slurm_unit/api/manual/update_config-tst.c
@@ -4,10 +4,11 @@
  *  Copyright (C) 2002 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov> et.al.
- *  LLNL-CODE-402394.
+ *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://www.llnl.gov/linux/slurm/>.
+ *  For details, see <https://computing.llnl.gov/linux/slurm/>.
+ *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
diff --git a/testsuite/slurm_unit/common/Makefile.in b/testsuite/slurm_unit/common/Makefile.in
index 0c177c639c7668b62f7bd58aae5fe42794e6a9fd..5b1b10a3a6888997c509308b039b652d1d2b289e 100644
--- a/testsuite/slurm_unit/common/Makefile.in
+++ b/testsuite/slurm_unit/common/Makefile.in
@@ -42,14 +42,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -117,6 +121,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
diff --git a/testsuite/slurm_unit/slurmctld/Makefile.in b/testsuite/slurm_unit/slurmctld/Makefile.in
index 5591b651710487870d18310ead674f45e1144ddd..17ec87503dfeef9d327558a3f4ac32bc0734ac9f 100644
--- a/testsuite/slurm_unit/slurmctld/Makefile.in
+++ b/testsuite/slurm_unit/slurmctld/Makefile.in
@@ -42,14 +42,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -80,6 +84,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
diff --git a/testsuite/slurm_unit/slurmd/Makefile.in b/testsuite/slurm_unit/slurmd/Makefile.in
index 340fcc85a99e6cf39b5df456356acb242e7421dc..6f8eefdbcc7220e270b0fad3845117a81a2ea805 100644
--- a/testsuite/slurm_unit/slurmd/Makefile.in
+++ b/testsuite/slurm_unit/slurmd/Makefile.in
@@ -42,14 +42,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -80,6 +84,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@
diff --git a/testsuite/slurm_unit/slurmdbd/Makefile.in b/testsuite/slurm_unit/slurmdbd/Makefile.in
index a96834972c514621f857f3662c508c370ed34877..67d452a3ede5a8d015488e7e545ec1adcc465f39 100644
--- a/testsuite/slurm_unit/slurmdbd/Makefile.in
+++ b/testsuite/slurm_unit/slurmdbd/Makefile.in
@@ -42,14 +42,18 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/acx_pthread.m4 \
 	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
 	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
 	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_elan.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
 	$(top_srcdir)/auxdir/x_ac_federation.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_gtk.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -80,6 +84,10 @@ AUTOHEADER = @AUTOHEADER@
 AUTOMAKE = @AUTOMAKE@
 AWK = @AWK@
 BG_INCLUDES = @BG_INCLUDES@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
 BLUEGENE_LOADED = @BLUEGENE_LOADED@
 CC = @CC@
 CCDEPMODE = @CCDEPMODE@